repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
Eric3911/miniDetection | [
"6fb6e1bce3ab6e4adb832b37e78325803c7424b6"
] | [
"slim/export_inference_graph.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Saves out a GraphDef containing the architecture of the model.\n\nTo use it, run something like this, with a model name defined by slim:\n\nbazel build tensorflow_models/research/slim:export_inference_graph\nbazel-bin/tensorflow_models/research/slim/export_inference_graph \\\n--model_name=inception_v3 --output_file=/tmp/inception_v3_inf_graph.pb\n\nIf you then want to use the resulting model with your own or pretrained\ncheckpoints as part of a mobile model, you can run freeze_graph to get a graph\ndef with the variables inlined as constants using:\n\nbazel build tensorflow/python/tools:freeze_graph\nbazel-bin/tensorflow/python/tools/freeze_graph \\\n--input_graph=/tmp/inception_v3_inf_graph.pb \\\n--input_checkpoint=/tmp/checkpoints/inception_v3.ckpt \\\n--input_binary=true --output_graph=/tmp/frozen_inception_v3.pb \\\n--output_node_names=InceptionV3/Predictions/Reshape_1\n\nThe output node names will vary depending on the model, but you can inspect and\nestimate them using the summarize_graph tool:\n\nbazel build tensorflow/tools/graph_transforms:summarize_graph\nbazel-bin/tensorflow/tools/graph_transforms/summarize_graph \\\n--in_graph=/tmp/inception_v3_inf_graph.pb\n\nTo run the resulting graph in C++, you can look at the label_image sample code:\n\nbazel build tensorflow/examples/label_image:label_image\nbazel-bin/tensorflow/examples/label_image/label_image \\\n--image=${HOME}/Pictures/flowers.jpg \\\n--input_layer=input \\\n--output_layer=InceptionV3/Predictions/Reshape_1 \\\n--graph=/tmp/frozen_inception_v3.pb \\\n--labels=/tmp/imagenet_slim_labels.txt \\\n--input_mean=0 \\\n--input_std=255\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.python.platform import gfile\nfrom datasets import dataset_factory\nfrom nets import nets_factory\n\nfrom object_detection.protos import pipeline_pb2\n\nfrom models.research.slim import exporter\n\nslim = tf.contrib.slim\n\ntf.app.flags.DEFINE_string(\n 'model_name', 'inception_v3', 'The name of the architecture to save.')\n\ntf.app.flags.DEFINE_boolean(\n 'is_training', False,\n 'Whether to save out a training-focused version of the model.')\n\ntf.app.flags.DEFINE_integer(\n 'image_size', None,\n 'The image size to use, otherwise use the model default_image_size.')\n\ntf.app.flags.DEFINE_integer(\n 'batch_size', None,\n 'Batch size for the exported model. Defaulted to \"None\" so batch size can '\n 'be specified at model runtime.')\n\ntf.app.flags.DEFINE_string('dataset_name', 'imagenet',\n 'The name of the dataset to use with the model.')\n\ntf.app.flags.DEFINE_integer(\n 'labels_offset', 0,\n 'An offset for the labels in the dataset. 
This flag is primarily used to '\n 'evaluate the VGG and ResNet architectures which do not use a background '\n 'class for the ImageNet dataset.')\n\ntf.app.flags.DEFINE_string(\n 'output_file', '', 'Where to save the resulting file to.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_dir', '', 'Directory to save intermediate dataset files to')\n\ntf.app.flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. Can be '\n 'one of [`image_tensor`, `encoded_image_string_tensor`, '\n '`tf_example`]')\ntf.app.flags.DEFINE_string('trained_checkpoint_prefix', None,\n 'Path to trained checkpoint, typically of the form '\n 'path/to/model.ckpt')\ntf.app.flags.DEFINE_string('output_directory', None, 'Path to write outputs.')\nFLAGS = tf.app.flags.FLAGS\n\n\ndef main(_):\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n if not FLAGS.output_file:\n raise ValueError('You must supply the path to save to with --output_file')\n tf.logging.set_verbosity(tf.logging.INFO)\n with tf.Graph().as_default() as graph:\n dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',\n FLAGS.dataset_dir)\n network_fn = nets_factory.get_network_fn(\n FLAGS.model_name,\n num_classes=(dataset.num_classes - FLAGS.labels_offset),\n is_training=FLAGS.is_training)\n image_size = FLAGS.image_size or network_fn.default_image_size\n placeholder = tf.placeholder(name='input', dtype=tf.float32,\n shape=[FLAGS.batch_size, image_size,\n image_size, 3])\n network_fn(placeholder)\n graph_def = graph.as_graph_def()\n with gfile.GFile(FLAGS.output_file, 'wb') as f:\n f.write(graph_def.SerializeToString())\n exporter.export_inference_graph(\n FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_prefix,\n FLAGS.output_directory)\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.Graph",
"tensorflow.python.platform.gfile.GFile",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.placeholder",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.logging.set_verbosity",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Mueldavc/Python-Projs | [
"3ac9e5ca20e98e8af1adf9dd711848e276ad530d"
] | [
"Modelo/Test_2.py"
] | [
"from Data_treatment.DT_1 import StockData\n\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import Sequential\nfrom deap import base, creator, tools, algorithms\nfrom scipy.stats import bernoulli\nfrom bitstring import BitArray\nfrom datetime import datetime\nfrom keras.utils import to_categorical\nfrom keras.layers import Dense\n\nnp.random.seed(1120)\n\nstockdata = StockData('WIN$', date=datetime.today(), window_days=1000, timeframe=5)\n\n\n# stockdata = StockData('PETR4', date=datetime.today(), window_days=1000, timeframe=TIMEFRAME_H4)\n\ndef prepare_dataset(window_size, data_days, cols_selec, dump=False):\n stockdata.data_final(n_in=window_size, window_days=data_days, cols=cols_selec, dump=dump)\n return stockdata\n\n\ndef uint_conf(num):\n num = num.uint\n if isinstance(num, int):\n return num\n else:\n a = 1\n\n\ndef train_evaluate(ga_individual_solution):\n # Decode GA solution to integer for window_size and num_units\n window_size_bits = BitArray(ga_individual_solution[0:6])\n window_days = BitArray(ga_individual_solution[6:14])\n num_units_bits = BitArray(ga_individual_solution[14:18])\n num_units_bits_1 = BitArray(ga_individual_solution[18:22])\n epocas_bits = BitArray(ga_individual_solution[22:26])\n cols_selec = ga_individual_solution[26:]\n\n window_size = uint_conf(window_size_bits)\n window_days = uint_conf(window_days)\n num_units = uint_conf(num_units_bits)\n num_units_1 = uint_conf(num_units_bits_1)\n epocas = uint_conf(epocas_bits)\n\n print('\\nWindow Size: ', window_size,\n ', windows days:', window_days,\n ', Num of Units: ', num_units,\n ', num_units_1:', num_units_1,\n ', epocas:', epocas)\n\n # Return fitness score of 100 if window_size or num_unit is zero\n if any([0 in [window_days, num_units, epocas],\n window_days * 40 < window_size,\n window_size <= num_units,\n window_size <= 1,\n sum(cols_selec) < 2]):\n return 0,\n\n # Segment the train_data based on new window_size; split into train and validation (80/20)\n stockdata = prepare_dataset(window_size, window_days, cols_selec)\n print(stockdata.cols_ch)\n train_x = stockdata.train_x.values\n y_train = stockdata.train_y.values\n test_x = stockdata.val_x.values\n y_test = stockdata.val_y\n y_train = to_categorical(y_train)\n\n model = Sequential()\n\n if num_units_1 != 0:\n model.add(Dense(num_units, input_dim=train_x.shape[1], activation='relu'))\n model.add(Dense(num_units_1, activation='relu'))\n else:\n model.add(Dense(num_units, input_dim=train_x.shape[1], activation='relu'))\n\n model.add(Dense(2, activation='linear'))\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n model.fit(train_x, y_train, epochs=epocas, batch_size=10, shuffle=True, verbose=2)\n y_pred = model.predict(test_x)\n y_pred = [np.argmax(i) for i in y_pred]\n\n # Calculate the RMSE score as fitness score for GA\n rmse = np.sqrt(mean_squared_error(y_test, y_pred[:-1]))\n print('Validation RMSE: ', rmse, '\\n')\n\n return rmse,\n\n\npopulation_size = 40\nnum_generations = 40\ngene_length = 26 + 22\n\n# As we are trying to minimize the RMSE score, that's why using -1.0.\n# In case, when you want to maximize accuracy for instance, use 1.0\ncreator.create('FitnessMax', base.Fitness, weights=(-1.0,))\ncreator.create('Individual', list, fitness=creator.FitnessMax)\n\ntoolbox = base.Toolbox()\ntoolbox.register('binary', bernoulli.rvs, 0.5)\ntoolbox.register('individual', tools.initRepeat, creator.Individual, toolbox.binary, n=gene_length)\ntoolbox.register('population', 
tools.initRepeat, list, toolbox.individual)\n\ntoolbox.register('mate', tools.cxOrdered)\ntoolbox.register('mutate', tools.mutShuffleIndexes, indpb=0.6)\ntoolbox.register('select', tools.selRoulette)\ntoolbox.register('evaluate', train_evaluate)\n\npopulation = toolbox.population(n=population_size)\nr = algorithms.eaSimple(population, toolbox, cxpb=0.4, mutpb=0.1, ngen=num_generations, verbose=False)\n\n# Print top N solutions - (1st only, for now)\nbest_individuals = tools.selBest(population, k=1)\nwindow_size = None\nwindow_days = None\nnum_units = None\nnum_units_1 = None\nepocas = None\n\nfor bi in best_individuals:\n window_size_bits = BitArray(bi[0:6])\n window_days = BitArray(bi[6:13])\n num_units_bits = BitArray(bi[13:17])\n num_units_bits_1 = BitArray(bi[17:21])\n epocas_bits = BitArray(bi[21:25])\n cols_selec = bi[25:]\n\n window_size = uint_conf(window_size_bits)\n window_days = uint_conf(window_days)\n num_units = uint_conf(num_units_bits)\n num_units_1 = uint_conf(num_units_bits_1)\n epocas = uint_conf(epocas_bits)\n\n # if best_window_size != 0 and best_days_size != 0 and best_num_units != 0:\n # break\nprint('Final\\nWindow Size: ', window_size,\n ', windows days:', window_days,\n ', Num of Units: ', num_units,\n ', num_units_1:', num_units_1,\n ', epocas:', epocas)\n\n# Train the model using best configuration on complete training set\n# and make predictions on the test set\nstockdata = prepare_dataset(window_size, window_days, cols_selec, dump=True)\nprint(stockdata.cols_ch)\ntrain_x = stockdata.train_x.values\ny_train = stockdata.train_y.values\ntest_x = stockdata.val_x.values\ny_test = stockdata.val_y\ny_train = to_categorical(y_train)\n\nmodel = Sequential()\n\nif num_units_1 != 0:\n model.add(Dense(num_units, input_dim=train_x.shape[1], activation='relu'))\n model.add(Dense(num_units_1, activation='relu'))\nelse:\n model.add(Dense(num_units, input_dim=train_x.shape[1], activation='relu'))\n\nmodel.add(Dense(2, activation='linear'))\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(train_x, y_train, epochs=epocas, batch_size=10, shuffle=True, verbose=2)\nmodel.save(r'C:\\Users\\mueld\\Documents\\Python_Projects\\Stock_1\\model.h5')\n\ny_pred = model.predict(test_x)\ny_pred = [np.argmax(i) for i in y_pred]\nrmse = np.sqrt(mean_squared_error(y_test, y_pred[:-1]))\nprint('Test RMSE: ', rmse)\nprint(cols_selec)\na = 1\n"
] | [
[
"numpy.argmax",
"numpy.random.seed",
"sklearn.metrics.mean_squared_error"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kene111/ivy | [
"17be67499d02594a94f05016539cf89b294acbb7"
] | [
"ivy/functional/backends/numpy/array_api/linear_algebra_extension.py"
] | [
"# global\nimport numpy as np\nfrom typing import Union, Optional, Tuple, Literal\n\n# local\nfrom ivy import inf\n\n\ndef vector_norm(x: np.ndarray, \n p: Union[int, float, Literal[inf, - inf]] = 2, \n axis: Optional[Union[int, Tuple[int]]] = None, \n keepdims: bool = False)\\\n -> np.ndarray:\n\n np_normalized_vector = None\n\n if axis == None:\n np_normalized_vector = np.linalg.norm(x.flatten(),p, axis,keepdims)\n\n else:\n np_normalized_vector = np.linalg.norm(x,p, axis,keepdims)\n\n if np_normalized_vector.shape == tuple():\n return np.expand_dims(np_normalized_vector, 0)\n return np_normalized_vector\n"
] | [
[
"numpy.expand_dims",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rpatil524/mlrun | [
"bb2259a959f871d7a479834ddc55ad1470e6c2c0"
] | [
"tests/feature-store/test_infer.py"
] | [
"import pandas as pd\n\nimport mlrun.feature_store as fs\nfrom mlrun.data_types import InferOptions\nfrom mlrun.feature_store.api import infer_from_static_df\nfrom tests.conftest import tests_root_directory\n\nthis_dir = f\"{tests_root_directory}/feature-store/\"\n\nexpected_schema = [\n {\"name\": \"bad\", \"value_type\": \"int\"},\n {\"name\": \"department\", \"value_type\": \"str\"},\n {\"name\": \"room\", \"value_type\": \"int\"},\n {\"name\": \"hr\", \"value_type\": \"float\"},\n {\"name\": \"hr_is_error\", \"value_type\": \"bool\"},\n {\"name\": \"rr\", \"value_type\": \"int\"},\n {\"name\": \"rr_is_error\", \"value_type\": \"bool\"},\n {\"name\": \"spo2\", \"value_type\": \"int\"},\n {\"name\": \"spo2_is_error\", \"value_type\": \"bool\"},\n {\"name\": \"movements\", \"value_type\": \"float\"},\n {\"name\": \"movements_is_error\", \"value_type\": \"bool\"},\n {\"name\": \"turn_count\", \"value_type\": \"float\"},\n {\"name\": \"turn_count_is_error\", \"value_type\": \"bool\"},\n {\"name\": \"is_in_bed\", \"value_type\": \"int\"},\n {\"name\": \"is_in_bed_is_error\", \"value_type\": \"bool\"},\n {\"name\": \"timestamp\", \"value_type\": \"str\"},\n]\n\n\ndef test_infer_from_df():\n key = \"patient_id\"\n df = pd.read_csv(this_dir + \"testdata.csv\")\n df.set_index(key, inplace=True)\n featureset = fs.FeatureSet(\"testdata\")\n infer_from_static_df(df, featureset, options=InferOptions.all())\n # print(featureset.to_yaml())\n\n # test entity infer\n assert len(featureset.spec.entities) == 1, \"entity not properly inferred\"\n assert (\n list(featureset.spec.entities.keys())[0] == key\n ), \"entity key not properly inferred\"\n assert (\n list(featureset.spec.entities.values())[0].value_type == \"str\"\n ), \"entity type not properly inferred\"\n\n # test infer features\n assert (\n featureset.spec.features.to_dict() == expected_schema\n ), \"did not infer schema properly\"\n\n preview = featureset.status.preview\n # by default preview should be 20 lines + 1 for headers\n assert len(preview) == 21, \"unexpected num of preview lines\"\n assert len(preview[0]) == df.shape[1], \"unexpected num of header columns\"\n assert len(preview[1]) == df.shape[1], \"unexpected num of value columns\"\n\n features = sorted(featureset.spec.features.keys())\n stats = sorted(featureset.status.stats.keys())\n stats.remove(key)\n assert features == stats, \"didnt infer stats for all features\"\n\n stat_columns = list(featureset.status.stats[\"movements\"].keys())\n assert stat_columns == [\n \"count\",\n \"mean\",\n \"std\",\n \"min\",\n \"max\",\n \"hist\",\n ], \"wrong stats result\"\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Molin-L/RLRC | [
"b2f0593d997696e1d1acd33edf0f3c44bbf8dd10"
] | [
"format_data.py"
] | [
"# !/usr/bin/env python\n# --------------------------------------------------------------\n# File: format_data.py\n# Project: RLRC\n# Created: Sunday, 5th July 2020 9:31:26 am\n# @Author: Molin Liu, MSc in Data Science, University of Glasgow\n# Contact: [email protected]\n# Last Modified: Sunday, 5th July 2020 9:31:28 am\n# Copyright © Rockface 2019 - 2020\n# --------------------------------------------------------------\n\nimport json\nimport pandas as pd\nimport numpy as np\nimport os\nfrom sklearn import preprocessing\nfrom tqdm import tqdm\nimport re\nimport unidecode\nimport logging\nfrom logger import logger\nlogger = logging.getLogger(__name__)\n\n# Set validation data ratio\nvalid_ratio = 0.1\n\n\ndef _line_prepender(filename, line):\n with open(filename, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(line.rstrip('\\r\\n') + '\\n' + content)\n\n\ndef _clean(entity):\n entity = entity.lower()\n result = re.sub('[\\W_]+', '_', entity)\n result = unidecode.unidecode(result)\n return result\n\n\ndef _valid_set(train_file):\n valid_set = []\n train_set = []\n valid_file = os.path.join(os.path.dirname(train_file), 'valid.json')\n if os.path.exists(valid_file):\n exit()\n with open(train_file) as json_file:\n json_reader = json.load(json_file)\n i = 0\n for sen in json_reader:\n if i < len(json_reader):\n valid_set.append(sen)\n else:\n train_set.append(sen)\n with open(valid_file, 'w') as out_json:\n json.dump(valid_set, out_json, indent=4, sort_keys=True)\n logger.info(\"Write to %s\" % valid_file)\n with open(train_file, 'w') as out_json:\n json.dump(train_set, out_json, indent=4, sort_keys=True)\n logger.info(\"Write to %s\" % train_file)\n\n\ndef _clean_entity(file):\n content = []\n char_pattern = re.compile('')\n with open(file, 'r+') as f:\n entity_df = pd.read_csv(\n f, sep=\"\\t\", index_col=None, header=None, skiprows=1)\n entity_df.iloc[:][0] = entity_df.iloc[:][0].apply(_clean)\n entity_df.to_csv(file, header=False, index=False, sep='\\t')\n _line_prepender(file, str(len(entity_df)))\n\n\ndef _fetch_data_nyt10(inPath):\n if not os.path.exists(inPath):\n logger.error(\"%s not found\" % inPath)\n logger.error(os.listdir(os.path.dirname(inPath)))\n raise FileNotFoundError\n with open(inPath) as json_file:\n json_reader = json.load(json_file)\n\n e1_list = []\n e2_list = []\n rel_list = []\n sentence_list = []\n\n logger.info(\"Read from json file...\")\n for sen in tqdm(json_reader):\n e1_list.append(sen['head']['word'])\n e2_list.append(sen['tail']['word'])\n rel_list.append(sen['relation'])\n sentence_list.append(\"%s\" % sen['sentence'])\n\n outfile = inPath[:-4]+'csv'\n logger.info(\"Preparing data for %s...\" % outfile)\n data_dict = {'Entity1': e1_list, 'Entity2': e2_list,\n 'Relation': rel_list, 'Sentence': sentence_list}\n out_frame = pd.DataFrame.from_dict(data_dict)\n out_frame.to_csv(outfile, index=False)\n logger.info(\"Finish write %s\" % outfile)\n\n return e1_list, e2_list, rel_list\n\n\ndef preprocess_nyt10(inPath):\n test_path = os.path.join(inPath, 'test.json')\n train_path = os.path.join(inPath, 'train.json')\n le_entity = preprocessing.LabelEncoder()\n le_rel = preprocessing.LabelEncoder()\n\n train_e1, train_e2, train_rel = _fetch_data_nyt10(train_path)\n test_e1, test_e2, test_rel = _fetch_data_nyt10(test_path)\n \"\"\"\n Generate test and train data for word embedding\n For more detail, check: https://github.com/thunlp/OpenKE#data\n \"\"\"\n \"\"\"\n Convert entities to id\n \"\"\"\n entity_list = train_e1 + train_e2 + test_e1 + test_e2\n 
le_entity.fit(entity_list)\n entities_list = le_entity.classes_\n entities_df = pd.DataFrame(\n {'entitiy': entities_list, 'id': np.arange(len(entities_list))})\n entities_out_file = os.path.join(inPath, 'entity2id.csv')\n entities_df.to_csv(entities_out_file, header=False, index=False, sep='\\t')\n _line_prepender(entities_out_file, str(len(entities_df)))\n logger.info(\"Finish write %s\" % entities_out_file)\n\n train_e1_id = le_entity.transform(train_e1)\n train_e2_id = le_entity.transform(train_e2)\n\n test_e1_id = le_entity.transform(test_e1)\n test_e2_id = le_entity.transform(test_e2)\n \"\"\"\n Convert relations to id\n \"\"\"\n le_rel.fit(train_rel+test_rel)\n train_rel_id = le_rel.transform(train_rel)\n test_rel_id = le_rel.transform(test_rel)\n\n rel_map_list = le_rel.classes_\n rel_df = pd.DataFrame(\n {'relation': rel_map_list, 'id': np.arange(len(rel_map_list))})\n rel_out_file = os.path.join(inPath, 'relation2id.csv')\n rel_df.to_csv(rel_out_file, header=False, index=False, sep='\\t')\n _line_prepender(rel_out_file, str(len(rel_df)))\n logger.info(\"Finish write %s\" % rel_out_file)\n\n train_dataid_dict = {'entity1': train_e1_id,\n 'entity2': train_e2_id, 'relation': train_rel_id}\n train_id_df = pd.DataFrame.from_dict(train_dataid_dict)\n train_id_file = os.path.join(inPath, 'train2id.csv')\n train_id_df.iloc[int(valid_ratio*len(train_id_df)):][:].to_csv(\n train_id_file, index=False, header=False, sep='\\t')\n _line_prepender(train_id_file, str(\n len(train_id_df.iloc[int(valid_ratio*len(train_id_df)):][:])))\n valid_id_file = os.path.join(inPath, 'valid2id.csv')\n train_id_df.iloc[:int(valid_ratio*len(train_id_df))\n ][:].to_csv(valid_id_file, index=False, header=False, sep='\\t')\n _line_prepender(valid_id_file, str(\n len(train_id_df.iloc[:int(valid_ratio*len(train_id_df))][:])))\n logger.info(\"Finish write %s\" % train_id_file)\n\n test_dataid_dict = {'entity1': test_e1_id,\n 'entity2': test_e2_id, 'relation': test_rel_id}\n\n test_id_df = pd.DataFrame.from_dict(test_dataid_dict)\n test_id_file = os.path.join(inPath, 'test2id.csv')\n test_id_df.to_csv(test_id_file, index=False, header=False, sep='\\t')\n _line_prepender(test_id_file, str(len(test_id_df)))\n logger.info(\"Finish write %s\" % test_id_file)\n\n\nif __name__ == \"__main__\":\n preprocess_nyt10('data/NYT10')\n _clean_entity(\n '/Users/meow/Documents/Projects/UoG_Proj/RLRC/data/shrink/entity2id.txt')\n _valid_set(\n \"/Users/meow/Documents/Projects/UoG_Proj/RLRC/data/shrink/train.json\")\n"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
smh-hosseiny/ConvTract | [
"dc42978a9db99480feacd18b7ba21c7e7cbac829"
] | [
"utils/Network.py"
] | [
"import tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nfrom keras.layers import Dropout\r\n\r\n\r\ndef residual_block(y, nb_channels, _strides=(1, 1, 1), _kernel_size=(1, 1, 1), _project_shortcut=True):\r\n shortcut = y\r\n\r\n y = layers.Conv3D(nb_channels, kernel_size=_kernel_size, strides=_strides, padding='same')(y)\r\n\r\n if _project_shortcut or _strides != (1, 1, 1):\r\n shortcut = layers.Conv3D(nb_channels, kernel_size=(1, 1, 1), strides=_strides, padding='same')(shortcut)\r\n\r\n y = layers.add([shortcut, y])\r\n y = layers.ReLU()(y)\r\n return y\r\n \r\n\r\ndef network(grad_directions, output_size, dropout):\r\n\r\n inputs = keras.Input(shape=(3,3,3,grad_directions,), name='diffusion_data')\r\n x = residual_block(inputs, 256)\r\n x = layers.Conv3D(512, kernel_size=(2,2,2))(x)\r\n x = residual_block(x, 512)\r\n x = layers.LayerNormalization()(x)\r\n x = layers.Flatten()(x)\r\n i = inputs[:,1,1,1]\r\n\r\n x2 = layers.Reshape((1,1,1,100))(i)\r\n x2 = residual_block(x2,704)\r\n x2 = layers.LayerNormalization()(x2)\r\n x2 = layers.Flatten()(x2)\r\n\r\n f = layers.Concatenate(axis=-1)([x,x2])\r\n f = layers.Reshape((40,40,3))(f)\r\n encoder = keras.Model(inputs = inputs, outputs = f, name=\"encoder\")\r\n\r\n pi = keras.Input(shape=(40,40,3,), name='encoded_data')\r\n x = layers.Conv2D(32, (3,3), activation = 'relu')(pi)\r\n x = layers.LayerNormalization()(x)\r\n x = layers.Conv2D(64, (3,3), activation = 'relu')(x)\r\n x = layers.Conv2D(64, (3,3), activation = 'relu')(x)\r\n x = layers.LayerNormalization()(x)\r\n x = layers.Dropout(dropout)(x)\r\n x = layers.MaxPool2D(pool_size=(2,2), strides = None)(x)\r\n x = layers.Conv2D(128, (3,3), activation = 'relu')(x)\r\n x = layers.Conv2D(128, (3,3), activation = 'relu')(x)\r\n x = layers.MaxPool2D(pool_size=(2,2), strides = None)(x)\r\n x = layers.LayerNormalization()(x)\r\n x = layers.Conv2D(256, (3,3), activation = 'relu')(x)\r\n x = layers.Conv2D(256, (3,3), padding = 'same', activation = 'relu')(x)\r\n x = layers.Conv2D(512, (3,3), activation = 'relu')(x)\r\n x = layers.Flatten()(x)\r\n x = layers.Dropout(dropout)(x)\r\n out = layers.Dense(output_size, activation = 'linear')(x)\r\n predictor = keras.Model(inputs = pi, outputs = out, name=\"predictor\")\r\n\r\n features = encoder(inputs)\r\n preds = predictor(features)\r\n model = keras.Model(inputs = inputs, outputs = preds, name=\"Model\")\r\n\r\n return model\r\n \r\n"
] | [
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
eayumba/CS221Project | [
"9aca2e4a0051296ba95cefac7cc611b61effbbff"
] | [
"predict.py"
] | [
"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Activation, Dropout, Dense, Lambda\nfrom tensorflow.keras.layers import BatchNormalization as BatchNorm\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.utils import to_categorical\nfrom Data_Parser import getNotes\nimport numpy\nimport music21\nfrom Data_Parser import getNotes\nimport pickle\nimport random\n\n#CONSTANTS\nOUTPUT_DIR = 'final_output_ep100_t7'\nWEIGHTS_DIR = 'final_weights_ep100'\nSEQUENCE_LEN = 20\nLOADED = True # must change if songs are added to training/testing data\n#HYPERPARAMETERS\nTEMP = 0.7\nLSTM_LAYER_SIZE = 256\nDROPOUT_RATE = 0.2\nEPOCHS = 50\nBATCH_SIZE = 64\nN_NEW_NOTES = 200\n\ndef main():\n input, output, mapping = getNotes(SEQUENCE_LEN, False, LOADED) # getNotes(int, bool train, bool loaded)\n test_input = [[mapping[note] for note in sequence] for sequence in input]\n\n model = rebuild_model(test_input, mapping)\n test_output = [mapping[note]for note in output]\n test_input_np = numpy.reshape(test_input, (len(test_input), len(test_input[0]), 1))\n test_output = to_categorical(test_output, num_classes = len(mapping))\n model.evaluate(test_input_np, test_output, batch_size=BATCH_SIZE)\n makeNotes(model, test_input, mapping)\n\n\n\ndef rebuild_model(test_input, mapping):\n test_input = numpy.reshape(test_input, (len(test_input), len(test_input[0]), 1))\n\n #New\n model = Sequential()\n model.add(LSTM(LSTM_LAYER_SIZE, # num nodes\n input_shape=(test_input.shape[1], test_input.shape[2]), # Since this is the first layer, we know dimentions of input\n return_sequences=True)) # creates recurrence\n model.add(LSTM(LSTM_LAYER_SIZE,\n return_sequences=True, # creates recurrence\n recurrent_dropout=DROPOUT_RATE,)) # fraction to leave out from recurrence\n\n model.add(LSTM(LSTM_LAYER_SIZE)) # multiple LSTM layers create Deep Neural Network for greater accuracy\n model.add(BatchNorm()) # normalizes inputs to neural network layers to make training faster\n model.add(Dropout(DROPOUT_RATE)) # prevents overfitting\n model.add(Dense(len(mapping))) # classification layer - output must be same dimentions as mapping\n model.add(Lambda(lambda x: x / TEMP))# adds temperature settings\n model.add(Activation('softmax')) # transforms output into a probability distribution\n\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n #load weights\n model.load_weights('%s.hdf5' %WEIGHTS_DIR)\n\n return model\n\n\ndef makeNotes(model, test_input, mapping):\n start = numpy.random.randint(0, len(test_input)-1)\n\n int_to_note = dict((mapping[note], note) for note in mapping.keys())\n initial_sequence = test_input[start]\n #output = [] we used this for error checking\n\n s = music21.stream.Stream()\n\n for i in range(N_NEW_NOTES):\n prediction_input = numpy.reshape(initial_sequence, (1, len(initial_sequence), 1))\n\n prediction = model.predict(prediction_input, verbose=0)\n index = numpy.random.choice(numpy.arange(len(prediction[0])), p = prediction[0]) # samples from distribution\n\n result = int_to_note[index]\n\n #add the note to output stream\n if \".\" in result:\n note = music21.chord.Chord(result.split(\".\"))\n #print(\"created_chord\")\n elif (result == 'R'):\n note = music21.note.Rest()\n else:\n note = music21.note.Note(result)\n #print(\"created_note\")\n s.append(note)\n #output.append(result)\n\n initial_sequence.append(index)\n initial_sequence = initial_sequence[1:len(initial_sequence)]\n s.write('midi', fp=\"%s.mid\" %OUTPUT_DIR)\n 
#print(output)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.Sequential"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
banditbandito/TopographicVAE | [
"690d845123da633eb2a997e1df76766446e50307"
] | [
"tvae/experiments/bubbles_dsprites.py"
] | [
"import os\nimport torch\nfrom torch import optim\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom tvae.data.dsprites import get_dataloader\nfrom tvae.containers.tvae import TVAE\nfrom tvae.models.mlp import MLP_Encoder, MLP_Decoder\nfrom tvae.containers.encoder import Gaussian_Encoder\nfrom tvae.containers.decoder import Bernoulli_Decoder\nfrom tvae.containers.grouper import Stationary_Capsules_1d\nfrom tvae.utils.logging import configure_logging, get_dirs\nfrom tvae.utils.train_loops import train_epoch_dsprites\n\ndef create_model(n_caps, cap_dim, mu_init, n_transforms, k_time, k_space):\n s_dim = n_caps * cap_dim\n group_kernel = (k_time, k_space, 1)\n z_encoder = Gaussian_Encoder(MLP_Encoder(s_dim=s_dim, n_cin=1, n_hw=64),\n loc=0.0, scale=1.0)\n\n u_encoder = Gaussian_Encoder(MLP_Encoder(s_dim=s_dim, n_cin=1, n_hw=64), \n loc=0.0, scale=1.0)\n\n decoder = Bernoulli_Decoder(MLP_Decoder(s_dim=s_dim, n_cout=1, n_hw=64))\n\n grouper = Stationary_Capsules_1d(\n nn.ConvTranspose3d(in_channels=1, out_channels=1,\n kernel_size=group_kernel, \n padding=(2*(group_kernel[0] // 2)-(k_time+1)%2, \n 2*(group_kernel[1] // 2)-(k_space+1)%2,\n 2*(group_kernel[2] // 2)),\n stride=(1,1,1), padding_mode='zeros', bias=False),\n lambda x: F.pad(x, (group_kernel[2] // 2, group_kernel[2] // 2,\n group_kernel[1] // 2+(k_space+1)%2, group_kernel[1] // 2,\n group_kernel[0] // 2+(k_time+1)%2, group_kernel[0] // 2), \n mode='circular'),\n n_caps=n_caps, cap_dim=cap_dim, n_transforms=n_transforms,\n mu_init=mu_init)\n \n return TVAE(z_encoder, u_encoder, decoder, grouper)\n\n\ndef main():\n config = {\n 'wandb_on': False,\n 'lr': 1e-4,\n 'momentum': 0.9,\n 'batch_size': 8,\n 'max_epochs': 100,\n 'eval_epochs': 5,\n 'dataset': 'DSprites',\n 'seq_transforms': ['posX', 'posY', 'orientation', 'scale'],\n 'avail_transforms': ['posX', 'posY', 'orientation', 'scale', 'shape'],\n 'seed': 1,\n 'n_caps': 15,\n 'cap_dim': 15,\n 'n_transforms': 15,\n 'max_transform_len': 30,\n 'mu_init': 30.0,\n 'k_time': 5,\n 'k_space': 5,\n 'n_is_samples': 10\n }\n\n name = 'Bubbles_dSprites_L=1/6_K=5'\n\n config['savedir'], config['data_dir'], config['wandb_dir'] = get_dirs()\n\n savepath = os.path.join(config['savedir'], name)\n\n train_loader = get_dataloader(dir=config['data_dir'], \n seq_transforms=config['seq_transforms'],\n avail_transforms=config['avail_transforms'],\n seq_len=config['n_transforms']-1, \n max_transform_len=config['max_transform_len'],\n batch_size=config['batch_size'])\n\n model = create_model(n_caps=config['n_caps'], cap_dim=config['cap_dim'], mu_init=config['mu_init'], \n n_transforms=config['n_transforms'], k_time=config['k_time'], k_space=config['k_space'])\n model.to('cuda')\n\n log, checkpoint_path = configure_logging(config, name, model)\n # model.load_state_dict(torch.load(checkpoint_path))\n\n optimizer = optim.SGD(model.parameters(), \n lr=config['lr'],\n momentum=config['momentum'])\n scheduler = StepLR(optimizer, step_size=1, gamma=1.0)\n\n for e in range(config['max_epochs']):\n log('Epoch', e)\n\n total_loss, total_neg_logpx_z, total_kl, total_eq_loss, total_dis_corr, num_batches = train_epoch_dsprites(model, optimizer, \n train_loader, log,\n savepath, e, eval_batches=3000,\n plot_weights=False,\n plot_fullcaptrav=True,\n compute_capcorr=True,\n wandb_on=config['wandb_on'])\n\n log(\"Epoch Avg Loss\", total_loss / num_batches)\n log(\"Epoch Avg -LogP(x|z)\", total_neg_logpx_z / num_batches)\n log(\"Epoch Avg KL\", total_kl / 
num_batches)\n log(\"Epoch Avg EQ Loss\", total_eq_loss / num_batches)\n \n for idx, t in enumerate(['shape', 'scale', 'orientation', 'posX', 'posY']):\n log(\"Epoch Avg Cap Corr: {}\".format(t), total_dis_corr[idx])\n\n scheduler.step()\n torch.save(model.state_dict(), checkpoint_path)\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.nn.ConvTranspose3d",
"torch.nn.functional.pad",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hokmund/mmdetection | [
"7d49b7b535456929333d71a543159a00d7ae2faf"
] | [
"mmdet/models/detectors/two_stage.py"
] | [
"import torch\n\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\n\n\[email protected]_module()\nclass TwoStageDetector(BaseDetector):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(TwoStageDetector, self).__init__(init_cfg)\n backbone.pretrained = pretrained\n self.backbone = build_backbone(backbone)\n\n if neck is not None:\n self.neck = build_neck(neck)\n\n if rpn_head is not None:\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head_ = rpn_head.copy()\n rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head_)\n\n if roi_head is not None:\n # update train and test cfg here for now\n # TODO: refactor assigner & sampler\n rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n roi_head.update(train_cfg=rcnn_train_cfg)\n roi_head.update(test_cfg=test_cfg.rcnn)\n roi_head.pretrained = pretrained\n self.roi_head = build_head(roi_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @property\n def with_rpn(self):\n \"\"\"bool: whether the detector has RPN\"\"\"\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n @property\n def with_roi_head(self):\n \"\"\"bool: whether the detector has a RoI head\"\"\"\n return hasattr(self, 'roi_head') and self.roi_head is not None\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck.\"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/analysis_tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n outs = outs + (rpn_outs, )\n proposals = torch.randn(1000, 4).to(img.device)\n # roi_head\n roi_outs = self.roi_head.forward_dummy(x, proposals)\n outs = outs + (roi_outs, )\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. 
Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks,\n **kwargs)\n losses.update(roi_losses)\n\n return losses\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.rpn_head.async_simple_test_rpn(\n x, img_meta)\n else:\n proposal_list = proposals\n\n return await self.roi_head.async_simple_test(\n x, proposal_list, img_meta, rescale=rescale)\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n x = self.extract_feat(img)\n\n # get origin input shape to onnx dynamic input shape\n if torch.onnx.is_in_onnx_export():\n img_shape = torch._shape_as_tensor(img)[2:]\n img_metas[0]['img_shape_for_onnx'] = img_shape\n\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n return self.roi_head.simple_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n x = self.extract_feats(imgs)\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n return self.roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n"
] | [
[
"torch.randn",
"torch.onnx.is_in_onnx_export",
"torch._shape_as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EricGustin/SmartRedis | [
"42c42fb4312c0822a58e3c869f60b7e51d4bdd05"
] | [
"tests/python/test_errors.py"
] | [
"import os\n\nimport numpy as np\nimport pytest\nfrom smartredis import Client, Dataset\nfrom smartredis.error import RedisConnectionError, RedisReplyError\n\n\ndef test_SSDB_not_set(use_cluster):\n ssdb = os.environ[\"SSDB\"]\n del os.environ[\"SSDB\"]\n with pytest.raises(RedisConnectionError):\n c = Client(None, use_cluster)\n os.environ[\"SSDB\"] = ssdb\n\n\ndef test_bad_SSDB(use_cluster):\n ssdb = os.environ[\"SSDB\"]\n del os.environ[\"SSDB\"]\n os.environ[\"SSDB\"] = \"not-an-address:6379;\"\n with pytest.raises(RedisConnectionError):\n c = Client(None, use_cluster)\n os.environ[\"SSDB\"] = ssdb\n\n\ndef test_bad_get_tensor(use_cluster):\n c = Client(None, use_cluster)\n with pytest.raises(RedisReplyError):\n c.get_tensor(\"not-a-key\")\n\n\ndef test_bad_get_dataset(use_cluster):\n c = Client(None, use_cluster)\n with pytest.raises(RedisReplyError):\n c.get_dataset(\"not-a-key\")\n\n\ndef test_bad_type_put_dataset(use_cluster):\n c = Client(None, use_cluster)\n array = np.array([1, 2, 3, 4])\n with pytest.raises(TypeError):\n c.put_dataset(array)\n\n\ndef test_bad_type_put_tensor(use_cluster):\n c = Client(None, use_cluster)\n with pytest.raises(TypeError):\n c.put_tensor(\"key\", [1, 2, 3, 4])\n\n\ndef test_unsupported_type_put_tensor(use_cluster):\n \"\"\"test an unsupported numpy type\"\"\"\n c = Client(None, use_cluster)\n data = np.array([1, 2, 3, 4]).astype(np.uint64)\n with pytest.raises(TypeError):\n c.put_tensor(\"key\", data)\n\n\ndef test_bad_type_add_tensor(use_cluster):\n d = Dataset(\"test-dataset\")\n with pytest.raises(TypeError):\n d.add_tensor(\"test-tensor\", [1, 2, 3])\n\n\ndef test_bad_script_file(use_cluster):\n c = Client(None, use_cluster)\n with pytest.raises(FileNotFoundError):\n c.set_script_from_file(\"key\", \"not-a-file\")\n\n\ndef test_bad_callable(use_cluster):\n \"\"\"user provides none callable function to set_function\"\"\"\n c = Client(None, use_cluster)\n with pytest.raises(TypeError):\n c.set_function(\"key\", \"not-a-file\")\n\n\ndef test_bad_device(use_cluster):\n c = Client(None, use_cluster)\n with pytest.raises(TypeError):\n c.set_script(\"key\", \"some_script\", device=\"not-a-gpu\")\n\n\ndef test_get_non_existant_script(use_cluster):\n c = Client(None, use_cluster)\n with pytest.raises(RedisReplyError):\n script = c.get_script(\"not-a-script\")\n\n\ndef test_bad_function_execution(use_cluster):\n \"\"\"Error raised inside function\"\"\"\n\n c = Client(None, use_cluster)\n c.set_function(\"bad-function\", bad_function)\n data = np.array([1, 2, 3, 4])\n c.put_tensor(\"bad-func-tensor\", data)\n with pytest.raises(RedisReplyError):\n c.run_script(\"bad-function\", \"bad_function\", [\"bad-func-tensor\"], [\"output\"])\n\n\ndef test_missing_script_function(use_cluster):\n \"\"\"User requests to run a function not in the script\"\"\"\n\n c = Client(None, use_cluster)\n c.set_function(\"bad-function\", bad_function)\n with pytest.raises(RedisReplyError):\n c.run_script(\n \"bad-function\", \"not-a-function-in-script\", [\"bad-func-tensor\"], [\"output\"]\n )\n\n\ndef test_wrong_model_name(mock_data, mock_model, use_cluster):\n \"\"\"User requests to run a model that is not there\"\"\"\n\n data = mock_data.create_data(1)\n\n model = mock_model.create_torch_cnn()\n c = Client(None, use_cluster)\n c.set_model(\"simple_cnn\", model, \"TORCH\", \"CPU\")\n c.put_tensor(\"input\", data[0])\n with pytest.raises(RedisReplyError):\n c.run_model(\"wrong_cnn\", [\"input\"], [\"output\"])\n\n\ndef test_wrong_model_name_from_file(mock_data, mock_model, 
use_cluster):\n \"\"\"User requests to run a model that is not there\n that was loaded from file.\"\"\"\n\n try:\n data = mock_data.create_data(1)\n mock_model.create_torch_cnn(filepath=\"./torch_cnn.pt\")\n c = Client(None, use_cluster)\n c.set_model_from_file(\"simple_cnn_from_file\", \"./torch_cnn.pt\", \"TORCH\", \"CPU\")\n c.put_tensor(\"input\", data[0])\n with pytest.raises(RedisReplyError):\n c.run_model(\"wrong_cnn\", [\"input\"], [\"output\"])\n finally:\n os.remove(\"torch_cnn.pt\")\n\n\ndef test_set_data_wrong_type():\n \"\"\"A call to Dataset.set_data is made with the wrong\n type (i.e. not Pydataset).\n \"\"\"\n d = Dataset(\"test_dataset\")\n input_param = Dataset(\"wrong_input_param\")\n with pytest.raises(TypeError):\n d.set_data(input_param)\n\n\ndef test_from_pybind_wrong_type():\n \"\"\"A call to Dataset.set_data is made with the wrong\n type (i.e. not Pydataset).\n \"\"\"\n input_param = Dataset(\"wrong_input_param\")\n with pytest.raises(TypeError):\n d = Dataset.from_pybind(input_param)\n\n\ndef bad_function(data):\n \"\"\"Bad function which only raises an exception\"\"\"\n return False\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jngaravitoc/nba | [
"d2a64a69fd743e066fe3e0bad9c9bc109763ff97"
] | [
"nba/orbits/nbody_orbit_extractor.py"
] | [
"\"\"\"\nScript to extract a particle orbit from a N-body halo simulation\n\n\"\"\"\n\nimport numpy as np\n#import pygadgetreader \nimport sys\nfrom gadget_reader import read_snap \n\n\n# This function is not used in this script, but it is helpful to build the particle ids file. \n\ndef get_particle_id(pos, vel, ids, r_lim, dr, v_lim, dv):\n\t\"\"\"\n\n\tSelects particle ids based on positions and velocities. \n\tThis is useful to select particles on specific orbits. \n\n\t\"\"\"\n\tr = np.sqrt(np.sum(pos**2, axis=1))\n\tv = np.sqrt(np.sum(vel**2, axis=1))\n\tindex = np.where((r<r_lim+dr) & (r>r_lim-dr) & (v<v_lim+dv) & (v>v_lim-dv))\n\treturn ids[index]\n\n\ndef get_snap_ids(snap, ids_p, i):\n\t\"\"\"\n\n\tSelect ids of particles in a snapshot\n\n\t\"\"\"\n\n #os = read_snap(snap+'_{:0>3d}'.format(i), 'pos', 'dm')\n #ds = read_snap(snap+'_{:0>3d}'.format(i), 'pid', 'dm')\n\t# Read snapshot\n\tpos = read_snap(snap+'_{:0>3d}.hdf5'.format(i), 'PartType1', 'Coordinates')\n\tvel = read_snap(snap+'_{:0>3d}.hdf5'.format(i), 'PartType1', 'Velocities')\n\tids = read_snap(snap+'_{:0>3d}.hdf5'.format(i), 'PartType1', 'ParticleIDs')\n\t\n\t# select ids \n\tsort_ids = np.argsort(np.ascontiguousarray(ids))\n\tparticles = np.in1d(np.ascontiguousarray(ids)[sort_ids], ids_p)\n\tprint(ids_p, ids[particles], np.linspace(0, int(len(ids)-1), int(len(ids)))[particles])\t\n\t#print(len(particles))\n\tpos_orbit = np.ascontiguousarray(pos)[sort_ids][particles]\n\tvel_orbit = np.ascontiguousarray(vel)[sort_ids][particles]\n\tassert len(pos_orbit) == len(ids_p), \"something wrong with selecting particles in this snapshot\" \n\treturn pos_orbit, vel_orbit\n\ndef extract_orbits(snap, snap_i, snap_f, ids_p):\n\t\"\"\"\n\tExtract particles orbits by selecting the particle ids in different snapshots\n\n\t\"\"\"\n\tN_snaps = snap_f - snap_i +1 \n\tN_part = len(ids_p)\n\tpos_orbits = np.zeros((N_snaps, N_part, 3))\n\tvel_orbits = np.zeros((N_snaps, N_part, 3))\n\tj=0\n\tfor i in range(snap_i, snap_f+1):\n\t\tpos_orbits[j], vel_orbits[j] = get_snap_ids(snap, ids_p, i)\n\t\tj+=1\n\n\treturn pos_orbits, vel_orbits\n \ndef get_orbits(out_name, snapname, init_snap, final_snap, ids_particles):\n\t\"\"\"\n\tWrite all the orbtis. One file per particle orbit\n\t\n\treturn\n\t0\n\t\"\"\"\n\tN_part = len(ids_particles)\n\tall_pos, all_vel = extract_orbits(snapname, init_snap, final_snap, ids_particles)\n\tassert N_part < 1000, \"Currently only supporting up to 1000 orbits, if more are needed edit this function\"\n\n\tfor i in range(N_part):\n\t\tnp.savetxt(out_name+\"_particle_{:0>3d}.txt\".format(i), np.array([all_pos[:,i,0], all_pos[:,i,1], all_pos[:,i,2],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tall_vel[:,i,0], all_vel[:,i,1], all_vel[:,i,2]]).T)\n\treturn 0 \n\n \nif __name__ == \"__main__\":\n\tids_file = sys.argv[1]\n\tsnaps_file = sys.argv[2]\n\tout_path = sys.argv[3]\n\tout_name = sys.argv[4]\n\tsnap_i = int(sys.argv[5])\n\tsnap_f = int(sys.argv[6])\n\tids_all = np.loadtxt(ids_file)\t\n\tprint(\"Done loading particle IDs\")\n\tget_orbits(out_path+out_name, snaps_file, snap_i, snap_f, ids_all)\n"
] | [
[
"numpy.ascontiguousarray",
"numpy.zeros",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jorgenwh/npstructures | [
"0eab421f1ddde159b2450e3e8b3cadbdaad00ba9"
] | [
"profiling/hashtable.py"
] | [
"from npstructures.hashtable import HashTable, IHashTable, Counter\nimport timeit\nimport cProfile\nimport pstats\nimport numpy as np\nnp.random.seed(10000)\ndef get_random_data(n_keys, key_space, n_samples):\n keys = np.cumsum(np.random.randint(1, key_space, n_keys))\n samples = np.random.choice(keys, size=n_samples)\n return keys, samples\n\n\nkeys, samples = get_random_data(80000000, 7, 5000000)\n# table = HashTable(keys, np.arange(keys.size), keys.size*3-1)\ncounter = Counter(keys, keys.size*3-1)\np_stats_name = \"profiling/.hash_table.txt\"\ncProfile.run(\"counter.count(samples)\", p_stats_name)\nstats = pstats.Stats(p_stats_name)\nstats.sort_stats(\"cumulative\")\nstats.print_stats()\n\n\nfor cls in (HashTable,):\n print(cls.__name__)\n table = cls(keys, np.arange(keys.size), keys.size*3-1)\n table[samples]\n print(\"running\")\n print(timeit.repeat(\"table[samples]\", globals=globals(), number=1, repeat=1))\n"
] | [
[
"numpy.arange",
"numpy.random.randint",
"numpy.random.seed",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
awen1988/yry | [
"b65ccd7062d60f605fc978a87e060d0015cf1d4c"
] | [
"core/triangulation.py"
] | [
"import cv2\nimport numpy as np\n\n\ndef draw_point(img, p, color):\n cv2.circle(img, (p[0], p[1]), 2, color, cv2.FILLED, cv2.LINE_AA, 0)\n\n\ndef rect_contains(rect, point):\n if point[0] < rect[0]:\n return False\n elif point[1] < rect[1]:\n return False\n elif point[0] > rect[2]:\n return False\n elif point[1] > rect[3]:\n return False\n return True\n\n\ndef measure_triangle(image, points):\n rect = (0, 0, image.shape[1], image.shape[0])\n sub_div = cv2.Subdiv2D(rect)\n\n for p in points:\n sub_div.insert(p)\n\n triangle_list = sub_div.getTriangleList()\n\n triangle = []\n pt = []\n\n for t in triangle_list:\n pt.append((t[0], t[1]))\n pt.append((t[2], t[3]))\n pt.append((t[4], t[5]))\n\n pt1 = (t[0], t[1])\n pt2 = (t[2], t[3])\n pt3 = (t[4], t[5])\n\n if rect_contains(rect, pt1) and rect_contains(rect, pt2) and rect_contains(rect, pt3):\n ind = []\n for j in range(0, 3):\n for k in range(0, len(points)):\n if abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0:\n ind.append(k)\n if len(ind) == 3:\n triangle.append((ind[0], ind[1], ind[2]))\n\n pt = []\n\n return triangle\n\n\ndef morph_triangle(src, dst, img, t_src, t_dst, t, alpha):\n r1 = cv2.boundingRect(np.float32([t_src]))\n r2 = cv2.boundingRect(np.float32([t_dst]))\n r = cv2.boundingRect(np.float32([t]))\n\n t1_rect = []\n t2_rect = []\n t_rect = []\n\n for i in range(0, 3):\n t_rect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))\n t1_rect.append(((t_src[i][0] - r1[0]), (t_src[i][1] - r1[1])))\n t2_rect.append(((t_dst[i][0] - r2[0]), (t_dst[i][1] - r2[1])))\n\n mask = np.zeros((r[3], r[2], 3), dtype=np.float32)\n cv2.fillConvexPoly(mask, np.int32(t_rect), (1.0, 1.0, 1.0), 16, 0)\n\n img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]\n img2_rect = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]\n\n size = (r[2], r[3])\n\n warp_img1 = affine_transform(img1_rect, t1_rect, t_rect, size)\n warp_img2 = affine_transform(img2_rect, t2_rect, t_rect, size)\n\n img_rect = (1.0 - alpha) * warp_img1 + alpha * warp_img2\n\n img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (1 - mask) + img_rect * mask\n\n\ndef affine_triangle(src, dst, t_src, t_dst):\n r1 = cv2.boundingRect(np.float32([t_src]))\n r2 = cv2.boundingRect(np.float32([t_dst]))\n\n t1_rect = []\n t2_rect = []\n t2_rect_int = []\n\n for i in range(0, 3):\n t1_rect.append((t_src[i][0] - r1[0], t_src[i][1] - r1[1]))\n t2_rect.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))\n t2_rect_int.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))\n\n mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)\n cv2.fillConvexPoly(mask, np.int32(t2_rect_int), (1.0, 1.0, 1.0), 16, 0)\n\n img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]\n\n size = (r2[2], r2[3])\n\n img2_rect = affine_transform(img1_rect, t1_rect, t2_rect, size)\n img2_rect = img2_rect * mask\n\n dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * (\n (1.0, 1.0, 1.0) - mask)\n\n dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2_rect\n\n\ndef affine_transform(src, src_tri, dst_tri, size):\n warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))\n\n dst = cv2.warpAffine(src, warp_mat, (size[0], size[1]),\n None,\n flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_REFLECT_101)\n\n return dst\n"
] | [
[
"numpy.int32",
"numpy.zeros",
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tkc-morita/secl | [
"d0156cea4fd95ea5071126dbf076a6da69752a37"
] | [
"modules/sparsemax.py"
] | [
"\"\"\"Sparsemax activation function.\n\nPytorch implementation of Sparsemax function from:\n-- \"From Softmax to Sparsemax: A Sparse Model of Attention and Multi-Label Classification\"\n-- André F. T. Martins, Ramón Fernandez Astudillo (http://arxiv.org/abs/1602.02068)\n\nModified implementation by Kris Korrel.\nhttps://github.com/KrisKorrel/sparsemax-pytorch\n\"\"\"\n\nfrom __future__ import division\n\nimport torch\nimport torch.nn as nn\n\n# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Sparsemax(nn.Module):\n \"\"\"Sparsemax function.\"\"\"\n\n def __init__(self, dim=None):\n \"\"\"Initialize sparsemax activation\n \n Args:\n dim (int, optional): The dimension over which to apply the sparsemax function.\n \"\"\"\n super(Sparsemax, self).__init__()\n\n self.dim = -1 if dim is None else dim\n\n def forward(self, input):\n \"\"\"Forward function.\n\n Args:\n input (torch.Tensor): Input tensor. First dimension should be the batch size\n\n Returns:\n torch.Tensor: [batch_size x number_of_logits] Output tensor\n\n \"\"\"\n # Sparsemax currently only handles 2-dim tensors,\n # so we reshape and reshape back after sparsemax\n original_size = input.size()\n input = input.view(-1, input.size(self.dim))\n \n dim = 1\n number_of_logits = input.size(dim)\n\n # Translate input by max for numerical stability\n input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as(input)\n\n # Sort input in descending order.\n # (NOTE: Can be replaced with linear time selection method described here:\n # http://stanford.edu/~jduchi/projects/DuchiShSiCh08.html)\n zs = torch.sort(input=input, dim=dim, descending=True)[0]\n range = torch.arange(start=1, end=number_of_logits+1, device=input.device).view(1, -1)\n range = range.expand_as(zs).type(input.type())\n\n # Determine sparsity of projection\n bound = 1 + range * zs\n cumulative_sum_zs = torch.cumsum(zs, dim)\n is_gt = torch.gt(bound, cumulative_sum_zs).type(input.type())\n k = torch.max(is_gt * range, dim, keepdim=True)[0]\n\n # Compute threshold function\n zs_sparse = is_gt * zs\n\n # Compute taus\n taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k\n taus = taus.expand_as(input)\n\n # Sparsemax\n self.output = torch.max(torch.zeros_like(input), input - taus)\n\n output = self.output.view(original_size)\n\n return output\n\n def backward(self, grad_output):\n \"\"\"Backward function.\"\"\"\n dim = 1\n\n nonzeros = torch.ne(self.output, 0)\n sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros, dim=dim)\n self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output))\n\n return self.grad_input"
] | [
[
"torch.max",
"torch.zeros_like",
"torch.sum",
"torch.sort",
"torch.arange",
"torch.gt",
"torch.cumsum",
"torch.ne"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
J-Massey/lotus_docs | [
"a855b7d5e44872dc2969b69a76bb8d1b57aaf2a0"
] | [
"Examples/sphere/postproc/stat.py"
] | [
"#!/usr/bin/env python3\n# ----------------------------------------- #\n# stat.py\n# ----------------------------------------- #\n#\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n#\n# read data and drop unwanted rows and columns\ntry:\n df = pd.read_csv('fort.9',delim_whitespace = True,\n names=[\"time\",\"CFL\",\"drag\",\"lift\",\"side\"])\nexcept FileNotFoundError:\n exit('stat: fort.9 not found')\ndf.drop(df.index[:3], inplace=True)\n\ntry:\n mg = pd.read_csv('fort.8',delim_whitespace = True,\n names=[\"itr\",\"res0\",\"res\",\"inf\"])[2:]\nexcept FileNotFoundError:\n exit('stat: fort.8 not found')\n#\n# -- plot PDF pages\ndef str_rnd(num,d=4): return str(round(num,d))\n\ndef plot_hist(pdf,name,label):\n ax = df.plot(x='time',y=name,figsize=(8,4))\n plt.xlabel(r'$t/T$', fontsize=12)\n plt.ylabel(label, fontsize=12)\n mean,mad = df[name].mean(), 1.5748*df[name].mad()\n x1,x2,y1,y2 = plt.axis()\n mx,mn = min(y2,mean+3*mad),max(y1,mean-3*mad)\n plt.ylim([mn,mx])\n txt = 'mean='+str_rnd(mean)+', mad='+str_rnd(mad)\n plt.text(0.5,0.01,txt,transform=ax.transAxes)\n pdf.savefig()\n plt.close()\n\nwith PdfPages('history.pdf') as pdf:\n plot_hist(pdf,name='drag',label=r'$C_{Xp}$')\n plot_hist(pdf,name='lift',label=r'$C_{Yp}$')\n plot_hist(pdf,name='side',label=r'$C_{Zp}$')\n plot_hist(pdf,name='CFL',label=r'$\\frac{\\Delta t U}{\\Delta x}$')\n\n mg.plot(y=['res0','res','inf'],figsize=(8,4))\n plt.yscale('log')\n pdf.savefig()\n\n mg.plot(y='itr',figsize=(8,4))\n pdf.savefig()\n"
] | [
[
"matplotlib.backends.backend_pdf.PdfPages",
"pandas.read_csv",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.text",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
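The stat.py entry above relies on pandas' Series.mad(), which was deprecated in pandas 1.5 and removed in 2.0, so of the candidate versions listed only the 1.x releases run the script unmodified. Below is a hedged, version-agnostic sketch of the same statistics; it assumes the same whitespace-delimited fort.9 layout and spells the mean absolute deviation out by hand.

```python
import pandas as pd

# Same columns and row-skipping as stat.py; sep=r"\s+" replaces the now-deprecated
# delim_whitespace=True.
df = pd.read_csv("fort.9", sep=r"\s+",
                 names=["time", "CFL", "drag", "lift", "side"]).iloc[3:]

def history_stats(s: pd.Series):
    mad = (s - s.mean()).abs().mean()     # mean absolute deviation, i.e. the old Series.mad()
    return s.mean(), 1.5748 * mad         # same 1.5748 scaling used for the plot limits above

mean, spread = history_stats(df["drag"])
print(f"mean={round(mean, 4)}, mad={round(spread, 4)}")
```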
Efekurdoglu/T-Rex-Runner-via-OpenCV-and-CNN | [
"b9ed78043632fb3f64677c96644ba2772db6a6b7"
] | [
"func.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 14 01:09:46 2021\r\n\r\n@author: Efe Kurdoğlu\r\n\"\"\"\r\n\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nfrom PIL import Image\r\nfrom mss import mss\r\n\r\ndef onehot_labels(values):\r\n label_encoder = LabelEncoder()\r\n integer_encoded = label_encoder.fit_transform(values)\r\n onehot_encoder = OneHotEncoder(sparse = False)\r\n integer_encoded = integer_encoded.reshape(len(integer_encoded),1)\r\n onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n return onehot_encoded\r\n\r\ndef frames(id, key): # key is the buttons from the keyboard\r\n global i\r\n \r\n i += 1\r\n print(\"{}: {}\".format(key, i)) # key: up, down, left, right arrow\r\n mon = {\"top\":385, \"left\":520, \"width\":250, \"height\":100} # i: for how many times we pressed the button\r\n img = mss().grab(mon) # get the ROC with the size of specified in mon\r\n im = Image.frombytes(\"RGB\", img.size, img.rgb)\r\n im.save(\"./images/{}_{}_{}.png\".format(key, id, i))\r\n "
] | [
[
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.OneHotEncoder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
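The onehot_labels() helper in the entry above passes OneHotEncoder(sparse=False); scikit-learn renamed that argument to sparse_output in 1.2 and later dropped the old spelling, so the snippet below is a hedged re-implementation that works with either spelling. The label values are made-up examples, not data from the project.

```python
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder

def onehot_labels(values):
    """Integer-encode string labels, then one-hot encode them as a dense array."""
    integer_encoded = LabelEncoder().fit_transform(values).reshape(-1, 1)
    try:
        encoder = OneHotEncoder(sparse_output=False)   # scikit-learn >= 1.2
    except TypeError:
        encoder = OneHotEncoder(sparse=False)          # older scikit-learn
    return encoder.fit_transform(integer_encoded)

print(onehot_labels(np.array(["up", "down", "up", "right"])))
```

Passing the raw string labels, reshaped to a column, straight into OneHotEncoder would make the LabelEncoder step unnecessary; the two-step version is kept here only to mirror the original helper.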
juntang-zhuang/pytorch-image-models | [
"fb896c0b264d5a48dea62e5001405574170e4cbb"
] | [
"timm/models/pit.py"
] | [
"\"\"\" Pooling-based Vision Transformer (PiT) in PyTorch\n\nA PyTorch implement of Pooling-based Vision Transformers as described in\n'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302\n\nThis code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below.\n\nModifications for timm by / Copyright 2020 Ross Wightman\n\"\"\"\n# PiT\n# Copyright 2021-present NAVER Corp.\n# Apache License v2.0\n\nimport math\nimport re\nfrom copy import deepcopy\nfrom functools import partial\nfrom typing import Tuple\n\nimport torch\nfrom torch import nn\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom .helpers import build_model_with_cfg, overlay_external_default_cfg\nfrom .layers import trunc_normal_, to_2tuple\nfrom .registry import register_model\nfrom .vision_transformer import Block\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,\n 'crop_pct': .9, 'interpolation': 'bicubic',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'patch_embed.conv', 'classifier': 'head',\n **kwargs\n }\n\n\ndefault_cfgs = {\n # deit models (FB weights)\n 'pit_ti_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'),\n 'pit_xs_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'),\n 'pit_s_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'),\n 'pit_b_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'),\n 'pit_ti_distilled_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth',\n classifier=('head', 'head_dist')),\n 'pit_xs_distilled_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth',\n classifier=('head', 'head_dist')),\n 'pit_s_distilled_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth',\n classifier=('head', 'head_dist')),\n 'pit_b_distilled_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth',\n classifier=('head', 'head_dist')),\n}\n\n\nclass SequentialTuple(nn.Sequential):\n \"\"\" This module exists to work around torchscript typing issues list -> list\"\"\"\n def __init__(self, *args):\n super(SequentialTuple, self).__init__(*args)\n\n def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n for module in self:\n x = module(x)\n return x\n\n\nclass Transformer(nn.Module):\n def __init__(\n self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None):\n super(Transformer, self).__init__()\n self.layers = nn.ModuleList([])\n embed_dim = base_dim * heads\n\n self.blocks = nn.Sequential(*[\n Block(\n dim=embed_dim,\n num_heads=heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=True,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=drop_path_prob[i],\n norm_layer=partial(nn.LayerNorm, eps=1e-6)\n )\n for i in range(depth)])\n\n self.pool = pool\n\n def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n x, cls_tokens = x\n B, C, H, 
W = x.shape\n token_length = cls_tokens.shape[1]\n\n x = x.flatten(2).transpose(1, 2)\n x = torch.cat((cls_tokens, x), dim=1)\n\n x = self.blocks(x)\n\n cls_tokens = x[:, :token_length]\n x = x[:, token_length:]\n x = x.transpose(1, 2).reshape(B, C, H, W)\n\n if self.pool is not None:\n x, cls_tokens = self.pool(x, cls_tokens)\n return x, cls_tokens\n\n\nclass ConvHeadPooling(nn.Module):\n def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):\n super(ConvHeadPooling, self).__init__()\n\n self.conv = nn.Conv2d(\n in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride,\n padding_mode=padding_mode, groups=in_feature)\n self.fc = nn.Linear(in_feature, out_feature)\n\n def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:\n\n x = self.conv(x)\n cls_token = self.fc(cls_token)\n\n return x, cls_token\n\n\nclass ConvEmbedding(nn.Module):\n def __init__(self, in_channels, out_channels, patch_size, stride, padding):\n super(ConvEmbedding, self).__init__()\n self.conv = nn.Conv2d(\n in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True)\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\n\nclass PoolingVisionTransformer(nn.Module):\n \"\"\" Pooling-based Vision Transformer\n\n A PyTorch implement of 'Rethinking Spatial Dimensions of Vision Transformers'\n - https://arxiv.org/abs/2103.16302\n \"\"\"\n def __init__(self, img_size, patch_size, stride, base_dims, depth, heads,\n mlp_ratio, num_classes=1000, in_chans=3, distilled=False,\n attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0):\n super(PoolingVisionTransformer, self).__init__()\n\n padding = 0\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1)\n width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1)\n\n self.base_dims = base_dims\n self.heads = heads\n self.num_classes = num_classes\n self.num_tokens = 2 if distilled else 1\n\n self.patch_size = patch_size\n self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width))\n self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding)\n\n self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0]))\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n transformers = []\n # stochastic depth decay rule\n dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]\n for stage in range(len(depth)):\n pool = None\n if stage < len(heads) - 1:\n pool = ConvHeadPooling(\n base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2)\n transformers += [Transformer(\n base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool,\n drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage])\n ]\n self.transformers = SequentialTuple(*transformers)\n self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)\n self.embed_dim = base_dims[-1] * heads[-1]\n\n # Classifier head\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) \\\n if num_classes > 0 and distilled else nn.Identity()\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 
1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token'}\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) \\\n if num_classes > 0 and self.num_tokens == 2 else nn.Identity()\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n x = self.pos_drop(x + self.pos_embed)\n cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)\n x, cls_tokens = self.transformers((x, cls_tokens))\n cls_tokens = self.norm(cls_tokens)\n return cls_tokens\n\n def forward(self, x):\n x = self.forward_features(x)\n x_cls = self.head(x[:, 0])\n if self.num_tokens > 1:\n x_dist = self.head_dist(x[:, 1])\n if self.training and not torch.jit.is_scripting():\n return x_cls, x_dist\n else:\n return (x_cls + x_dist) / 2\n else:\n return x_cls\n\n\ndef checkpoint_filter_fn(state_dict, model):\n \"\"\" preprocess checkpoints \"\"\"\n out_dict = {}\n p_blocks = re.compile(r'pools\\.(\\d)\\.')\n for k, v in state_dict.items():\n # FIXME need to update resize for PiT impl\n # if k == 'pos_embed' and v.shape != model.pos_embed.shape:\n # # To resize pos embedding when using model at different size from pretrained weights\n # v = resize_pos_embed(v, model.pos_embed)\n k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k)\n out_dict[k] = v\n return out_dict\n\n\ndef _create_pit(variant, pretrained=False, **kwargs):\n default_cfg = deepcopy(default_cfgs[variant])\n overlay_external_default_cfg(default_cfg, kwargs)\n default_num_classes = default_cfg['num_classes']\n default_img_size = default_cfg['input_size'][-2:]\n img_size = kwargs.pop('img_size', default_img_size)\n num_classes = kwargs.pop('num_classes', default_num_classes)\n\n if kwargs.get('features_only', None):\n raise RuntimeError('features_only not implemented for Vision Transformer models.')\n\n model = build_model_with_cfg(\n PoolingVisionTransformer, variant, pretrained,\n default_cfg=default_cfg,\n img_size=img_size,\n num_classes=num_classes,\n pretrained_filter_fn=checkpoint_filter_fn,\n **kwargs)\n\n return model\n\n\n@register_model\ndef pit_b_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=14,\n stride=7,\n base_dims=[64, 64, 64],\n depth=[3, 6, 4],\n heads=[4, 8, 16],\n mlp_ratio=4,\n **kwargs\n )\n return _create_pit('pit_b_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_s_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[48, 48, 48],\n depth=[2, 6, 4],\n heads=[3, 6, 12],\n mlp_ratio=4,\n **kwargs\n )\n return _create_pit('pit_s_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_xs_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[48, 48, 48],\n depth=[2, 6, 4],\n heads=[2, 4, 8],\n mlp_ratio=4,\n **kwargs\n )\n return _create_pit('pit_xs_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_ti_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[32, 32, 32],\n depth=[2, 6, 4],\n heads=[2, 4, 8],\n mlp_ratio=4,\n **kwargs\n )\n return _create_pit('pit_ti_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_b_distilled_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=14,\n stride=7,\n base_dims=[64, 64, 64],\n depth=[3, 6, 4],\n heads=[4, 8, 16],\n 
mlp_ratio=4,\n distilled=True,\n **kwargs\n )\n return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_s_distilled_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[48, 48, 48],\n depth=[2, 6, 4],\n heads=[3, 6, 12],\n mlp_ratio=4,\n distilled=True,\n **kwargs\n )\n return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_xs_distilled_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[48, 48, 48],\n depth=[2, 6, 4],\n heads=[2, 4, 8],\n mlp_ratio=4,\n distilled=True,\n **kwargs\n )\n return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_ti_distilled_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[32, 32, 32],\n depth=[2, 6, 4],\n heads=[2, 4, 8],\n mlp_ratio=4,\n distilled=True,\n **kwargs\n )\n return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs)"
] | [
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.jit.is_scripting"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
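The pit.py entry above registers its model entrypoints with timm's registry, so the usual way to exercise them is timm.create_model. A minimal usage sketch follows; it assumes the surrounding fork is importable as timm and keeps pretrained=False so no weights are downloaded.

```python
import torch
import timm  # the package this pit.py belongs to

# 'pit_ti_224' is one of the @register_model entrypoints defined above.
model = timm.create_model("pit_ti_224", pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)      # input_size from default_cfgs
with torch.no_grad():
    logits = model(x)
print(logits.shape)                   # torch.Size([1, 10])
```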
lumstery/maskrcnn | [
"dd5008fcfdbaf46a61167214759b90dce0a3efd6"
] | [
"model.py"
] | [
"\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implemenetation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport sys\nimport glob\nimport random\nimport math\nimport datetime\nimport itertools\nimport json\nimport re\nimport logging\nfrom collections import OrderedDict\nimport numpy as np\nimport scipy.misc\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.initializers as KI\nimport keras.engine as KE\nimport keras.models as KM\nimport utils\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} min: {:10.5f} max: {:10.5f}\".format(\n str(array.shape),\n array.min() if array.size else \"\",\n array.max() if array.size else \"\"))\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Batch Normalization class. Subclasses the Keras BN class and\n hardcodes training=False so the BN layer doesn't update\n during training.\n\n Batch normalization has a negative effect on training if batches are small\n so we disable it here.\n \"\"\"\n\n def call(self, inputs, training=None):\n return super(self.__class__, self).call(inputs, training=False)\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True):\n \"\"\"conv_block is 
the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(axis=3, name=bn_name_base + '1')(shortcut)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False):\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(axis=3, name='bn_conv1')(x)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, 4] where each row is y1, x1, y2, x2\n deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n 
center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, 4] each row is y1, x1, y2, x2\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split corners\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, anchors,\n config=None, **kwargs):\n \"\"\"\n anchors: [N, (y1, x1, y2, x2)] anchors defined in image coordinates\n \"\"\"\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n self.anchors = anchors.astype(np.float32)\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Base anchors\n anchors = self.anchors\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = min(6000, self.anchors.shape[0])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. 
[batch, N, (y1, x1, y2, x2)]\n height, width = self.config.IMAGE_SHAPE[:2]\n window = np.array([0, 0, height, width]).astype(np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Normalize dimensions to range of 0 to 1.\n normalized_boxes = boxes / np.array([[height, width, height, width]])\n\n # Non-max suppression\n def nms(normalized_boxes, scores):\n indices = tf.image.non_max_suppression(\n normalized_boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(normalized_boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([normalized_boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementatin of Log2. TF doesn't have a native implemenation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]\n - image_shape: [height, width, channels]. Shape of input image in pixels\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - Feature maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, height, width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, image_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n self.image_shape = tuple(image_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[1:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(\n self.image_shape[0] * self.image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. 
P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indicies for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n pooled = tf.expand_dims(pooled, 0)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeate boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeate() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. 
Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n Class-specific bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine postive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI corrdinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,\n (dy, dx, log(dh), log(dw), class_id)]\n Class-specific bbox refinements.\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, 1), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef clip_to_window(window, boxes):\n \"\"\"\n window: (y1, x1, y2, x2). The window in the image we want to clip to.\n boxes: [N, (y1, x1, y2, x2)]\n \"\"\"\n boxes[:, 0] = np.maximum(np.minimum(boxes[:, 0], window[2]), window[0])\n boxes[:, 1] = np.maximum(np.minimum(boxes[:, 1], window[3]), window[1])\n boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], window[2]), window[0])\n boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], window[3]), window[1])\n return boxes\n\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in image coordinates. 
The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where\n coordinates are in image domain.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Convert coordiates to image domain\n # TODO: better to keep them normalized until later\n height, width = config.IMAGE_SHAPE[:2]\n refined_rois *= tf.constant([height, width, height, width], dtype=tf.float32)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n # Round and cast to int since we're deadling with pixels now\n refined_rois = tf.to_int32(tf.rint(refined_rois))\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.to_float(tf.gather(pre_nms_rois, ixs)),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indicies\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. 
Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are in image domain.\n detections = tf.concat([\n tf.to_float(tf.gather(refined_rois, keep)),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are in image domain\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Run detection refinement graph on each item in the batch\n _, _, window, _ = parse_image_meta_graph(image_meta)\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n# Region Proposal Network (RPN)\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the featuremap\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. 
[batch, H, W, anchors per location, depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. The model outputs, when called, are:\n rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, W, W, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps,\n image_shape, pool_size, num_classes):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from diffent layers of the pyramid,\n [P2, P3, P4, P5]. 
Each has a different resolution.\n image_shape: [height, width, depth]\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n\n Returns:\n logits: [N, NUM_CLASSES] classifier logits (before softmax)\n probs: [N, NUM_CLASSES] classifier probabilities\n bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size], image_shape,\n name=\"roi_align_classifier\")([rois] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(x)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(axis=3),\n name='mrcnn_class_bn2')(x)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps,\n image_shape, pool_size, num_classes):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from diffent layers of the pyramid,\n [P2, P3, P4, P5]. 
Each has a different resolution.\n image_shape: [height, width, depth]\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n\n Returns: Masks [batch, roi_count, height, width, num_classes]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size], image_shape,\n name=\"roi_align_mask\")([rois] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(axis=3),\n name='mrcnn_mask_bn1')(x)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(axis=3),\n name='mrcnn_mask_bn2')(x)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(axis=3),\n name='mrcnn_mask_bn3')(x)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(axis=3),\n name='mrcnn_mask_bn4')(x)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typicallly: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. Convert the -1/+1 match to 0/1 values.\n anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n # Positive and Negative anchors contribute to the loss,\n # but neutral anchors (match value = 0) don't.\n indices = tf.where(K.not_equal(rpn_match, 0))\n # Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n # Crossentropy loss\n loss = K.sparse_categorical_crossentropy(target=anchor_class,\n output=rpn_class_logits,\n from_logits=True)\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n\n config: the model config object.\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unsed bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 
1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Positive anchors contribute to the loss, but negative and\n # neutral anchors (match value of 0 or -1) don't.\n rpn_match = K.squeeze(rpn_match, -1)\n indices = tf.where(K.equal(rpn_match, 1))\n\n # Pick bbox deltas that contribute to the loss\n rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n # Trim target bounding box deltas to the same length as rpn_bbox.\n batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n target_bbox = batch_pack_graph(target_bbox, batch_counts,\n config.IMAGES_PER_GPU)\n\n # TODO: use smooth_l1_loss() rather than reimplementing here\n # to reduce code duplication\n diff = K.abs(target_bbox - rpn_bbox)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n active_class_ids):\n \"\"\"Loss for the classifier head of Mask RCNN.\n\n target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n active_class_ids: [batch, num_classes]. Has a value of 1 for\n classes that are in the dataset of the image, and 0\n for classes that are not in the dataset.\n \"\"\"\n target_class_ids = tf.cast(target_class_ids, 'int64')\n\n # Find predictions of classes that are not in the dataset.\n pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n # TODO: Update this line to work with batch > 1. Right now it assumes all\n # images in a batch have the same active_class_ids\n pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n # Loss\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_class_ids, logits=pred_class_logits)\n\n # Erase losses of predictions of classes that are not in the active\n # classes of the image.\n loss = loss * pred_active\n\n # Computer loss mean. Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. 
Get their indicies.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n loss = K.reshape(loss, [1, 1])\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n loss = K.reshape(loss, [1, 1])\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: If true, apply random image augmentation. Currently, only\n horizontal flipping is offered.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. 
The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n shape = image.shape\n image, window, scale, padding = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n max_dim=config.IMAGE_MAX_DIM,\n padding=config.IMAGE_PADDING)\n mask = utils.resize_mask(mask, scale, padding)\n\n # Random horizontal flips.\n if augment:\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Bounding boxes. Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, shape, window, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Grund truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). 
Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indicies of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. 
This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. [y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks.\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(scipy.misc.imresize(class_mask.astype(float), (gt_h, gt_w),\n interp='nearest') / 255.0).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = scipy.misc.imresize(\n m.astype(float), config.MASK_SHAPE, interp='nearest') / 255.0\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A 
crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # TODO: If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n rpn_match[gt_iou_argmax] = 1\n # 3. 
Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals similar to what a region proposal network\n would generate.\n\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. 
If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois\n\n\ndef data_generator(dataset, config, shuffle=True, augment=True, random_rois=0,\n batch_size=1, detection_targets=False):\n \"\"\"A generator that returns images and corresponding target class ids,\n bounding box deltas, and masks.\n\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n augment: If True, applies image augmentation to images (currently only\n horizontal flips are supported)\n random_rois: If > 0 then generate proposals to be used to train the\n network classifier and mask heads. Useful if training\n the Mask RCNN part without the RPN.\n batch_size: How many images to return in each call\n detection_targets: If True, generate detection targets (class IDs, bbox\n deltas, and masks). Typically for debugging or visualizations because\n in trainig detection targets are generated by DetectionTargetLayer.\n\n Returns a Python generator. Upon calling next() on it, the\n generator returns two lists, inputs and outputs. The containtes\n of the lists differs depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, size of image meta]\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. 
The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n config.BACKBONE_SHAPES,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinately.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n if config.USE_MINI_MASK:\n batch_gt_masks = np.zeros((batch_size, config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1],\n config.MAX_GT_INSTANCES))\n else:\n batch_gt_masks = np.zeros(\n (batch_size, image.shape[0], image.shape[1], config.MAX_GT_INSTANCES))\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n 
batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n raise\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN():\n \"\"\"Encapsulates the Mask RCNN model functionality.\n\n The actual Keras model is in the keras_model property.\n \"\"\"\n\n def __init__(self, mode, config, model_dir):\n \"\"\"\n mode: Either \"training\" or \"inference\"\n config: A Sub-class of the Config class\n model_dir: Directory to save training logs and trained weights\n \"\"\"\n assert mode in ['training', 'inference']\n self.mode = mode\n self.config = config\n self.model_dir = model_dir\n self.set_log_dir()\n self.keras_model = self.build(mode=mode, config=config)\n tfconfig = tf.ConfigProto()\n tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.3\n set_session(tf.Session(config=tfconfig))\n\n def build(self, mode, config):\n \"\"\"Build Mask R-CNN architecture.\n input_shape: The shape of the input image.\n mode: Either \"training\" or \"inference\". The inputs and\n outputs of the model differ accordingly.\n \"\"\"\n assert mode in ['training', 'inference']\n\n # Image size must be dividable by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be dividable by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling.\"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n # Inputs\n input_image = KL.Input(\n shape=config.IMAGE_SHAPE.tolist(), name=\"input_image\")\n input_image_meta = KL.Input(shape=[None], name=\"input_image_meta\")\n if mode == \"training\":\n # RPN GT\n input_rpn_match = KL.Input(\n shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n input_rpn_bbox = KL.Input(\n shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n # Detection GT (class IDs, bounding boxes, and masks)\n # 1. GT Class IDs (zero padded)\n input_gt_class_ids = KL.Input(\n shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n # 2. 
GT Boxes in pixels (zero padded)\n # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n input_gt_boxes = KL.Input(\n shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n # Normalize coordinates\n h, w = K.shape(input_image)[1], K.shape(input_image)[2]\n image_scale = K.cast(K.stack([h, w, h, w], axis=0), tf.float32)\n gt_boxes = KL.Lambda(lambda x: x / image_scale)(input_gt_boxes)\n # 3. GT Masks (zero padded)\n # [batch, height, width, MAX_GT_INSTANCES]\n if config.USE_MINI_MASK:\n input_gt_masks = KL.Input(\n shape=[config.MINI_MASK_SHAPE[0],\n config.MINI_MASK_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n else:\n input_gt_masks = KL.Input(\n shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n _, C2, C3, C4, C5 = resnet_graph(input_image, \"resnet101\", stage5=True)\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Generate Anchors\n self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n config.BACKBONE_SHAPES,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), 256)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n anchors=self.anchors,\n config=config)([rpn_class, rpn_bbox])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n _, _, _, active_class_ids = KL.Lambda(lambda x: parse_image_meta_graph(x),\n mask=[None, None, None, None])(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates to 0-1 range.\n target_rois = KL.Lambda(lambda x: K.cast(\n x, tf.float32) / image_scale[:4])(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, config.IMAGE_SHAPE,\n config.POOL_SIZE, config.NUM_CLASSES)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n config.IMAGE_SHAPE,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, 
mrcnn_feature_maps, config.IMAGE_SHAPE,\n config.POOL_SIZE, config.NUM_CLASSES)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Convert boxes to normalized coordinates\n # TODO: let DetectionLayer return normalized coordinates to avoid\n # unnecessary conversions\n h, w = config.IMAGE_SHAPE[:2]\n detection_boxes = KL.Lambda(\n lambda x: x[..., :4] / np.array([h, w, h, w]))(detections)\n\n # Create masks for detections\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n config.IMAGE_SHAPE,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES)\n\n model = KM.Model([input_image, input_image_meta],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n log_dir: The directory where events and weights are saved\n checkpoint_path: the path to the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n return None, None\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n return dir_name, None\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return dir_name, checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the correspoding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exlude: list of layer names to excluce\n \"\"\"\n import h5py\n from keras.engine import topology\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n topology.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n topology.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,\n clipnorm=5.0)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n self.keras_model.add_loss(\n tf.reduce_mean(layer.output, keep_dims=True))\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(optimizer=optimizer, loss=[\n None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n self.keras_model.metrics_tensors.append(tf.reduce_mean(\n layer.output, keep_dims=True))\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. 
If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainble layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from we left of. Get epoch and date from the file name\n # A sample model path might look like:\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5\n regex = r\".*/\\w+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})/mask\\_rcnn\\_\\w+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n self.epoch = int(m.group(6)) + 1\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. Note that previous training epochs\n are considered to be done alreay, so this actually determines\n the epochs to train in total rather than in this particaular\n call.\n layers: Allows selecting wich layers to train. 
It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heaads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = data_generator(train_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n val_generator = data_generator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE,\n augment=False)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name is 'nt':\n workers = 0\n else:\n workers = max(self.config.BATCH_SIZE // 2, 2)\n\n self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=next(val_generator),\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=True,\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matricies [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matricies:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image to fit the model expected size\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n max_dim=self.config.IMAGE_MAX_DIM,\n padding=self.config.IMAGE_PADDING)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, window,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)]\n mrcnn_mask: [N, height, width, num_classes]\n image_shape: [height, width, depth] Original size of the image before resizing\n window: [y1, x1, y2, x2] Box in the image where the real image is\n excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Compute scale and shift to translate coordinates to image domain.\n h_scale = image_shape[0] / (window[2] - window[0])\n w_scale = image_shape[1] / (window[3] - window[1])\n scale = min(h_scale, w_scale)\n shift = window[:2] # y, x\n scales = np.array([scale, scale, scale, scale])\n shifts = np.array([shift[0], shift[1], shift[0], shift[1]])\n\n # Translate bounding boxes to image domain\n boxes = np.multiply(boxes - shifts, scales).astype(np.int32)\n\n # Filter out detections with zero area. 
Often only happens in early\n # stages of training when the network weights are still a bit random.\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty((0,) + masks.shape[1:3])\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n # Run object detection\n detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \\\n rois, rpn_class, rpn_bbox =\\\n self.keras_model.predict([molded_images, image_metas], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Run inference\n molded_images, image_metas, windows = self.mold_inputs(images)\n # TODO: support training mode?\n # if TEST_MODE == \"training\":\n # model_in = [molded_images, image_metas,\n # target_rpn_match, target_rpn_bbox,\n # gt_boxes, gt_masks]\n # if not config.USE_RPN_ROIS:\n # model_in.append(target_rois)\n # if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n # model_in.append(1.)\n # outputs_np = kf(model_in)\n # else:\n\n model_in = [molded_images, image_metas]\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, image_shape, window, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. Useful for debugging.\n image_shape: [height, width, channels]\n window: (y1, x1, y2, x2) in pixels. 
The area of the image where the real\n image is (excluding the padding)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n \"\"\"\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8] # (y1, x1, y2, x2) window of image in in pixels\n active_class_ids = meta[:, 8:]\n return [image_id, image_shape, window, active_class_ids]\n\n\ndef mold_image(images, config):\n \"\"\"Takes RGB images with 0-255 values and subtraces\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes a image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellenous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name=None):\n \"\"\"Often boxes are represented with matricies of shape [N, 4] and\n are padded with zeros. This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n"
] | [
[
"numpy.amax",
"numpy.expand_dims",
"tensorflow.concat",
"numpy.minimum",
"tensorflow.control_dependencies",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.minimum",
"tensorflow.cast",
"tensorflow.image.non_max_suppression",
"tensorflow.equal",
"tensorflow.image.crop_and_resize",
"numpy.concatenate",
"tensorflow.abs",
"tensorflow.map_fn",
"numpy.any",
"tensorflow.pad",
"tensorflow.where",
"tensorflow.random_shuffle",
"numpy.where",
"tensorflow.add_n",
"numpy.random.randint",
"tensorflow.boolean_mask",
"numpy.hstack",
"numpy.reshape",
"numpy.fliplr",
"numpy.arange",
"tensorflow.squeeze",
"numpy.stack",
"tensorflow.stop_gradient",
"tensorflow.ConfigProto",
"tensorflow.gather",
"numpy.copy",
"numpy.argmax",
"tensorflow.nn.top_k",
"tensorflow.Session",
"tensorflow.argmax",
"numpy.zeros",
"numpy.log",
"tensorflow.gather_nd",
"tensorflow.unique",
"tensorflow.shape",
"numpy.random.choice",
"numpy.multiply",
"tensorflow.identity",
"tensorflow.exp",
"tensorflow.sparse_tensor_to_dense",
"numpy.delete",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.split",
"tensorflow.round",
"numpy.array",
"tensorflow.size",
"numpy.sum",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.rint",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"numpy.abs",
"tensorflow.reshape",
"tensorflow.expand_dims",
"numpy.sort",
"numpy.ones",
"numpy.random.shuffle",
"tensorflow.log",
"tensorflow.sqrt",
"numpy.empty",
"tensorflow.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
jamebs/ki67 | [
"2de6d6ce08cd0a90a5f0c6f50dc9ca058a6fa3d1"
] | [
"ki67/modules/markers/markers_preview.py"
] | [
"from dataclasses import dataclass\nfrom typing import Optional\n\nimport numpy as np\nfrom skimage import draw\nfrom magda.module import Module\nfrom magda.decorators import finalize, accept, produce, register\n\nfrom ki67.interfaces.slide import Slide\nfrom ki67.interfaces.markers import Markers\nfrom ki67.interfaces.image import Image\n\n\n@accept(Slide, Markers)\n@produce(Image)\n@register('MarkersPreview')\n@finalize\nclass MarkersPreview(Module.Runtime):\n \"\"\" Markers Preview \"\"\"\n\n colors = [None, (255, 0, 0), (0, 255, 0)]\n\n @dataclass(frozen=True)\n class Parameters:\n radius: Optional[int] = None\n size: Optional[int] = None\n\n def run(self, data: Module.ResultSet, **kwargs):\n slide: Slide = data.get(Slide)\n markers: Markers = data.get(Markers)\n params = self.Parameters(**self.parameters)\n\n preview = slide.image[:, :, :3].copy()\n marker_radius = params.radius or (np.max(preview.shape) // 250)\n marker_size = params.size or (np.max(preview.shape) // 650)\n\n for _, marker in markers.markers.iterrows():\n for i in range(marker_size):\n yy, xx = draw.circle_perimeter(\n marker.y,\n marker.x,\n marker_radius - i,\n shape=preview.shape,\n )\n preview[yy, xx] = self.colors[marker.type]\n return Image(uid=slide.uid, data=preview)\n"
] | [
[
"numpy.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fengjixuchui/EmbeddedSystem | [
"972a8378760c40ab513523bd05abffe58e367039"
] | [
"MachineLearning/project/07_minist/script_reference/resize_image_and_to_array.py"
] | [
"import sys\nfrom PIL import Image, ImageOps\nimport numpy as np\n\nnp.set_printoptions(threshold=np.inf)\n\ndef resize_image(image, _width=32, _height=32):\n new_image = Image.open(image)\n new_image = ImageOps.fit(new_image , (_width, _height), Image.ANTIALIAS)\n new_image_rgb = new_image.convert('RGB')\n return np.asarray(new_image_rgb).flatten()\n\ndef print_array_for_c(_array):\n print(\"{\",end=\"\")\n for pixel in _array:\n print(pixel,end=\",\")\n print(\"}\")\n\ndef main():\n if len(sys.argv) == 2:\n print(\"resize and convert image: \"+sys.argv[1])\n print_array_for_c(resize_image(sys.argv[1]))\n else:\n print(\"Usage: python resize_image_and_to_array.py path_to_image\")\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.asarray",
"numpy.set_printoptions"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
devilishBurrito/AppsFlyer-GDPR-Deletion-Script | [
"fb8c7e546ac0e878f253b0390d30f0303b577242"
] | [
"GDPR_delete (v1.0).py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport requests\nimport json\nimport pandas as pd\nfrom tqdm import tqdm\nfrom datetime import *\nimport uuid\nimport warnings\nfrom math import isnan\nimport time\n\n\n## throttling based on AF's 80 request per 2 minute rule\ndef throttle(tm):\n i = 0\n while i < tm:\n print (\"PAUSED FOR THROTTLING!\" + \"\\n\" + str(delay-i) + \" minutes remaining\")\n time.sleep(60)\n i = i + 1\n return 0\n\n## function for reformating the dates\ndef date():\n dt = datetime.utcnow() # # <-- get time in UTC\n dt = dt.isoformat('T') + 'Z'\n tz = dt.split('.')\n tz = tz[0] + 'Z'\n return str(tz)\n\ndef requestDeletion(mplatform, mdevice_id, mtime, muuid, app, token, endpoint, mdf, position): \n identity_type = ''\n if mplatform.lower() == 'android':\n mdevice_id = mdevice_id.lower()\n identity_type = 'android_advertising_id'\n elif mplatform.lower() == 'ios':\n mdevice_id = mdevice_id.upper()\n identity_type = 'ios_advertising_id'\n else:\n return\n\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n\n params = {'api_token': token }\n \n body = {\n 'subject_request_id': muuid,\n 'subject_request_type': 'erasure',\n 'submitted_time': mtime,\n \"subject_identities\": [\n { \"identity_type\": identity_type, \n \"identity_value\": mdevice_id, \n \"identity_format\": \"raw\" }\n ], \n \"property_id\": app\n }\n\n body = json.dumps(body)\n\n\n endpoint = 'https://hq1.appsflyer.com/gdpr/opengdpr_requests'\n\n res = requests.request('POST', endpoint, headers=headers,\n data=body, params=params)\n\n if res:\n #checks if the response was within 200-400 range\n mdf['subject_request_id'][position] = muuid\n else: \n mdf['subject_request_id'][position] = ''\n mdf['error status'][position] = (str(res.status_code) + ': ' + res.text)\n\n global logs_enabled\n if logs_enabled:\n # printing in case we need to check things\n print(mplatform.upper() + ' status: ' + str(res.status_code) + '\\nresponse: ' + res.text + '\\nendpoint: ' + res.url + '\\n')\n\n## main run function. Determines whether it is iOS or Android request and sends if not LAT-user\ndef run(output, mdf, throt_rate, throt_delay):\n global is_test\n global app_id\n global api_key\n\n print ('Sending requests! Stand by...')\n platform = mdf.platform\n device = mdf.device_id\n\n if is_test == True:\n app_id = mdf.app_id\n token = api_key\n endpoint = 'https://hq1.appsflyer.com/gdpr/stub'\n else:\n app_id = mdf.app_id\n token = api_key\n endpoint = 'https://hq1.appsflyer.com/gdpr/opengdpr_requests'\n \n for position in tqdm(range(len(device))):\n\n if position % throt_rate == 0 and position != 0: \n throttle(throt_delay)\n\n # else:\n req_id = str(uuid.uuid4())\n timestamp = str(date())\n validate = str(device[position])\n if validate == '' or validate == 'nan':\n mdf['subject_request_id'][position] = ''\n mdf['error status'][position] = 'Limit Ad Tracking Users Unsupported. Device ID Required' \n else:\n requestDeletion(str(platform[position]), str(device[position]), timestamp, req_id, app_id[position], token, endpoint, mdf, position)\n \n ## write to CSV DURING the loop. Doing t the end was bad idea.\n ## Too many possibilites and lost logs / request IDs\n mdf.to_csv(output, sep=',', index = False, header=True)\n \n print ('\\nDONE. 
Please see ' + output \n + ' for the subject_request_id and/or error messages\\n')\n\n## just used to create the renamed file with _LOGS.csv\ndef addLogExt(nm):\n nm = stripExtension(nm) + '_LOGS.csv'\n return nm\n\n## adds relevant columns to the log file\ndef logs_csv(out, df):\n df['subject_request_id'] = ''\n df['error status'] = ''\n df['device_id'].fillna('')\n df.to_csv(out, sep=',', index=None, header=True)\n\n return df\n\n## solely for reading in the file name from the user. creates string out of filename\n## due to current limitations, this file MUST be located with the python script. \n## moving forward will add file finder so user can type file path or drag and drop file into terminal\ndef stripExtension(fn):\n fn = fn.split('.')\n fn = fn[0]\n return str(fn)\n\ndef readin_name():\n mprefix = input('FILE NAME (CSV ONLY): ')\n mprefix = stripExtension(mprefix)\n mname = str(mprefix + '.csv')\n print ('Reading in file: ' + mname)\n return mname\n\ndef validateToken(key):\n key = key.strip()\n if len(key) != 36:\n print('\\ninvalid API Key format, please try again.')\n return False\n return True\n\n\ndef start():\n print ('\\nWelcome to GDPR STREAMLINE')\n global api_key\n api_key = input('Please provide your API key: ')\n while validateToken(api_key) == False:\n api_key = input(' API key : ')\n validateToken(api_key)\n # # blue = OpenFile()\n\n global throttle_rate\n global throttle_delay\n\n testing = input('Is this a test? (y/n) : ')\n if testing == \"y\":\n global is_test \n is_test = True\n print('\\nGreat! We\\'ll use the StubAPI endpoint (testing endoint')\n debugging = input('\\nDebug mode (logging) enabled? (y/n) : ')\n throttle_delay = 1 # minutes\n if debugging == 'y':\n global logs_enabled\n logs_enabled = True\n\n # return a CSV\n name = readin_name()\n import_csv = pd.read_csv(name)\n output_name = addLogExt(name)\n\n output_file = logs_csv(output_name, import_csv)\n\n run(output_name, output_file, throttle_rate, throttle_delay)\n\n\n## to disable all warnings in console logs\n\nwarnings.filterwarnings('ignore')\nis_test = False\nlogs_enabled = False\napi_key = ''\n# OpenGDPR has a throttle mechanism in which only 80 requests can be sent every 3 minutes.\nthrottle_delay = 3 # minutes\nthrottle_rate = 80 # requests per throttle_delay\nstart()\n\n# REQUESTED ASSISTANCE:\n# https://stackoverflow.com/questions/54082240/nested-json-values-cause-typeerror-object-of-type-int64-is-not-json-serializ"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
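The only call extracted for the GDPR deletion script above is pandas.read_csv. A minimal sketch of how the script uses it, assuming a hypothetical devices.csv input (the real script prompts for the file name and adds these log columns via logs_csv before writing the _LOGS.csv file):

import pandas as pd

# 'devices.csv' is hypothetical; the script reads a user-supplied CSV the same
# way, then appends two log columns before writing '<name>_LOGS.csv'.
df = pd.read_csv('devices.csv')
df['subject_request_id'] = ''
df['error status'] = ''
df.to_csv('devices_LOGS.csv', sep=',', index=False, header=True)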
markolalovic/metric-graph-reconstruction | [
"c250f910feb6cd87658aa210fa002de7141f5a38"
] | [
"src/metric_graph_reconstruction.py"
] | [
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\" metric_graph_reconstruction.py: Implementation of algorithm for\nreconstructing the topology of a metric graph that represents intersecting\nor branching filamentary paths embedded in d-dimensional space.\n\nTODO: metric should be more implicit so it's easy to switch from Euclidean\nto geodesic or distance induced by Rips or alpha complex\n\nAuthor: Marko Lalovic <[email protected]>\nLicense: MIT License\n\"\"\"\n\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom mpl_toolkits.mplot3d import axes3d\nfrom queue import PriorityQueue\nimport os\nimport json\n\nclass Graph:\n def __init__(self, vertices, edges):\n ''' Graph with vertices associated to points in d-dimensional space.\n Edges are represented simply as pairs of vertices.\n\n e.g:\n graph = Graph(...)\n\n Args:\n vertices: list of Point objects\n edges: list of [Point, Point] lists\n '''\n self.vertices = vertices\n self.edges = edges\n\n def __str__(self):\n vertices = ['v' + str(i+1) for i in range(self.n)]\n name_of = dict(zip(self.vertices, vertices))\n out = 'vertices: \\n'\n for vertex, vertex_point in zip(vertices, self.vertices):\n out += ' ' + vertex + ': ' + str(vertex_point) + '\\n'\n\n edges = ['e' + str(i+1) for i in range(self.m)]\n out += '\\nedges: \\n'\n for edge, edge_points in zip(edges, self.edges):\n out += ' ' + edge + ': ' \\\n + ', '.join([name_of[edge_points[0]], name_of[edge_points[1]]]) \\\n + '\\n'\n\n out += '\\ncomponents: \\n'\n for i, cmpt in self.components.items():\n out += ' c' + str(i+1) + ': '\n out += ', '.join([name_of[vertex_point] for vertex_point in cmpt]) \\\n + '\\n'\n\n return out\n\n @property\n def d(self):\n ''' Dimension of embedding space. '''\n return self.vertices[0].d\n\n @property\n def n(self):\n ''' Number of vertices. '''\n return len(self.vertices)\n\n @property\n def m(self):\n ''' Number of edges. '''\n return len(self.edges)\n\n @property\n def k(self):\n ''' Number of connected components.'''\n return len(self.components)\n\n @property\n def components(self):\n ''' Returns the connected components as a dictionary:\n {i: [Points of component i]}\n '''\n cmpts = []\n visited = []\n for v in self.vertices:\n if not v in visited:\n comp_of_v = self.component(v)\n # add vertices from component to visited\n for u in comp_of_v:\n visited.append(u)\n cmpts.append(comp_of_v)\n\n return dict(zip(range(len(cmpts)), cmpts))\n\n def neighbors(self, v):\n ''' Neighbors of vertex v. '''\n nbrs = []\n for edge in self.edges:\n u1, u2 = edge\n if u1.equals(v):\n nbrs.append(u2)\n elif u2.equals(v):\n nbrs.append(u1)\n return nbrs\n\n def component(self, v):\n ''' Connected component of v. '''\n def cmpt(v, T):\n nhbs = list(set(self.neighbors(v)) - set(T))\n if nhbs == []:\n return [v]\n else:\n T += nhbs # expand the tree\n for nhb in nhbs:\n T += cmpt(nhb, T) # expand the tree in BFS way\n return list(set(T))\n return cmpt(v, [v]) # start with T = [v]\n\n def graph_distance(self, p1, p2):\n ''' Graph distance between points p1, p2. '''\n vertices = [i for i in range(self.n)]\n lenghts = self.graph_distances(p1)\n name_of = dict(zip(self.vertices, vertices))\n return lenghts[name_of[p2]]\n\n def graph_distances(self, start):\n ''' To compute shortest distances from start to all other vertices. 
'''\n vertices = [i for i in range(self.n)]\n distances = {v:float('inf') for v in vertices}\n name_of = dict(zip(self.vertices, vertices))\n start = name_of[start]\n distances[start] = 0\n\n lengths = [[-1 for i in range(self.m)] for j in range(self.m)]\n for edge in self.edges:\n u, v = name_of[edge[0]], name_of[edge[1]]\n weight = 1 # TODO: set weight based on edge lenght\n lengths[u][v] = weight\n lengths[v][u] = weight\n\n queue = PriorityQueue()\n queue.put((0, start))\n\n visited = []\n while not queue.empty():\n _, current_vertex = queue.get()\n visited.append(current_vertex)\n\n for vertex in vertices:\n distance = lengths[current_vertex][vertex]\n if distance != -1:\n if vertex not in visited:\n old_length = distances[vertex]\n new_length = distances[current_vertex] + distance\n if new_length < old_length:\n queue.put((new_length, vertex))\n distances[vertex] = new_length\n return distances\n\n def show(self):\n ''' Plots the graph structure as a rectilinear drawing.\n\n TODO: if dimension is more than 3, project the graph, so that the\n associated points can be of any dimension.\n\n TODO: pass the projection mapping to plot_graph or even curves that are\n associated with the edges.\n '''\n space = Space(self.d)\n space.plot_graph(self)\n space.show()\n\nclass Point:\n ''' Supporting class for storing coordinates and labels of points.\n\n e.g:\n point = Point(...)\n\n Args:\n coords::tuple(float)\n The coordinates of a point. Should be a tuple of floats.\n label::str\n Should be: 'E' for edge point and 'V' for vertex point.\n '''\n def __init__(self, coords=(), label='P'):\n self.coords = coords\n self.label = label\n\n def __str__(self):\n return self.label + str(self.coords)\n\n @property\n def d(self):\n ''' Dimension of embedding space. '''\n return len(self.coords)\n\n def equals(self, p, eps=1e-4):\n ''' Returns true if point is close to p. '''\n if self.d != p.d:\n return False\n\n def distance(p1, p2):\n p1 = np.array(p1.coords)\n p2 = np.array(p2.coords)\n return np.linalg.norm(p1 - p2)\n\n return distance(self, p) < eps\n\nclass PointCloud:\n ''' PointCloud Class to hold a list of Point objects.\n\n e.g:\n point_cloud = PointCloud(...)\n\n TODO: should be general to work with any dimension of embedding space\n or any distance we are using for reconstructing the graph.\n\n Test and show capabilities:\n\n * on Heawood graph embedded on a torus in 3D space\n * on hypercube embedded in 4D space\n * on the earthquake data using geodesic distance\n '''\n def __init__(self, points):\n if points == [] or isinstance(points[0], Point):\n self.points = points\n else:\n raise ValueError('Points must be a list of Point objects.')\n\n @property\n def vertex_points(self):\n vertex_points = []\n for point in self.points:\n if point.label == 'V':\n vertex_points.append(point)\n return vertex_points\n\n @property\n def edge_points(self):\n edge_points = []\n for point in self.points:\n if point.label == 'E':\n edge_points.append(point)\n return edge_points\n\n def __str__(self):\n return '[' + ', '.join([str(point) for point in self.points]) + ']'\n\n def __len__(self):\n return len(self.points)\n\n def distance(self, p1, p2):\n ''' Euclidean distance between two points.\n TODO: generalize, so we can use geodesic distance or\n distance induced by Rips-Vietoris graph.\n '''\n p1 = np.array(p1.coords)\n p2 = np.array(p2.coords)\n return np.linalg.norm(p1 - p2)\n\n def set_distance(self, points1, points2):\n ''' Computes minimum distance between given sets of points points1 and points2. 
'''\n distances = []\n for point1 in points1:\n for point2 in points2:\n distances.append(self.distance(point1, point2))\n return np.min(np.array(distances))\n\n def set_center(self, points):\n ''' Computes the center of mass of the given set of points.'''\n points_np = np.array([point.coords for point in points])\n return Point( tuple(np.mean(points_np, axis=0)) )\n\n def get_shell_points(self, y, radius, delta):\n ''' Returns a list of points between radius and radius + delta around point y.'''\n shell_points = []\n for point in self.points:\n dst = self.distance(y, point)\n if dst >= radius and dst <= radius + delta:\n shell_points.append(point)\n return shell_points\n\n def rips_vietoris_graph(self, points, delta):\n ''' Constructs the Rips-Vietoris graph on points of parameter delta. '''\n n = len(points)\n vertices = []\n edges = []\n for i in range(n):\n p1 = points[i]\n vertices.append(p1)\n for j in range(i, n):\n p2 = points[j]\n if not p1.equals(p2) and self.distance(p1, p2) < delta:\n edges.append([p1, p2])\n return Graph(vertices, edges)\n\n def label_points(self, r, delta):\n ''' Labels the points as edge or vertex points. '''\n for y in self.points:\n shell_points = self.get_shell_points(y, r, delta)\n rips_embedded = self.rips_vietoris_graph(shell_points, delta)\n if rips_embedded.k == 2:\n y.label = 'E'\n else:\n y.label = 'V'\n\n def get_ball_points(self, center, radius):\n ball_points = []\n for point in self.points:\n dist = self.distance(center, point)\n if dist < radius:\n ball_points.append(point)\n return ball_points\n\n def expand_vertices(self, p11):\n ''' Re-labels all the points withing distance p11 from\n preliminary vertex points as vertices. '''\n for vertex_point in self.vertex_points:\n ball_points = self.get_ball_points(vertex_point, p11)\n for ball_point in ball_points:\n ball_point.label = 'V'\n\n def reconstruct(self, delta):\n ''' Reconstructs the graph structure. 
'''\n # compute the connected components of Rips-Vietoris graphs:\n # Rips_delta(vertex_points), Rips_delta(edge_points)\n rips_V = self.rips_vietoris_graph(self.vertex_points, delta)\n rips_E = self.rips_vietoris_graph(self.edge_points, delta)\n cmpts_V = rips_V.components\n cmpts_E = rips_E.components\n\n # connected components of Rips_delta(vertex_points) are vertices of\n # reconstructed embedded graph hatG\n # represented here by centers of mass of point clouds\n vertices = []\n for cmpt_V in cmpts_V.values():\n vertices.append(self.set_center(cmpt_V))\n\n # there is an edge between vertices of hatG if their corresponding\n # connected components in Rips_delta(vertex_points) contain points\n # at distance less than delta from the same component of\n # Rips_delta(edge_points)\n n = len(vertices)\n edges = []\n for i in range(n):\n # we cannot detect loops by setting range(i, n)\n # then each vertex would have a loop\n for j in range(i+1, n):\n for cmpt_E in cmpts_E.values():\n if self.set_distance(cmpts_V[i], cmpt_E) < delta and \\\n self.set_distance(cmpts_V[j], cmpt_E) < delta:\n edges.append([vertices[i], vertices[j]])\n\n return Graph(vertices, edges)\n\nclass Space:\n ''' Space on which we plot the graphics.\n\n e.g:\n space = Space()\n\n '''\n def __init__(self, dimension,\n figsize=8, remove_ticks=True, label_axes=False):\n if dimension in [2, 3]:\n self.dim = dimension\n else:\n raise ValueError(\n \"Space on which we plot the graphics can be 2 or 3 dimensional.\")\n\n plt.rcParams['figure.figsize'] = [figsize, figsize]\n plt.rcParams['axes.facecolor'] = 'white'\n plt.rcParams['savefig.facecolor'] = 'white'\n\n self.fig = plt.figure()\n self.font_size = 28\n\n if self.dim == 2:\n self.ax = self.fig.add_subplot(111, aspect='equal')\n\n if label_axes:\n self.ax.set_xlabel('x')\n self.ax.set_ylabel('y')\n\n if remove_ticks:\n self.ax.set_xticks([])\n self.ax.set_yticks([])\n\n # set axis colors\n self.ax.spines['top'].set_color('grey')\n self.ax.spines['right'].set_color('grey')\n else:\n self.ax = plt.axes(projection='3d')\n\n # set aspect ratio\n self.ax.set_box_aspect(aspect = (1,1,1))\n\n # set viewing angle\n self.ax.azim = 30\n self.ax.dist = 10\n self.ax.elev = 30\n\n # remove fill\n self.ax.xaxis.pane.fill = False\n self.ax.yaxis.pane.fill = False\n self.ax.zaxis.pane.fill = False\n\n if remove_ticks:\n self.ax.set_xticks([])\n self.ax.set_yticks([])\n self.ax.set_zticks([])\n\n # if remove_axes: self.ax.set_axis_off()\n\n # set z-axis on the left\n tmp_planes = self.ax.zaxis._PLANES\n self.ax.zaxis._PLANES = (tmp_planes[2], tmp_planes[3],\n tmp_planes[0], tmp_planes[1],\n tmp_planes[4], tmp_planes[5])\n # set axes colors\n self.ax.xaxis.pane.set_edgecolor('gray')\n self.ax.yaxis.pane.set_edgecolor('gray')\n self.ax.zaxis.pane.set_edgecolor('gray')\n\n if label_axes:\n self.ax.set_xlabel('x')\n self.ax.set_ylabel('y')\n self.ax.set_zlabel('z')\n\n def show(self, figure_path=''):\n \"\"\" Show the space, displaying any graphics on it.\"\"\"\n if self.dim == 3:\n # fix the aspect ratio for 3d plot; source:\n #https://stackoverflow.com/questions/8130823/set-matplotlib-3d-plot-aspect-ratio\n extents = np.array([getattr(self.ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n sz = extents[:,1] - extents[:,0]\n centers = np.mean(extents, axis=1)\n maxsize = max(abs(sz))\n r = maxsize/2\n for ctr, dim in zip(centers, 'xyz'):\n getattr(self.ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)\n\n if figure_path != '':\n self.fig.savefig(figure_path, dpi=300)\n os.system('convert ' + 
figure_path + ' -trim ' + figure_path)\n\n plt.show()\n plt.clf()\n plt.cla()\n plt.close()\n\n def color(self, point, default='green'):\n ''' Returns the color of a point based on its label. '''\n if point.label == 'V':\n return 'red'\n elif point.label == 'E':\n return 'blue'\n else:\n return default\n\n def plot_point(self, point, color='black', **kwargs):\n ''' Plots a point. '''\n if self.dim == 2:\n x, y = point.coords\n self.ax.scatter(x, y, color=color, s=50, **kwargs)\n else:\n x, y, z = point.coords\n self.ax.scatter3D(x, y, z, color=color, **kwargs)\n\n def plot_points(self, points, **kwargs):\n coords = np.array([point.coords for point in points])\n colors = list(map(self.color, points))\n if self.dim == 2:\n self.ax.scatter(coords[:, 0], coords[:, 1], color=colors, **kwargs)\n else:\n self.ax.scatter3D(coords[:, 0], coords[:, 1], coords[:, 2],\n color=colors, depthshade=True, **kwargs)\n\n def plot_shell(self, center, radius, delta, color='black', **kwargs):\n ''' Plots B(center, radius) and B(center, radius + delta). '''\n if self.dim == 2:\n self.plot_ball(center, radius + delta, color='grey', alpha=0.1)\n self.plot_ball(center, radius + delta, fill=False, alpha=1)\n self.plot_ball(center, radius, color='white', alpha=1)\n else:\n self.plot_ball(center, radius + delta)\n self.plot_ball(center, radius)\n\n def plot_ball(self, center, radius, color='black', **kwargs):\n \"\"\" Plots a ball B(center, radius). \"\"\"\n if self.dim == 2:\n circle = patches.Circle(center.coords,\n radius,\n facecolor=color,\n edgecolor='k',\n linestyle='--',\n linewidth='2.2',\n zorder=0,\n **kwargs)\n self.ax.add_patch(circle)\n else:\n # TODO: simplify for faster drawing\n x, y, z = center.coords\n color='grey'\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, 2* np.pi, 100)\n x += radius * np.outer(np.cos(u), np.sin(v))\n y += radius * np.outer(np.sin(u), np.sin(v))\n z += radius * np.outer(np.ones(np.size(u)), np.cos(v))\n self.ax.scatter(x, y, z, c=color, marker='o', alpha=0.01*radius)\n\n def plot_edge(self, p1, p2, color='blue', **kwargs):\n ''' Plots line segment between points p1 and p2. '''\n if self.dim == 2:\n x1, y1 = p1.coords\n x2, y2 = p2.coords\n self.ax.plot([x1, x2], [y1, y2],\n color=color, lw=3.3, **kwargs)\n else:\n x1, y1, z1 = p1.coords\n x2, y2, z2 = p2.coords\n self.ax.plot([x1, x2], [y1, y2], [z1, z2],\n color=color, lw=3.3, **kwargs)\n\n def plot_graph(self, graph, color='purple', **kwargs):\n ''' Draw the graph as a rectilinear drawing in embedding space. 
'''\n self.plot_points(graph.vertices)\n for edge in graph.edges:\n self.plot_edge(edge[0], edge[1], color=color)\n\n\nclass MetricGraph:\n '''\n Usage:\n * to draw a metric graph by gluing together a bunch of curves\n * and compute its geometric @properties:\n * lengths of edges -> shortest edge\n e.g.:\n loop1: 18.56\n loop2: 18.56\n -------------\n shortest edge: 18.56\n\n * smallest curvatures of edges -> local reach\n e.g.:\n loop1: 2.1\n loop2: 2.1\n -----------\n local reach: 2.1\n\n * angles around vertices -> smallest angle between curves\n e.g.:\n e: 106.98, 73.02\n -----------\n alpha: 73.02\n\n * TODO: global reach\n\n * TODO: add getting a dense sample of points from G;\n * TODO: test the reconstruction on it.\n\n Args: \n \n To provide a description of metric graph, we use a dictionary, e.g.:\n desc = {\n 'name': 'butterfly',\n 'points': {\n 'a': [0, 0],\n 'b': [20, 0],\n 'c': [20, 15],\n 'd': [0, 15],\n 'e': [10, 7.5]},\n 'vertices': ['e'],\n 'edges': {\n 'loop1': ['e', 'a', 'd', 'e'],\n 'loop2': ['e', 'b', 'c', 'e']}\n }\n \n Or a path to a JSON file with this content, e.g.:\n file = '../data/metric-graphs/butterfly.json'\n \n Points and names should be unique, no duplicates.\n \n '''\n def __init__(self, desc={}, file=''):\n if not desc and not file:\n raise ValueError('Provide a description or a file.')\n if file != '':\n desc = self.load(file)\n\n self.name = desc['name']\n self.points = [tuple(point) for point in desc['points'].values()]\n names = [name for name in desc['points'].keys()]\n d_points = dict(zip(names, self.points))\n self.name_of = dict(zip(self.points, names))\n self.edge_names = list(desc['edges'].keys())\n vertices = [d_points[vertex] for vertex in desc['vertices']]\n self.vertices = vertices\n edges = []\n for edge in desc['edges'].values():\n edges.append([ d_points[edge[i]] for i in range(len(edge)) ])\n self.edges = edges\n\n def __str__(self):\n out = 'name: ' + self.name + '\\n'\n out += '\\npoints: \\n'\n for point in self.points:\n out += ' ' + self.name_of[point] + ': ' + str(point) + '\\n'\n out += '\\nvertices: ' + ', '.join([\n self.name_of[vertex] for vertex in self.vertices]) + '\\n'\n out += '\\nedges: \\n'\n for edge_name, edge in zip(self.edge_names, self.edges):\n control_points = ', '.join([\n self.name_of[control_point] for control_point in edge])\n out += ' ' + edge_name + ': ' + control_points + '\\n'\n \n # TODO: print the geometric properties too\n return out\n\n @property\n def edge_lenghts(self):\n ''' Lenghts of edges of metric graph as a dictionary:\n {edge_name: lenght}\n '''\n lenghts = {}\n for edge_name, edge in zip(self.edge_names, self.edges):\n lenghts[edge_name] = self.edge_lenght(edge)\n return lenghts\n\n @property\n def shortest_edge(self):\n ''' Returns the shortest edge in the metric graph as:\n (edge_name, lenght) '''\n lenghts = self.edge_lenghts\n return min(lenghts.items(), key=lambda x: x[1])\n\n @property\n def angles(self, in_degrees=True):\n ''' Returns angles around each vertex enclosed by edges (curves)\n that meet at that vertex. '''\n angles = {}\n for vertex in self.vertices:\n angles[self.name_of[vertex]] = self.get_angles(vertex, in_degrees)\n return angles\n\n @property\n def smallest_angle(self, in_degrees=True):\n ''' Returns smallest angle between edges in the metric graph. 
'''\n min_angles = {}\n for vertex, angles in self.angles.items():\n min_angles[vertex] = np.min(angles)\n return min(min_angles.items(), key=lambda x: x[1])\n\n @property\n def edge_radii(self, nn=500):\n ''' Returns \"1/curvature\" of the edge = the minimum radius r \n of a circle touching the edge for each edge. '''\n radii = {}\n for edge_name, edge in zip(self.edge_names, self.edges):\n radii[edge_name] = self.edge_radius(edge, nn=500)\n return radii\n\n @property\n def local_reach(self):\n ''' Returns the local reach of the metric graph. This is the minimum\n edge_radius over all edges. Where edge radius is 1/curvature of the edge,\n in other words, the minimum radius r of a circle touching the edge. '''\n radii = self.edge_radii\n return min(radii.items(), key=lambda x: x[1])\n\n def edge_radius(self, edge, nn=500):\n ''' Returns the \"1/curvature\" of the edge = the minimum radius r of a\n circle touching the edge. '''\n ts = np.linspace(0, 1, nn)\n rs = []\n for t in ts:\n rs.append(self.r_bezier(t, edge))\n return np.min(rs)\n\n def get_angles(self, vertex, in_degrees=True):\n ''' Returns angles between edges (curves) that meet at the vertex in the metric graph. '''\n ds = []\n for t in [0, 1]:\n for i, edge in enumerate(self.edges):\n d = self.d_bezier(t, edge)\n d /= np.linalg.norm(d)\n ds.append(list(d))\n ds.append(list(-d))\n ds = np.unique(ds, axis=0)\n\n if in_degrees:\n conversion = (180/np.pi)\n else:\n conversion = 1\n\n angles = []\n for i in range(ds.shape[0]):\n for j in range(i+1, ds.shape[0]):\n angles.append( np.arccos(np.dot(ds[i], ds[j]))*conversion )\n return list(set(angles))\n\n def edge_lenght(self, edge, nn=500):\n ''' Computes approximate lenght of an edge. '''\n points = self.bezier_points(edge, nn)\n d = 0\n for i in range(nn - 1):\n d += self.distance(points[i], points[i + 1])\n return d\n\n def partial_edge_lenghts(self, edge, nn=500):\n ''' Returns approximate lenghts of an edge for a range of\n values of parameter t. '''\n points = self.bezier_points(edge, nn)\n d = 0\n ds = [d]\n for i in range(nn - 1):\n d += self.distance(points[i], points[i + 1])\n ds.append(d)\n return ds\n\n def n_edge_points(self, density=1):\n ''' Normalizes the lenghts and transforms the normalized\n lenghts to numbers of points on the curves. 
'''\n lenghts = [self.edge_lenght(edge) for edge in self.edges]\n\n # normalize the lenghts\n lenghts = np.array(lenghts)\n lenghts /= np.sum(lenghts)\n\n # transform them to number of edge points\n lenghts *= (density*100)\n\n return [int(np.round(lenght, 2)) for lenght in lenghts]\n\n def p_bezier(self, t, edge):\n ''' Returns a point on Bezier curve.\n t: parameter between 0 and 1\n '''\n b = np.array(edge)\n\n if b.shape[0] == 5:\n return list(t**4 * b[0] + 4*t**3 * (1 - t) * b[1] \\\n + 6*t**2 * (1 - t)**2 * b[2] \\\n + 4*t * (1 - t)**3 * b[3] + (1 - t)**4 * b[4])\n elif b.shape[0] == 4:\n return list((1 - t)**3 * b[0] + 3 * (1 - t)**2 * t * b[1] \\\n + 3 * (1 - t) * t**2 * b[2] \\\n + t**3 * b[3])\n elif b.shape[0] == 2:\n return list( (1 - t) * b[0] + t * b[1] )\n else:\n raise NotImplementedError\n\n def d_bezier(self, t, edge):\n ''' Returns derivatives [x', y'] on t at a point on Bezier curve.\n t: parameter between 0 and 1\n '''\n b = np.array(edge)\n\n if b.shape[0] == 5:\n return list( 4*b[0]*t**3 \\\n - 4*b[1]*t**3 \\\n + 12*b[1]*t**2*(1 - t) \\\n + 6*b[2]*t**2*(2*t - 2) \\\n + 12*b[2]*t*(1 - t)**2 \\\n - 12*b[3]*t*(1 - t)**2 \\\n + 4*b[3]*(1 - t)**3 \\\n - 4*b[4]*(1 - t)**3)\n elif b.shape[0] == 4:\n return list(- 3*b[0]*(1 - t)**2 \\\n + 3*b[1]*t*(2*t - 2) \\\n + 3*b[1]*(1 - t)**2 \\\n - 3*b[2]*t**2 \\\n + 2*b[2]*t*(3 - 3*t) \\\n + 3*b[3]*t**2)\n elif b.shape[0] == 2:\n return list( b[0] - b[1] )\n else:\n raise NotImplementedError\n\n def dd_bezier(self, t, edge):\n ''' Returns second order derivatives [x'', y''] at a point on Bezier curve.\n t: parameter between 0 and 1\n '''\n b = np.array(edge)\n\n if b.shape[0] == 5:\n return list( 12*b[0]*t**2 - 24*b[1]*t**2 + 24*b[1]*t*(1 - t) \\\n + 12*b[2]*t**2 + 24*b[2]*t*(2*t - 2) + 12*b[2]*(1 - t)**2 \\\n - 12*b[3]*t*(2*t - 2) - 24*b[3]*(1 - t)**2 + 12*b[4]*(1 - t)**2)\n elif b.shape[0] == 4:\n return list(- 3*b[0]*(2*t - 2) + 6*b[1]*t + 6*b[1]*(2*t - 2) \\\n - 12*b[2]*t + 2*b[2]*(3 - 3*t) + 6*b[3]*t)\n elif b.shape[0] == 2:\n return [0]\n else:\n raise NotImplementedError\n\n def r_bezier(self, t, edge, eps=1e-4):\n ''' Returns the radius of curvature r(t) at a point on Bezier curve\n kappa = (x'y'' - x''y') / (x'^2 + y'^2)^(3/2)\n r(t) = 1/kappa\n TODO: add curvature for a curve in 3d.\n '''\n d = self.d_bezier(t, edge)\n dd = self.dd_bezier(t, edge)\n kappa_nom = d[0] * dd[1] - dd[0] * d[1]\n kappa_den = (d[0]**2 + d[1]**2)**(3/2)\n if kappa_den < eps:\n return 0\n else:\n return np.abs(kappa_den/kappa_nom)\n\n def n_bezier(self, t, edge):\n ''' Returns the normal vector at a point on Bezier curve.\n TODO: add for a curve in 3d.\n '''\n d = self.d_bezier(t, edge)\n d /= np.linalg.norm(d)\n return [-d[1], d[0]]\n\n def edge_sigma_tube(self, edge, n, sigma):\n ''' Returns the points on sigma tube around the edge\n of the metric graph.'''\n b_points = self.regular_bezier_points(edge, n)\n n_points = self.normal_bezier_points(edge, n)\n\n b_points = np.array(b_points)\n n_points = np.array(n_points)\n n_points *= sigma\n\n s_tube1 = b_points + n_points\n s_tube2 = b_points - n_points\n\n s_tube = list(s_tube1) + list(s_tube2)\n s_tube = [list(point) for point in s_tube]\n return s_tube\n\n def bezier_points(self, edge, n):\n points = []\n ts = np.linspace(0, 1, n)\n for t in ts:\n points.append(self.p_bezier(t, edge))\n return points\n\n def regular_bezier_points(self, edge, n):\n points = []\n ts = self.regular_ts(edge, n)\n for t in ts:\n points.append(self.p_bezier(t, edge))\n return points\n\n def normal_bezier_points(self, 
edge, n):\n points = []\n ts = self.regular_ts(edge, n)\n for t in ts:\n points.append(self.n_bezier(t, edge))\n return points\n\n def regular_ts(self, edge, n=50, nn=500):\n ''' Returns ts that cut up the curve at regular intervals. '''\n ts = np.linspace(0, 1, nn)\n ds = self.partial_edge_lenghts(edge, nn)\n ds = np.array(ds)\n ds /= ds[-1]\n\n targets = list(np.linspace(0, 1, n))[1:-1]\n foundts = [0]\n for target in targets:\n for i in range(len(ds) - 1):\n if target > ds[i] and target < ds[i + 1]:\n foundts.append( (ts[i] + ts[i+1])/2 )\n foundts += [1]\n return foundts\n\n def save(self, file_name):\n with open(file_name, 'w') as file:\n json.dump(self.d, file, indent=4)\n\n def load(self, file_name):\n with open(file_name) as file:\n d = json.load(file)\n return d\n\n def distance(self, p1, p2):\n ''' Euclidean distance between two points. '''\n p1 = np.array(p1)\n p2 = np.array(p2)\n return np.linalg.norm(p1 - p2)\n\n def get_points(self, density=1):\n ''' Returns regularly spaced points from metric graph. '''\n points = []\n for vertex in self.vertices:\n points.append(vertex)\n\n ns = self.n_edge_points(density)\n for edge, n in zip(self.edges, ns):\n points += self.regular_bezier_points(edge, n)\n return points\n\n def get_st_points(self, density=1, sigma=0.5):\n ''' Returns regularly spaced points on the boundary\n of sigma tube around the metric graph. '''\n points_st = [] # points on the sigma tube\n ns = self.n_edge_points(density)\n for edge, n in zip(self.edges, ns):\n points_st += self.edge_sigma_tube(edge, n, sigma)\n return points_st\n\n def show(self, density=4, sigma_tube=True, marker='.'):\n ''' Plots the metric graph drawing. '''\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n\n points = self.get_points(density)\n points = np.array(points)\n ax.scatter(points[:, 0], points[:, 1],\n color='black', s=10, marker=marker)\n if sigma_tube:\n points_st = self.get_st_points(density)\n points_st = np.array(points_st)\n ax.scatter(points_st[:, 0], points_st[:, 1],\n color='red', s=10, marker=marker)\n plt.show()\n\n# some helper functions\ndef geogebra_point_cloud(point_cloud):\n ''' Some help for exporting point clouds to GeoGebra, e.g.\n >>> geogebra_point_cloud(PointCloud([Point(1, 2), Point(3, 4)]))\n Execute[{\"P0 = (1, 2)\", \"P1 = (3, 4)\"}]\n '''\n out = 'Execute[{'\n labels = ['P' + str(i) for i in range(len(point_cloud))]\n for label, point in zip(labels, point_cloud.points):\n x, y = point.coords\n out += '\"' + label + ' = (' + str(x) + ', ' + str(y) + ')\", '\n\n out = out[:-2]\n out += '}]'\n print(out)\n\ndef geogebra_bezier_curve(curve_name, control_points):\n ''' Some help for drawing Bezier curves in GeoGebra, e.g.\n geogebra_bezier_curve('quartic', ['A', 'B', 'C', 'D'])\n '''\n b = control_points\n if len(b) == 4: # quartic\n out = curve_name + ' = Curve['\n out += '(1 - t)^3 x(' + b[0] + ') + 3 (1 - t)^2 t x(' + b[1] \\\n + ') + 3 (1 - t) t^2 x(' + b[2] + ') + t^3 x(' + b[3] + '),'\n out += '(1 - t)^3 y(' + b[0] + ') + 3 (1 - t)^2 t y(' + b[1] \\\n + ') + 3 (1 - t) t^2 y(' + b[2] + ') + t^3 y(' + b[3] + '),'\n out += ' t, 0, 1]'\n print(out)\n else: # quintic\n out = curve_name + ' = Curve['\n out += 't^4 x(' + b[0] + ') + 4t^3 (1 - t) x(' + b[1] \\\n + ') + 6t^2 (1 - t)^2 x(' + b[2] + ') + 4t (1 - t)^3 x(' \\\n + b[3] + ') + (1 - t)^4 x(' + b[4] + '),'\n out += 't^4 y(' + b[0] + ') + 4t^3 (1 - t) y(' + b[1] \\\n + ') + 6t^2 (1 - t)^2 y(' + b[2] + ') + 4t (1 - t)^3 y(' \\\n + b[3] + ') + (1 - t)^4 y(' + b[4] + '),'\n out += ' t, 0, 
1]'\n\ndef save_sample(sample_points, file_name):\n np.savetxt('../data/samples/' + file_name + '.out',\n np.array(sample_points),\n delimiter=',')\n\ndef load_sample(file_name):\n sample_points_np = np.loadtxt('../data/samples/' + file_name + '.out',\n delimiter=',')\n return [tuple(pt) for pt in sample_points_np]\n"
] | [
[
"numpy.dot",
"numpy.linspace",
"matplotlib.pyplot.axes",
"numpy.round",
"numpy.mean",
"numpy.unique",
"numpy.sin",
"numpy.size",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.patches.Circle",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.abs",
"matplotlib.pyplot.cla",
"numpy.linalg.norm",
"numpy.cos",
"matplotlib.pyplot.clf",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
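A minimal sketch of the Euclidean-distance test that PointCloud.distance and Point.equals rely on in the metric-graph record above, using numpy.array and numpy.linalg.norm from the extracted calls (the coordinates are invented):

import numpy as np

# Mirrors PointCloud.distance and the eps=1e-4 closeness test in Point.equals;
# the coordinates are invented for illustration.
p1 = np.array((0.0, 0.0))
p2 = np.array((3.0, 4.0))
dist = np.linalg.norm(p1 - p2)   # Euclidean distance: 5.0
print(dist, dist < 1e-4)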
mircean/torchvision | [
"148bac23afa21ae4df67aeb07a6f0c3bd3b15276"
] | [
"test/test_models.py"
] | [
"from common_utils import TestCase, map_nested_tensor_object, freeze_rng_state\nfrom collections import OrderedDict\nfrom itertools import product\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torchvision import models\nimport unittest\nimport traceback\nimport random\n\n\ndef set_rng_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n\ndef get_available_classification_models():\n # TODO add a registration mechanism to torchvision.models\n return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != \"_\"]\n\n\ndef get_available_segmentation_models():\n # TODO add a registration mechanism to torchvision.models\n return [k for k, v in models.segmentation.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != \"_\"]\n\n\ndef get_available_detection_models():\n # TODO add a registration mechanism to torchvision.models\n return [k for k, v in models.detection.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != \"_\"]\n\n\ndef get_available_video_models():\n # TODO add a registration mechanism to torchvision.models\n return [k for k, v in models.video.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != \"_\"]\n\n\n# models that are in torch hub, as well as r3d_18. we tried testing all models\n# but the test was too slow. not included are detection models, because\n# they are not yet supported in JIT.\n# If 'unwrapper' is provided it will be called with the script model outputs\n# before they are compared to the eager model outputs. This is useful if the\n# model outputs are different between TorchScript / Eager mode\nscript_test_models = {\n 'deeplabv3_resnet50': {},\n 'deeplabv3_resnet101': {},\n 'mobilenet_v2': {},\n 'resnext50_32x4d': {},\n 'fcn_resnet50': {},\n 'fcn_resnet101': {},\n 'googlenet': {\n 'unwrapper': lambda x: x.logits\n },\n 'densenet121': {},\n 'resnet18': {},\n 'alexnet': {},\n 'shufflenet_v2_x1_0': {},\n 'squeezenet1_0': {},\n 'vgg11': {},\n 'inception_v3': {\n 'unwrapper': lambda x: x.logits\n },\n 'r3d_18': {},\n \"fasterrcnn_resnet50_fpn\": {\n 'unwrapper': lambda x: x[1]\n },\n \"maskrcnn_resnet50_fpn\": {\n 'unwrapper': lambda x: x[1]\n },\n \"keypointrcnn_resnet50_fpn\": {\n 'unwrapper': lambda x: x[1]\n },\n}\n\n\nclass ModelTester(TestCase):\n def checkModule(self, model, name, args):\n if name not in script_test_models:\n return\n unwrapper = script_test_models[name].get('unwrapper', None)\n return super(ModelTester, self).checkModule(model, args, unwrapper=unwrapper, skip=False)\n\n def _test_classification_model(self, name, input_shape):\n set_rng_seed(0)\n # passing num_class equal to a number other than 1000 helps in making the test\n # more enforcing in nature\n model = models.__dict__[name](num_classes=50)\n model.eval()\n x = torch.rand(input_shape)\n out = model(x)\n self.assertExpected(out, prec=0.1)\n self.assertEqual(out.shape[-1], 50)\n self.checkModule(model, name, (x,))\n\n def _test_segmentation_model(self, name):\n # passing num_class equal to a number other than 1000 helps in making the test\n # more enforcing in nature\n model = models.segmentation.__dict__[name](num_classes=50, pretrained_backbone=False)\n model.eval()\n input_shape = (1, 3, 300, 300)\n x = torch.rand(input_shape)\n out = model(x)\n self.assertEqual(tuple(out[\"out\"].shape), (1, 50, 300, 300))\n self.checkModule(model, name, (x,))\n\n def _test_detection_model(self, name):\n set_rng_seed(0)\n model = 
models.detection.__dict__[name](num_classes=50, pretrained_backbone=False)\n model.eval()\n input_shape = (3, 300, 300)\n x = torch.rand(input_shape)\n model_input = [x]\n out = model(model_input)\n self.assertIs(model_input[0], x)\n self.assertEqual(len(out), 1)\n\n def subsample_tensor(tensor):\n num_elems = tensor.numel()\n num_samples = 20\n if num_elems <= num_samples:\n return tensor\n\n flat_tensor = tensor.flatten()\n ith_index = num_elems // num_samples\n return flat_tensor[ith_index - 1::ith_index]\n\n def compute_mean_std(tensor):\n # can't compute mean of integral tensor\n tensor = tensor.to(torch.double)\n mean = torch.mean(tensor)\n std = torch.std(tensor)\n return {\"mean\": mean, \"std\": std}\n\n # maskrcnn_resnet_50_fpn numerically unstable across platforms, so for now\n # compare results with mean and std\n if name == \"maskrcnn_resnet50_fpn\":\n test_value = map_nested_tensor_object(out, tensor_map_fn=compute_mean_std)\n # mean values are small, use large prec\n self.assertExpected(test_value, prec=.01)\n else:\n self.assertExpected(map_nested_tensor_object(out, tensor_map_fn=subsample_tensor), prec=0.01)\n\n scripted_model = torch.jit.script(model)\n scripted_model.eval()\n scripted_out = scripted_model(model_input)[1]\n self.assertEqual(scripted_out[0][\"boxes\"], out[0][\"boxes\"])\n self.assertEqual(scripted_out[0][\"scores\"], out[0][\"scores\"])\n # labels currently float in script: need to investigate (though same result)\n self.assertEqual(scripted_out[0][\"labels\"].to(dtype=torch.long), out[0][\"labels\"])\n self.assertTrue(\"boxes\" in out[0])\n self.assertTrue(\"scores\" in out[0])\n self.assertTrue(\"labels\" in out[0])\n # don't check script because we are compiling it here:\n # TODO: refactor tests\n # self.check_script(model, name)\n self.checkModule(model, name, ([x],))\n\n def _test_detection_model_validation(self, name):\n set_rng_seed(0)\n model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False)\n input_shape = (3, 300, 300)\n x = [torch.rand(input_shape)]\n\n # validate that targets are present in training\n self.assertRaises(ValueError, model, x)\n\n # validate type\n targets = [{'boxes': 0.}]\n self.assertRaises(ValueError, model, x, targets=targets)\n\n # validate boxes shape\n for boxes in (torch.rand((4,)), torch.rand((1, 5))):\n targets = [{'boxes': boxes}]\n self.assertRaises(ValueError, model, x, targets=targets)\n\n # validate that no degenerate boxes are present\n boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])\n targets = [{'boxes': boxes}]\n self.assertRaises(ValueError, model, x, targets=targets)\n\n def _test_video_model(self, name):\n # the default input shape is\n # bs * num_channels * clip_len * h *w\n input_shape = (1, 3, 4, 112, 112)\n # test both basicblock and Bottleneck\n model = models.video.__dict__[name](num_classes=50)\n model.eval()\n x = torch.rand(input_shape)\n out = model(x)\n self.checkModule(model, name, (x,))\n self.assertEqual(out.shape[-1], 50)\n\n def _make_sliced_model(self, model, stop_layer):\n layers = OrderedDict()\n for name, layer in model.named_children():\n layers[name] = layer\n if name == stop_layer:\n break\n new_model = torch.nn.Sequential(layers)\n return new_model\n\n def test_memory_efficient_densenet(self):\n input_shape = (1, 3, 300, 300)\n x = torch.rand(input_shape)\n\n for name in ['densenet121', 'densenet169', 'densenet201', 'densenet161']:\n model1 = models.__dict__[name](num_classes=50, memory_efficient=True)\n params = model1.state_dict()\n num_params = 
sum([x.numel() for x in model1.parameters()])\n model1.eval()\n out1 = model1(x)\n out1.sum().backward()\n num_grad = sum([x.grad.numel() for x in model1.parameters() if x.grad is not None])\n\n model2 = models.__dict__[name](num_classes=50, memory_efficient=False)\n model2.load_state_dict(params)\n model2.eval()\n out2 = model2(x)\n\n max_diff = (out1 - out2).abs().max()\n\n self.assertTrue(num_params == num_grad)\n self.assertTrue(max_diff < 1e-5)\n\n def test_resnet_dilation(self):\n # TODO improve tests to also check that each layer has the right dimensionality\n for i in product([False, True], [False, True], [False, True]):\n model = models.__dict__[\"resnet50\"](replace_stride_with_dilation=i)\n model = self._make_sliced_model(model, stop_layer=\"layer4\")\n model.eval()\n x = torch.rand(1, 3, 224, 224)\n out = model(x)\n f = 2 ** sum(i)\n self.assertEqual(out.shape, (1, 2048, 7 * f, 7 * f))\n\n def test_mobilenetv2_residual_setting(self):\n model = models.__dict__[\"mobilenet_v2\"](inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])\n model.eval()\n x = torch.rand(1, 3, 224, 224)\n out = model(x)\n self.assertEqual(out.shape[-1], 1000)\n\n def test_mobilenetv2_norm_layer(self):\n model = models.__dict__[\"mobilenet_v2\"]()\n self.assertTrue(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))\n\n def get_gn(num_channels):\n return nn.GroupNorm(32, num_channels)\n\n model = models.__dict__[\"mobilenet_v2\"](norm_layer=get_gn)\n self.assertFalse(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))\n self.assertTrue(any(isinstance(x, nn.GroupNorm) for x in model.modules()))\n\n def test_fasterrcnn_double(self):\n model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)\n model.double()\n model.eval()\n input_shape = (3, 300, 300)\n x = torch.rand(input_shape, dtype=torch.float64)\n model_input = [x]\n out = model(model_input)\n self.assertIs(model_input[0], x)\n self.assertEqual(len(out), 1)\n self.assertTrue(\"boxes\" in out[0])\n self.assertTrue(\"scores\" in out[0])\n self.assertTrue(\"labels\" in out[0])\n\n def test_googlenet_eval(self):\n m = torch.jit.script(models.googlenet(pretrained=True).eval())\n self.checkModule(m, \"googlenet\", torch.rand(1, 3, 224, 224))\n\n @unittest.skipIf(not torch.cuda.is_available(), 'needs GPU')\n def test_fasterrcnn_switch_devices(self):\n model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)\n model.cuda()\n model.eval()\n input_shape = (3, 300, 300)\n x = torch.rand(input_shape, device='cuda')\n model_input = [x]\n out = model(model_input)\n self.assertIs(model_input[0], x)\n self.assertEqual(len(out), 1)\n self.assertTrue(\"boxes\" in out[0])\n self.assertTrue(\"scores\" in out[0])\n self.assertTrue(\"labels\" in out[0])\n # now switch to cpu and make sure it works\n model.cpu()\n x = x.cpu()\n out_cpu = model([x])\n self.assertTrue(\"boxes\" in out_cpu[0])\n self.assertTrue(\"scores\" in out_cpu[0])\n self.assertTrue(\"labels\" in out_cpu[0])\n\n def test_generalizedrcnn_transform_repr(self):\n\n min_size, max_size = 224, 299\n image_mean = [0.485, 0.456, 0.406]\n image_std = [0.229, 0.224, 0.225]\n\n t = models.detection.transform.GeneralizedRCNNTransform(min_size=min_size,\n max_size=max_size,\n image_mean=image_mean,\n image_std=image_std)\n\n # Check integrity of object __repr__ attribute\n expected_string = 'GeneralizedRCNNTransform('\n _indent = '\\n '\n expected_string += '{0}Normalize(mean={1}, std={2})'.format(_indent, image_mean, 
image_std)\n expected_string += '{0}Resize(min_size=({1},), max_size={2}, '.format(_indent, min_size, max_size)\n expected_string += \"mode='bilinear')\\n)\"\n self.assertEqual(t.__repr__(), expected_string)\n\n\nfor model_name in get_available_classification_models():\n # for-loop bodies don't define scopes, so we have to save the variables\n # we want to close over in some way\n def do_test(self, model_name=model_name):\n input_shape = (1, 3, 224, 224)\n if model_name in ['inception_v3']:\n input_shape = (1, 3, 299, 299)\n self._test_classification_model(model_name, input_shape)\n\n setattr(ModelTester, \"test_\" + model_name, do_test)\n\n\nfor model_name in get_available_segmentation_models():\n # for-loop bodies don't define scopes, so we have to save the variables\n # we want to close over in some way\n def do_test(self, model_name=model_name):\n self._test_segmentation_model(model_name)\n\n setattr(ModelTester, \"test_\" + model_name, do_test)\n\n\nfor model_name in get_available_detection_models():\n # for-loop bodies don't define scopes, so we have to save the variables\n # we want to close over in some way\n def do_test(self, model_name=model_name):\n self._test_detection_model(model_name)\n\n setattr(ModelTester, \"test_\" + model_name, do_test)\n\n def do_validation_test(self, model_name=model_name):\n self._test_detection_model_validation(model_name)\n\n setattr(ModelTester, \"test_\" + model_name + \"_validation\", do_validation_test)\n\n\nfor model_name in get_available_video_models():\n\n def do_test(self, model_name=model_name):\n self._test_video_model(model_name)\n\n setattr(ModelTester, \"test_\" + model_name, do_test)\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"torch.jit.script",
"torch.nn.Sequential",
"torch.mean",
"numpy.random.seed",
"torch.manual_seed",
"torch.tensor",
"torch.std",
"torch.rand",
"torch.cuda.is_available",
"torch.nn.GroupNorm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
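A minimal sketch of the seeding pattern used by set_rng_seed in test_models.py above, combining torch.manual_seed, random.seed and numpy.random.seed from the extracted calls (assumes torch and numpy are installed; the seed value 0 matches the tests, the final print is illustrative):

import random
import numpy as np
import torch

# Same seeding pattern as set_rng_seed(0) in the tests above.
def set_rng_seed(seed):
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)

set_rng_seed(0)
print(torch.rand(1), np.random.rand())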
gagolews/clustering_results_v1 | [
"f3007018a195124433a4bbb5b15259cf8e838334"
] | [
"do_benchmark_sklearn.py"
] | [
"\"\"\"\nCopyright (C) 2020, Marek Gagolewski, https://www.gagolewski.com\n\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\n\n\nimport sklearn.cluster\nimport sklearn.mixture\nimport numpy as np\nimport warnings\n\ndef do_benchmark_birch(X, Ks):\n max_K = max(max(Ks), 16) # just in case we'll need more in the future\n Ks = list(range(2, max_K+1))\n res = dict()\n for K in Ks: res[K] = dict()\n\n print(\" >:\", end=\"\", flush=True)\n for branching_factor in [10, 50, 100]:\n for threshold in [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0]:\n for K in Ks:\n method = \"sklearn_birch_T%g_BF%d\"%(threshold, branching_factor)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # If threshold is too large, the number of subclusters\n # found might be less than the requested one.\n c = sklearn.cluster.Birch(n_clusters=K,\n threshold=threshold,\n branching_factor=branching_factor\n )\n labels_pred = c.fit_predict(X)+1 # 0-based -> 1-based\n #print(np.bincount(labels_pred))\n #print(len(labels_pred))\n if labels_pred.max() == K:\n res[K][method] = labels_pred\n print(\".\", end=\"\", flush=True)\n print(\":\", end=\"\", flush=True)\n print(\"<\", end=\"\", flush=True)\n return res\n\n\ndef do_benchmark_kmeans(X, Ks):\n max_K = max(max(Ks), 16) # just in case we'll need more in the future\n Ks = list(range(2, max_K+1))\n res = dict()\n for K in Ks: res[K] = dict()\n\n print(\" >:\", end=\"\", flush=True)\n for K in Ks:\n method = \"sklearn_kmeans\"\n c = sklearn.cluster.KMeans(n_clusters=K,\n # defaults: n_init=10, max_iter=300, tol=1e-4, init=\"k-means++\"\n random_state=123\n )\n labels_pred = c.fit_predict(X)+1 # 0-based -> 1-based\n res[K][method] = labels_pred\n print(\".\", end=\"\", flush=True)\n print(\"<\", end=\"\", flush=True)\n return res\n\n\ndef do_benchmark_spectral(X, Ks):\n # this is slow -- use only Ks provided\n #max_K = max(max(Ks), 16) # just in case we'll need more in the future\n #Ks = list(range(2, max_K+1))\n\n res = dict()\n for K in Ks: res[K] = dict()\n\n print(\" >:\", end=\"\", flush=True)\n for K in Ks:\n for affinity in [\"rbf\", \"laplacian\", \"poly\", \"sigmoid\"]:\n for gamma in [0.25, 0.5, 1.0, 2.5, 5.0]:\n method = \"sklearn_spectral_A%s_G%g\"%(affinity, gamma)\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n c = sklearn.cluster.SpectralClustering(n_clusters=K,\n affinity=affinity, gamma=gamma,\n random_state=123\n )\n labels_pred = c.fit_predict(X)+1 # 0-based -> 1-based\n #print(np.bincount(labels_pred))\n 
#print(len(labels_pred))\n assert min(labels_pred) == 1\n assert max(labels_pred) == K\n assert labels_pred.shape[0] == X.shape[0]\n assert len(np.unique(labels_pred)) == K\n res[K][method] = labels_pred\n print(\".\", end=\"\", flush=True)\n except:\n print(\"x\", end=\"\", flush=True)\n print(\":\", end=\"\", flush=True)\n print(\"<\", end=\"\", flush=True)\n return res\n\n\ndef do_benchmark_gm(X, Ks):\n max_K = max(max(Ks), 16) # just in case we'll need more in the future\n Ks = list(range(2, max_K+1))\n res = dict()\n for K in Ks: res[K] = dict()\n\n print(\" >:\", end=\"\", flush=True)\n for K in Ks:\n method = \"sklearn_gm\"\n c = sklearn.mixture.GaussianMixture(n_components=K,\n n_init=100,\n # defaults: tol=1e-3, covariance_type=\"full\", max_iter=100, reg_covar=1e-6\n random_state=123\n )\n labels_pred = c.fit_predict(X)+1 # 0-based -> 1-based\n if len(np.unique(labels_pred)) != K: # some clusters might be empty\n continue # skip\n res[K][method] = labels_pred\n print(\".\", end=\"\", flush=True)\n print(\"<\", end=\"\", flush=True)\n return res\n\n\n"
] | [
[
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
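A minimal sketch of the 0-based to 1-based label shift and the numpy.unique cluster-count check applied by the benchmark functions above (the toy data and K are invented; scikit-learn is assumed installed):

import numpy as np
from sklearn.cluster import KMeans

# Toy two-blob data; K and the points are invented.
X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
K = 2
labels_pred = KMeans(n_clusters=K, n_init=10, random_state=123).fit_predict(X) + 1  # 0-based -> 1-based
print(labels_pred, len(np.unique(labels_pred)) == K)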
Aclau99/api-offres-emploi | [
"abeaf74b22431613fc5fa4b280ad4de4a331b447"
] | [
"utils.py"
] | [
"import datetime\nimport pandas as pd\n\ndef dt_to_str_iso(dt):\n \"\"\"\n Convert a datetime.datetime object to a string respecting the ISO-8601 format \n Will raise ValueError if type not appropriate\n :param dt: The datetime object to convert\n :type dt: datetime.datetime \n \n :returns: ISO 8601 formatted string\n :rtype: str \n \"\"\"\n iso_format = \"%Y-%m-%dT%H:%M:%SZ\"\n if isinstance(dt, datetime.datetime):\n s = dt.strftime(iso_format)\n return s\n else:\n raise ValueError(\"Arg 'dt' should be of class 'datetime.datetime'\")\n\n\ndef filters_to_df(filters):\n \"\"\"\n :param filters: The list of the filters available through \"filtresDisponibles\" key\n :type filters: list\n \n :rtype: pandas.DataFrame\n :returns: A pandas.DataFrame of the filters (that is more suitable to analysis)\n \"\"\"\n dics = [{x[\"filtre\"]: x[\"agregation\"]} for x in filters]\n l = []\n for dic in dics:\n flat_dic = [\n dict(\n filtre=key,\n valeur_possible=x[\"valeurPossible\"],\n nb_resultats=x[\"nbResultats\"],\n )\n for (key, value) in dic.items()\n for x in value\n ]\n l.extend(flat_dic)\n df = pd.DataFrame(l)\n return df"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
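A minimal sketch of the filters_to_df flattening shown in utils.py above, driven by a mock of the "filtresDisponibles" payload (the field values are invented; only the key names come from the record's code, and the result is a pandas.DataFrame as in the extracted call):

import pandas as pd

# Mock of one entry from "filtresDisponibles"; only the key names are real.
filters = [{"filtre": "typeContrat",
            "agregation": [{"valeurPossible": "CDI", "nbResultats": 42}]}]
rows = [dict(filtre=f["filtre"],
             valeur_possible=a["valeurPossible"],
             nb_resultats=a["nbResultats"])
        for f in filters for a in f["agregation"]]
print(pd.DataFrame(rows))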
syncrosim/pysyncrosim | [
"20c0005674b7dbcef19c233dfa7db95e0d9d451e"
] | [
"tests/test_pysyncrosim.py"
] | [
"import pysyncrosim as ps\nimport pytest\nimport pandas as pd\nimport math\nimport numpy as np\nimport os\nimport rasterio\n# import re\n\ndef test_session_attributes():\n \n mySession = ps.Session()\n \n # Test init\n assert isinstance(mySession, ps.Session)\n \n with pytest.raises(ValueError, match=\"The location is not valid\"):\n mySession = ps.Session(location=\"bad/location\")\n \n # Test version method\n assert isinstance(mySession.version(), str)\n assert \"Version is:\" in mySession.version()\n \n # Test packages method\n assert isinstance(mySession.packages(), pd.DataFrame)\n assert isinstance(mySession.packages(installed=False), pd.DataFrame)\n assert isinstance(mySession.packages(installed=\"BASE\"), pd.DataFrame)\n \n with pytest.raises(TypeError,\n match=\"installed must be Logical or 'BASE'\"):\n mySession.packages(installed=1)\n \n \ndef test_session_package_functions():\n \n mySession = ps.Session()\n \n # Test add_packages, remove_packages, update_packages methods\n with pytest.raises(TypeError, match=\"packages must be a String or List\"):\n mySession.add_packages(1)\n with pytest.raises(TypeError, match=\"packages must be a String or List\"):\n mySession.remove_packages(1)\n with pytest.raises(TypeError, match=\"packages must be a String or List\"):\n mySession.update_packages(1)\n \n with pytest.raises(TypeError, match=\"all packages must be Strings\"):\n mySession.add_packages([\"helloworldSpatial\", 1])\n with pytest.raises(TypeError, match=\"all packages must be Strings\"):\n mySession.remove_packages([\"helloworldSpatial\", 1])\n with pytest.raises(TypeError, match=\"all packages must be Strings\"):\n mySession.update_packages([\"helloworldSpatial\", 1])\n \n mySession.add_packages(\"helloworldSpatial\")\n assert \"helloworldSpatial\" in mySession.packages()[\"Name\"].values\n \n mySession.remove_packages(\"helloworldSpatial\")\n assert \"helloworldSpatial\" not in mySession.packages()[\"Name\"].values\n \n mySession.add_packages(\"helloworldSpatial\")\n \ndef test_helper():\n \n mySession = ps.Session()\n mySession.add_packages(\"stsim\")\n \n # Type checking\n with pytest.raises(\n TypeError,\n match=\"missing 1 required positional argument\"):\n ps.library()\n \n with pytest.raises(TypeError, match=\"name must be a String\"):\n ps.library(name=1)\n \n with pytest.raises(\n TypeError,\n match=\"session must be None or pysyncrosim Session instance\"):\n ps.library(name=\"Test\", session=1)\n \n with pytest.raises(TypeError, match=\"package must be a String\"):\n ps.library(name=\"Test\", package=1)\n \n with pytest.raises(TypeError, match=\"addons must be a String\"):\n ps.library(name=\"Test\", addons=1)\n \n with pytest.raises(TypeError, match=\"templates must be a String\"):\n ps.library(name=\"Test\", template=1)\n \n with pytest.raises(TypeError, match=\"forceUpdate must be a Logical\"):\n ps.library(name=\"Test\", forceUpdate=\"True\")\n \n with pytest.raises(TypeError, match=\"overwrite must be a Logical\"):\n ps.library(name=\"Test\", overwrite=\"False\")\n\n # Test package installation\n mySession.remove_packages(\"stsim\")\n with pytest.raises(ValueError, match=\"The package stsim is not installed\"):\n ps.library(name=\"Test\", package=\"stsim\", session=mySession)\n mySession.add_packages(\"stsim\")\n \n # Test Library path\n with pytest.raises(ValueError, match=\"Path to Library does not exist\"):\n ps.library(\"path/to/library\")\n \n # Test template\n with pytest.raises(ValueError,\n match=\"Template test does not exist in package\"):\n 
ps.library(\"Test\", template=\"test\")\n \n # Test output\n myLibrary = ps.library(name=\"Test\", forceUpdate=True)\n assert isinstance(myLibrary, ps.Library)\n \ndef test_library_attributes():\n \n myLibrary = ps.library(name=\"Test\", overwrite=True)\n \n # Check attributes\n assert isinstance(myLibrary.name, str)\n assert isinstance(myLibrary.session, ps.Session)\n assert isinstance(myLibrary.location, str)\n assert os.path.isfile(myLibrary.location)\n assert isinstance(myLibrary.package, str)\n assert isinstance(myLibrary.addons, pd.DataFrame)\n\ndef test_library_projects():\n \n myLibrary = ps.library(name=\"Test\", overwrite=True)\n \n # Test inputs\n with pytest.raises(TypeError, match=\"name must be a String\"):\n myLibrary.projects(name=1)\n \n with pytest.raises(TypeError, match=\"pid must be an Integer\"):\n myLibrary.projects(pid=\"1\")\n \n with pytest.raises(TypeError, match=\"summary must be a Logical\"):\n myLibrary.projects(summary=\"False\")\n \n with pytest.raises(TypeError, match=\"overwrite must be a Logical\"):\n myLibrary.projects(overwrite=\"False\")\n \n with pytest.raises(ValueError,\n match=\"pid specified, but no Projects created yet\"):\n myLibrary.projects(pid=2)\n \n with pytest.raises(ValueError,\n match=\"Project ID 1 does not match Project name test2\"):\n myLibrary.projects(name=\"test\")\n myLibrary.projects(name=\"test2\", pid=1)\n \n with pytest.raises(ValueError, match=\"Project ID 3 does not exist\"):\n myLibrary.projects(pid=3)\n \n # Test outputs\n assert isinstance(myLibrary.projects(), pd.DataFrame)\n assert isinstance(myLibrary.projects(name=\"test\"), ps.Project) \n assert isinstance(myLibrary.projects(summary=False), list)\n \ndef test_library_scenarios():\n \n myLibrary = ps.library(name=\"Test\", overwrite=True)\n myLibrary.projects(name=\"test\")\n \n with pytest.raises(\n TypeError, \n match=\"name must be a String, Integer, or List of these\"):\n myLibrary.scenarios(name=pd.DataFrame())\n \n with pytest.raises(\n TypeError,\n match=\"project must be Project instance, String, or Integer\"):\n myLibrary.scenarios(project=[1])\n \n with pytest.raises(TypeError, match=\"sid must be an Integer\"):\n myLibrary.scenarios(sid=\"1\")\n \n with pytest.raises(TypeError, match=\"pid must be an Integer\"):\n myLibrary.scenarios(pid=\"1\")\n \n with pytest.raises(TypeError, match=\"overwrite must be a Logical\"):\n myLibrary.scenarios(overwrite=\"False\")\n \n with pytest.raises(TypeError, match=\"optional must be a Logical\"):\n myLibrary.scenarios(optional=1)\n \n with pytest.raises(TypeError, match=\"summary must be a Logical\"):\n myLibrary.scenarios(summary=\"True\")\n \n with pytest.raises(ValueError, match=\"Scenario ID 2 does not exist\"):\n myLibrary.scenarios(sid=2)\n \n # Test scenarios method outputs\n assert myLibrary.scenarios(name=\"test\").name == \"test\"\n assert myLibrary.scenarios(name=\"test2\", sid=1).name == \"test\"\n assert isinstance(myLibrary.scenarios(name=\"test\"), ps.Scenario)\n assert isinstance(myLibrary.scenarios(name=\"test\", sid=1), ps.Scenario)\n assert isinstance(myLibrary.scenarios(), pd.DataFrame)\n \n myLibrary.projects(name=\"project2\")\n with pytest.raises(ValueError, match=\"More than one Project in Library\"):\n myLibrary.scenarios(summary=False)\n \n assert isinstance(myLibrary.scenarios(), pd.DataFrame) \n assert isinstance(myLibrary.scenarios(pid=1, summary=False), ps.Scenario)\n assert len(myLibrary.scenarios(pid=1).columns) == 4\n assert len(myLibrary.scenarios(pid=1, optional=True).columns) == 11\n 
assert myLibrary.scenarios(name=\"test\", pid=1, overwrite=True).sid != 1\n assert all(myLibrary.scenarios(project=1) == myLibrary.scenarios(pid=1))\n assert all(\n myLibrary.scenarios(project=\"test\") == myLibrary.scenarios(pid=1))\n myProject = myLibrary.projects(\"test\")\n assert all(myLibrary.scenarios(\n project=1) == myLibrary.scenarios(project=myProject))\n \ndef test_library_datasheets():\n \n myLibrary = ps.library(name=\"Test\", overwrite=True)\n \n # Test datasheets method inputs\n with pytest.raises(TypeError, match=\"name must be a String\"):\n myLibrary.datasheets(name=1)\n \n with pytest.raises(TypeError, match=\"summary must be a Logical or 'CORE'\"):\n myLibrary.datasheets(summary=1)\n \n with pytest.raises(TypeError, match=\"optional must be a Logical\"):\n myLibrary.datasheets(optional=[1, 2, 3])\n \n with pytest.raises(TypeError, match=\"filter_column must be a String\"):\n myLibrary.datasheets(filter_column=1)\n \n with pytest.raises(\n RuntimeError,\n match=\"The scope must be 'Library', 'Project, or 'Scenario'\"):\n myLibrary.datasheets(scope=\"test\")\n \n with pytest.raises(\n RuntimeError,\n match=\"The data sheet does not exist: stsim_test\"):\n myLibrary.datasheets(name=\"test\")\n \n with pytest.raises(\n ValueError,\n match=\"filter column Test not in Datasheet stsim_RunControl\"):\n myLibrary.datasheets(name=\"RunControl\", filter_column=\"Test\",\n filter_value=1)\n \n # Test datasheets method outputs\n assert isinstance(myLibrary.datasheets(), pd.DataFrame)\n assert isinstance(myLibrary.datasheets(name=\"core_Backup\"), pd.DataFrame)\n assert isinstance(myLibrary.datasheets(summary=False), list)\n assert len(myLibrary.datasheets().columns) == 3\n assert len(myLibrary.datasheets(optional=True).columns) == 6\n assert myLibrary.datasheets(name=\"core_Backup\", empty=True).empty\n assert not myLibrary.datasheets().equals(\n myLibrary.datasheets(scope=\"Project\"))\n assert not myLibrary.datasheets().equals(\n myLibrary.datasheets(scope=\"Scenario\"))\n \ndef test_library_delete():\n \n myLibrary = ps.library(name=\"Test\", overwrite=True)\n myLibrary.projects(name=\"test\")\n \n # Test delete method\n with pytest.raises(\n TypeError,\n match=\"project must be a Project instance, Integer, or String\"):\n myLibrary.delete(project=1.5)\n \n with pytest.raises(\n TypeError,\n match=\"scenario must be a Scenario instance, Integer, or String\"):\n myLibrary.delete(scenario=1.5)\n \n with pytest.raises(TypeError, match=\"force must be a Logical\"):\n myLibrary.delete(force=\"True\")\n \n with pytest.raises(ValueError, match=\"Project ID 2 does not exist\"):\n myLibrary.delete(project=2)\n \n with pytest.raises(ValueError, match=\"project dne does not exist\"):\n myLibrary.delete(project=\"dne\")\n \n with pytest.raises(ValueError, match=\"Scenario ID 50 does not exist\"):\n myLibrary.delete(scenario=50)\n \n with pytest.raises(ValueError, match=\"scenario dne does not exist\"):\n myLibrary.delete(scenario=\"dne\")\n \n myLibrary.delete(project=\"test\", force=True)\n assert myLibrary._Library__projects.empty\n assert \"test\" not in myLibrary.projects().Name.values\n \n myLibrary.scenarios(name=\"test\")\n myLibrary.delete(scenario=\"test\", force=True)\n assert \"test\" not in myLibrary.scenarios().Name.values\n \ndef test_library_save_datasheet():\n \n myLibrary = ps.library(name=\"Test\")\n \n # Test save_datasheet method\n with pytest.raises(\n TypeError,\n match=\"missing 1 required positional argument:\"):\n myLibrary.save_datasheet(name=1)\n \n with 
pytest.raises(\n TypeError,\n match=\"missing 1 required positional argument:\"):\n myLibrary.save_datasheet(data=1)\n \n with pytest.raises(TypeError, match=\"name must be a String\"):\n myLibrary.save_datasheet(name=1, data=1)\n \n with pytest.raises(TypeError, match=\"data must be a pandas DataFrame\"):\n myLibrary.save_datasheet(name=\"test\", data=1)\n \n with pytest.raises(TypeError, match=\"scope must be a String\"):\n myLibrary.save_datasheet(name=\"test\", data=pd.DataFrame(), scope=1)\n \n with pytest.raises(RuntimeError,\n match=\"The data sheet does not exist: stsim_test\"):\n myLibrary.save_datasheet(name=\"test\", data=pd.DataFrame())\n \n with pytest.raises(\n RuntimeError,\n match=\"The header references a column that does not belong\"):\n random_df = pd.DataFrame({\"col1\": [1], \"col2\": [2]})\n myLibrary.save_datasheet(name=\"core_Backup\", data=random_df)\n \n initial_core_backup = myLibrary.datasheets(name=\"core_Backup\")\n assert initial_core_backup[\"IncludeOutput\"].isna().values[0]\n \n initial_core_backup[\"IncludeOutput\"] = \"Yes\"\n myLibrary.save_datasheet(name=\"core_Backup\", data=initial_core_backup)\n modified_core_backup = myLibrary.datasheets(name=\"core_Backup\")\n assert (modified_core_backup[\"IncludeOutput\"] == \"Yes\").item() \n \ndef test_library_run():\n \n mySession = ps.Session()\n mySession.add_packages(\"helloworldSpatial\")\n myLibrary = ps.library(name=\"Test\", package=\"helloworldSpatial\",\n template=\"example-library\", overwrite=True,\n forceUpdate=True)\n \n # Test run method\n with pytest.raises(\n TypeError,\n match=\"must be Scenario instance, String, Integer, or List\"):\n myLibrary.run(scenarios=pd.DataFrame())\n\n with pytest.raises(\n TypeError,\n match=\"project must be Project instance, String, or Integer\"):\n myLibrary.run(project=[1])\n \n with pytest.raises(\n TypeError,\n match=\"jobs must be an Integer\"):\n myLibrary.run(jobs=\"1\")\n \n runcontrol = myLibrary.datasheets(\"RunControl\", True, False, False,\n \"Scenario\", None, None, False, False, 1)\n runcontrol[\"MaximumIteration\"] = 2\n runcontrol[\"MaximumTimestep\"] = 2\n myLibrary.save_datasheet(\"RunControl\", runcontrol, \"Scenario\", 1)\n \n myLibrary.run()\n assert len(myLibrary.scenarios()) == 2\n assert myLibrary.scenarios().iloc[1][\"IsResult\"] == \"Yes\"\n \n myLibrary.run(project=1)\n assert len(myLibrary.scenarios()) == 3\n assert myLibrary.scenarios().iloc[2][\"IsResult\"] == \"Yes\"\n \n myLibrary.run(project=1, scenarios=1)\n assert len(myLibrary.scenarios()) == 4 \n assert myLibrary.scenarios().iloc[3][\"IsResult\"] == \"Yes\"\n \n myLibrary.projects(name=\"New Project\")\n with pytest.raises(\n ValueError,\n match=\"Must specify project when > 1 Project in the Library\"):\n myLibrary.run()\n \ndef test_library_addons_functions():\n \n myLibrary = ps.library(name=\"stsim_test\", package=\"stsim\", overwrite=True)\n \n # Test enable_addons method\n with pytest.raises(TypeError,\n match=\"name must be a String or List of Strings\"):\n myLibrary.enable_addons(name=1)\n \n with pytest.raises(TypeError,\n match=\"all elements in name must be Strings\"):\n myLibrary.enable_addons(name=[1, True])\n \n myLibrary.enable_addons(\"stsimsf\")\n stsimsf_info = myLibrary.addons[myLibrary.addons[\"Name\"] == \"stsimsf\"]\n assert stsimsf_info[\"Enabled\"].item() == \"Yes\" \n \n # Test disable_addons method\n with pytest.raises(TypeError,\n match=\"name must be a String or List of Strings\"):\n myLibrary.disable_addons(name=1)\n \n with 
pytest.raises(TypeError,\n match=\"all elements in name must be Strings\"):\n myLibrary.disable_addons(name=[1, True])\n \n myLibrary.disable_addons(\"stsimsf\")\n stsimsf_info = myLibrary.addons[myLibrary.addons[\"Name\"] == \"stsimsf\"]\n assert stsimsf_info[\"Enabled\"].item() == \"No\" \n \ndef test_project_attributes():\n \n myLibrary = ps.library(name=\"Test\", package=\"helloworldSpatial\",\n overwrite=True)\n myProject = myLibrary.projects(name=\"Definitions\")\n \n # Check attributes\n assert isinstance(myProject.pid, int)\n assert myProject.pid == 1\n assert isinstance(myProject.name, str)\n assert myProject.name == \"Definitions\"\n assert isinstance(myProject.library, ps.Library)\n \ndef test_project_scenarios(): \n \n myLibrary = ps.library(name=\"Test\", package=\"helloworldSpatial\",\n overwrite=True)\n myProject = myLibrary.projects(name=\"Definitions\")\n \n # Test scenarios method\n with pytest.raises(\n TypeError,\n match=\"name must be a String, Integer, or List of these\"):\n myProject.scenarios(name=pd.DataFrame())\n \n with pytest.raises(TypeError, match=\"sid must be an Integer\"):\n myProject.scenarios(sid=\"1\")\n \n with pytest.raises(TypeError, match=\"optional must be a Logical\"):\n myProject.scenarios(optional=1)\n \n with pytest.raises(TypeError, match=\"summary must be a Logical\"):\n myProject.scenarios(summary=\"1\")\n \n assert isinstance(myProject.scenarios(), pd.DataFrame)\n assert isinstance(myProject.scenarios(summary=False), list)\n assert myProject.scenarios().empty\n assert len(myProject.scenarios().columns) == 4\n assert len(myProject.scenarios(optional=True).columns) == 11 \n assert isinstance(myProject.scenarios(name=\"test\"), ps.Scenario)\n assert len(myProject.scenarios()) == 1\n assert isinstance(myProject.scenarios(sid=1), ps.Scenario) \n\ndef test_project_datasheets():\n\n myLibrary = ps.library(name=\"Test\", package=\"helloworldSpatial\")\n myProject = myLibrary.projects(name=\"Definitions\")\n \n # Test Datasheets\n with pytest.raises(TypeError, match=\"name must be a String\"):\n myProject.datasheets(name=1)\n \n with pytest.raises(TypeError, match=\"summary must be a Logical or 'CORE'\"):\n myProject.datasheets(summary=\"1\")\n \n with pytest.raises(TypeError, match=\"optional must be a Logical\"):\n myProject.datasheets(optional=1)\n \n with pytest.raises(TypeError, match=\"empty must be a Logical\"):\n myProject.datasheets(empty=1)\n \n assert isinstance(myProject.datasheets(), pd.DataFrame)\n assert isinstance(myProject.datasheets(summary=False), list)\n assert myProject.datasheets(\n summary='CORE')[\"Name\"].iloc[0].startswith(\"core\")\n assert len(myProject.datasheets().columns) == 3\n assert len(myProject.datasheets(optional=True).columns) == 6\n assert myProject.datasheets(name=\"core_Transformer\").empty is False\n assert myProject.datasheets(name=\"core_Transformer\", empty=True).empty\n \ndef test_project_save_datasheet():\n \n myLibrary = ps.library(name=\"Test\", package=\"helloworldSpatial\")\n myProject = myLibrary.projects(name=\"Definitions\")\n \n # Test save_datasheet\n with pytest.raises(\n TypeError,\n match=\"missing 1 required positional argument\"):\n myProject.save_datasheet(name=1)\n \n with pytest.raises(\n TypeError,\n match=\"missing 1 required positional argument:\"):\n myProject.save_datasheet(data=1)\n \n with pytest.raises(TypeError, match=\"name must be a String\"):\n myProject.save_datasheet(name=1, data=1)\n \n with pytest.raises(TypeError, match=\"data must be a pandas DataFrame\"):\n 
myProject.save_datasheet(name=\"test\", data=1)\n \n assert myProject.datasheets(name=\"core_AutoGenTag\").empty\n test_datasheet = pd.DataFrame({\"Name\": [\"test\"], \"AutoGenTagKey\": [\"key\"],\n \"AutoGenTagValue\": [1]})\n myProject.save_datasheet(name=\"core_AutoGenTag\", data=test_datasheet)\n assert myProject.datasheets(name=\"core_AutoGenTag\").empty is False\n\ndef test_project_copy_delete():\n \n myLibrary = ps.library(name=\"Test\", package=\"helloworldSpatial\")\n myProject = myLibrary.projects(name=\"Definitions\")\n\n # Test copy\n with pytest.raises(TypeError, match=\"name must be a String\"):\n myProject.copy(name=1)\n \n myNewProj = myProject.copy()\n assert myNewProj.name == \"Definitions - Copy\"\n assert myNewProj.datasheets(name=\"core_AutoGenTag\").empty is False\n \n myNewerProj = myProject.copy(name=\"Definitions 2\")\n assert myNewerProj.name == \"Definitions 2\"\n assert myNewerProj.datasheets(name=\"core_AutoGenTag\").empty is False\n \n # Test delete \n with pytest.raises(\n TypeError,\n match=\"scenario must be a Scenario instance, Integer, or String\"):\n myNewProj.delete(scenario=[1]) \n \n with pytest.raises(RuntimeError, match=\"The project does not exist\"):\n myNewProj.delete(force=True)\n myNewProj.scenarios()\n \n\ndef test_scenarios_attributes():\n\n myLibrary = ps.library(name=\"Test\", package=\"helloworldSpatial\",\n overwrite=True)\n myScenario = myLibrary.scenarios(\"Test Scenario\")\n \n # Check attributes\n assert isinstance(myScenario.sid, int) or isinstance(\n myScenario.sid, np.int64)\n assert myScenario.sid == 1\n assert isinstance(myScenario.name, str)\n assert myScenario.name == \"Test Scenario\"\n assert isinstance(myScenario.project, ps.Project)\n assert isinstance(myScenario.library, ps.Library)\n assert isinstance(myScenario.is_result, str)\n assert myScenario.is_result == \"No\"\n assert math.isnan(myScenario.parent_id)\n \ndef test_scenario_datasheets():\n \n myLibrary = ps.library(name=\"ds_test\", package=\"helloworldSpatial\",\n overwrite=True, template=\"example-library\",\n forceUpdate=True)\n myScenario = myLibrary.scenarios(sid=1)\n \n # Test datasheets\n with pytest.raises(TypeError, match=\"name must be a String\"):\n myScenario.datasheets(name=1)\n \n with pytest.raises(TypeError, match=\"summary must be a Logical or 'CORE'\"):\n myScenario.datasheets(summary=\"1\")\n \n with pytest.raises(TypeError, match=\"optional must be a Logical\"):\n myScenario.datasheets(optional=1)\n \n with pytest.raises(TypeError, match=\"empty must be a Logical\"):\n myScenario.datasheets(empty=1)\n \n assert isinstance(myScenario.datasheets(), pd.DataFrame)\n assert isinstance(myScenario.datasheets(summary=False), list)\n assert myScenario.datasheets(\n summary='CORE')[\"Name\"].iloc[0].startswith(\"core\")\n assert len(myScenario.datasheets().columns) == 3\n assert len(myScenario.datasheets(optional=True).columns) == 7\n assert isinstance(myScenario.datasheets(\n name=\"RunControl\",\n filter_column=\"MinimumIteration\", \n filter_value=\"1\"), pd.DataFrame)\n assert len(myScenario.datasheets(\n name=\"RunControl\",\n filter_column=\"MinimumIteration\",\n filter_value=\"1\") == 1)\n assert myScenario.datasheets(\n name=\"RunControl\",\n filter_column=\"MinimumIteration\",\n filter_value=\"2\").empty \n assert myScenario.datasheets(name=\"InputDatasheet\").empty is False\n assert myScenario.datasheets(name=\"InputDatasheet\", empty=True).empty\n\ndef test_scenario_save_datasheet():\n\n myLibrary = ps.library(name=\"ds_test\", 
package=\"helloworldSpatial\",\n overwrite=True, template=\"example-library\",\n forceUpdate=True)\n myScenario = myLibrary.scenarios(sid=1) \n\n # Test save_datasheet\n with pytest.raises(\n TypeError,\n match=\"missing 1 required positional argument\"):\n myScenario.save_datasheet(name=1)\n \n with pytest.raises(\n TypeError,\n match=\"missing 1 required positional argument:\"):\n myScenario.save_datasheet(data=1)\n \n with pytest.raises(TypeError, match=\"name must be a String\"):\n myScenario.save_datasheet(name=1, data=1)\n \n with pytest.raises(TypeError, match=\"data must be a pandas DataFrame\"):\n myScenario.save_datasheet(name=\"test\", data=1)\n \n runcontrol = myScenario.datasheets(name=\"RunControl\")\n runcontrol[\"MaximumIteration\"] = 2\n runcontrol[\"MaximumTimestep\"] = 2\n myScenario.save_datasheet(\"RunControl\", runcontrol)\n\n assert myScenario.datasheets(\n name=\"RunControl\")[\"MaximumIteration\"].item() == 2\n assert myScenario.datasheets(\n name=\"RunControl\")[\"MaximumTimestep\"].item() == 2\n \ndef test_scenario_run_and_results():\n \n myLibrary = ps.library(name=\"Test\", overwrite=True,\n package=\"helloworldSpatial\",\n template=\"example-library\",\n forceUpdate=True)\n myScenario = myLibrary.scenarios(sid=1)\n runcontrol = myScenario.datasheets(name=\"RunControl\")\n runcontrol[\"MaximumIteration\"] = 2\n runcontrol[\"MaximumTimestep\"] = 2\n myScenario.save_datasheet(\"RunControl\", runcontrol)\n \n # Test run\n with pytest.raises(TypeError, match=\"jobs must be an Integer\"):\n myScenario.run(jobs=\"1\")\n \n myScenario.run(jobs=2)\n assert len(myLibrary.scenarios()) == 2 \n assert myLibrary.scenarios().iloc[1][\"IsResult\"] == \"Yes\"\n \n # Test results\n with pytest.raises(TypeError, match=\"Scenario ID must be an Integer\"):\n myScenario.results(sid=\"5\")\n \n with pytest.raises(ValueError, match=\"not a Results Scenario\"):\n myScenario.results(sid=1)\n \n assert isinstance(myScenario.results(), pd.DataFrame)\n assert (myScenario.results()[\"IsResult\"] == \"Yes\").all()\n res_sid = myLibrary.scenarios().iloc[1][\"ScenarioID\"].item()\n assert isinstance(myScenario.results(sid=res_sid), ps.Scenario) \n \n # Test run_log\n myResultsScenario = myScenario.results(sid=res_sid)\n assert isinstance(myResultsScenario.run_log(), pd.DataFrame)\n assert not isinstance(myScenario.run_log(), pd.DataFrame)\n \n # Test datasheet_rasters\n with pytest.raises(TypeError, match=\"datasheet must be a String\"):\n myResultsScenario.datasheet_rasters(datasheet=1, column=\"test\")\n \n with pytest.raises(TypeError, match=\"column must be a String\"):\n myResultsScenario.datasheet_rasters(datasheet=\"test\", column=1)\n \n with pytest.raises(TypeError, match=\"iteration must be an Integer\"):\n myResultsScenario.datasheet_rasters(datasheet=\"test\", column=\"test\",\n iteration=\"test\")\n \n with pytest.raises(TypeError, match=\"timestep must be an Integer\"):\n myResultsScenario.datasheet_rasters(datasheet=\"test\", column=\"test\",\n timestep=\"test\")\n \n with pytest.raises(ValueError,\n match=\"Scenario must be a Results Scenario\"):\n myScenario.datasheet_rasters(datasheet=\"test\", column=\"test\")\n \n with pytest.raises(RuntimeError,\n match=\"The data sheet does not exist\"):\n myResultsScenario.datasheet_rasters(datasheet=\"test\", column=\"test\")\n \n with pytest.raises(ValueError,\n match=\"No raster columns found in Datasheet\"):\n myResultsScenario.datasheet_rasters(datasheet=\"OutputDatasheet\")\n \n with pytest.raises(\n ValueError,\n match=\"Column 
test not found in Datasheet\"):\n myResultsScenario.datasheet_rasters(datasheet=\"IntermediateDatasheet\",\n column=\"test\")\n \n with pytest.raises(\n ValueError, \n match=\"Specified iteration above range of plausible values\"):\n myResultsScenario.datasheet_rasters(datasheet=\"IntermediateDatasheet\",\n column=\"OutputRasterFile\",\n iteration=3) \n \n with pytest.raises(ValueError, match=\"iteration cannot be below 1\"):\n myResultsScenario.datasheet_rasters(datasheet=\"IntermediateDatasheet\",\n column=\"OutputRasterFile\",\n iteration=0)\n \n with pytest.raises(ValueError,\n match=\"Some iteration values outside of range\"):\n myResultsScenario.datasheet_rasters(datasheet=\"IntermediateDatasheet\",\n column=\"OutputRasterFile\",\n iteration=[1, 2, 3]) \n \n with pytest.raises(\n ValueError, \n match=\"Specified timestep above range of plausible values\"):\n myResultsScenario.datasheet_rasters(datasheet=\"IntermediateDatasheet\",\n column=\"OutputRasterFile\",\n timestep=3) \n \n with pytest.raises(\n ValueError, \n match=\"Specified timestep below range of plausible values\"):\n myResultsScenario.datasheet_rasters(datasheet=\"IntermediateDatasheet\",\n column=\"OutputRasterFile\",\n timestep=0) \n \n with pytest.raises(ValueError,\n match=\"Some timestep values outside of range\"):\n myResultsScenario.datasheet_rasters(datasheet=\"IntermediateDatasheet\",\n column=\"OutputRasterFile\",\n timestep=[1, 2, 3]) \n \n with pytest.raises(\n ValueError, \n match = \"Must specify a filter_value to filter the filter_column\"):\n myResultsScenario.datasheet_rasters(\n datasheet=\"IntermediateDatasheet\",\n column = None,\n filter_column=\"IntermediateDatasheetID\") \n \n with pytest.raises(\n ValueError, \n match = \"filter column test not in Datasheet\"\n ):\n myResultsScenario.datasheet_rasters(\n datasheet=\"IntermediateDatasheet\",\n column = None,\n filter_column=\"test\",\n filter_value=\"test\") \n \n with pytest.raises(\n RuntimeError, \n match=\"Cannot find a value for: test\"):\n myResultsScenario.datasheet_rasters(\n datasheet=\"IntermediateDatasheet\",\n column = None,\n filter_column=\"IntermediateDatasheetID\",\n filter_value=\"test\") \n \n raster1 = myResultsScenario.datasheet_rasters(\n datasheet=\"IntermediateDatasheet\", column=\"OutputRasterFile\",\n iteration=1, timestep=1)\n assert isinstance(raster1, ps.Raster)\n \n raster2 = myResultsScenario.datasheet_rasters(\n datasheet=\"IntermediateDatasheet\", column=\"OutputRasterFile\")\n assert len(raster2) == 4\n assert all([isinstance(x, ps.Raster) for x in raster2])\n \n raster3 = myResultsScenario.datasheet_rasters(\n datasheet = \"IntermediateDatasheet\", \n column = None,\n filter_column=\"IntermediateDatasheetID\",\n filter_value=2)\n assert isinstance(raster3, ps.Raster)\n \n # Test raster class attributes\n assert os.path.isfile(raster1.source)\n assert isinstance(raster1.name, str)\n assert raster1.name.endswith(\".it1.ts1\")\n assert isinstance(raster1.dimensions, dict)\n assert all([\n x in raster1.dimensions.keys() for x in [\n \"height\", \"width\", \"cells\"]])\n assert isinstance(raster1.extent, dict)\n assert all([\n x in raster1.extent.keys() for x in [\n \"xmin\", \"xmax\", \"ymin\", \"ymax\"]]) \n assert isinstance(raster1.crs, rasterio.crs.CRS)\n assert isinstance(raster1.values(), np.ndarray)\n assert isinstance(raster1.values(band=1), np.ndarray)\n \ndef test_scenario_copy_dep_delete():\n \n myLibrary = ps.library(name=\"Test\", package=\"helloworldSpatial\",\n overwrite=True, 
template=\"example-library\",\n forceUpdate=True)\n myScenario = myLibrary.scenarios(name=\"My Scenario\")\n runcontrol = myScenario.datasheets(name=\"RunControl\")\n runcontrol[\"MaximumIteration\"] = 2\n runcontrol[\"MaximumTimestep\"] = 2\n myScenario.save_datasheet(\"RunControl\", runcontrol)\n \n # Test copy\n with pytest.raises(TypeError, match=\"name must be a String\"):\n myScenario.copy(name=1)\n \n myNewScn = myScenario.copy()\n assert myNewScn.name == \"My Scenario - Copy\"\n assert myNewScn.datasheets(name=\"RunControl\").empty is False\n assert myNewScn.datasheets(\n name=\"RunControl\")[\"MaximumIteration\"].item() == 2\n \n myNewerScn = myScenario.copy(name=\"My Scenario 2\")\n assert myNewerScn.name == \"My Scenario 2\"\n assert myNewerScn.datasheets(name=\"RunControl\").empty is False\n assert myNewerScn.datasheets(\n name=\"RunControl\")[\"MaximumIteration\"].item() == 2\n \n # Test dependencies\n with pytest.raises(\n TypeError,\n match=\"dependency must be a Scenario, String, Integer, or List\"):\n myNewScn.dependencies(dependency=1.5)\n \n with pytest.raises(\n TypeError,\n match=\"remove must be a Logical\"):\n myNewScn.dependencies(dependency=myNewerScn.sid, remove=\"True\")\n \n assert myNewScn.dependencies().empty is True\n myNewScn.dependencies(dependency=myNewerScn)\n assert myNewScn.dependencies().empty is False\n assert myNewScn.dependencies().Name.item() == \"My Scenario 2\"\n \n myNewScn.dependencies(dependency=myNewerScn.sid, remove=True, force=True)\n assert myNewScn.dependencies().empty is True\n \n myNewScn.dependencies(dependency=myNewerScn.name)\n assert myNewScn.dependencies().empty is False\n \n sameNameScn = myScenario.copy(name=\"My Scenario 2\")\n with pytest.raises(\n ValueError,\n match=\"dependency name not unique, use ID or Scenario\"):\n myNewScn.dependencies(dependency=sameNameScn.name)\n \n # Test ignore_dependencies\n with pytest.raises(TypeError, match=\"value must be a String\"):\n myNewScn.ignore_dependencies(value=1)\n \n assert math.isnan(myNewScn.ignore_dependencies())\n \n myNewScn.ignore_dependencies(value=\"RunControl\")\n myNewScn.ignore_dependencies()\n \n assert myNewScn.ignore_dependencies() == \"'RunControl'\"\n \n myNewScn.ignore_dependencies(value=\"InputDatasheet,OutputDatasheet\")\n assert myNewScn.ignore_dependencies() == \"'InputDatasheet,OutputDatasheet'\"\n \n # Test merge_dependencies\n with pytest.raises(TypeError, match=\"value must be a Logical\"):\n myNewScn.merge_dependencies(value=1)\n \n assert myNewScn.merge_dependencies() == \"No\"\n \n myNewScn.merge_dependencies(value=True)\n assert myNewScn.merge_dependencies() == \"Yes\"\n \n # Test delete \n with pytest.raises(RuntimeError, match=\"The scenario does not exist\"):\n myNewScn.delete(force=True)\n myNewScn.run()\n "
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
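The row above records a pytest suite whose only tracked API is pandas.DataFrame: most of its assertions either check that a method returns a DataFrame or use pytest.raises with a match pattern to verify argument validation. A minimal, self-contained sketch of that testing pattern, using plain pandas and pytest only (the pysyncrosim-specific names such as ps.library appear only in the row above and are not reproduced here; summarize is a hypothetical stand-in):

import pandas as pd
import pytest

def summarize(records, summary=True):
    # Toy stand-in for the validated accessors in the row above:
    # type-check the flag, then return either a DataFrame or a list of records.
    if not isinstance(summary, bool):
        raise TypeError("summary must be a Logical")
    df = pd.DataFrame(records)
    return df if summary else df.to_dict("records")

def test_summarize_validates_and_returns_dataframe():
    records = [{"Name": "test", "ProjectID": 1}]

    # An invalid flag type is rejected with a matching message, mirroring the
    # pytest.raises(TypeError, match=...) checks used throughout the suite.
    with pytest.raises(TypeError, match="summary must be a Logical"):
        summarize(records, summary="False")

    # A valid call returns a pandas.DataFrame with the expected columns.
    out = summarize(records)
    assert isinstance(out, pd.DataFrame)
    assert list(out.columns) == ["Name", "ProjectID"]

    # summary=False switches to the list form, matching the suite's
    # summary=False assertions.
    assert isinstance(summarize(records, summary=False), list)

Run with pytest -q; the single test exercises both the raised-error branch and the DataFrame branch.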
giserh/book-python | [
"ebd4e70cea1dd56986aa8efbae3629ba3f1ba087"
] | [
"data-vizualization/src/plotly-timeseries-rangeslider.py"
] | [
"import plotly.offline as py\nimport plotly.graph_objs as go\n\nimport pandas as pd\n\ndf = pd.read_csv(\"https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv\")\n\ntrace_high = go.Scatter(\n x=df.Date,\n y=df['AAPL.High'],\n name=\"AAPL High\",\n line=dict(color='#17BECF'),\n opacity=0.8)\n\ntrace_low = go.Scatter(\n x=df.Date,\n y=df['AAPL.Low'],\n name=\"AAPL Low\",\n line=dict(color='#7F7F7F'),\n opacity=0.8)\n\ndata = [trace_high, trace_low]\n\nlayout = dict(\n title='Time Series with Rangeslider',\n xaxis=dict(\n rangeselector=dict(\n buttons=list([\n dict(count=1,\n label='1m',\n step='month',\n stepmode='backward'),\n dict(count=6,\n label='6m',\n step='month',\n stepmode='backward'),\n dict(step='all')\n ])\n ),\n rangeslider=dict(),\n type='date'\n )\n)\n\nfig = dict(data=data, layout=layout)\npy.iplot(fig, filename=\"Time Series with Rangeslider\")\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
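The row above pairs a Plotly range-slider example with a single tracked API, pandas.read_csv, and a list of candidate pandas versions. A minimal sketch of just that call, assuming the finance-charts-apple.csv URL from the row is reachable (only pandas is used; the Plotly trace and layout objects stay as written in the original snippet):

import pandas as pd

# Same public dataset referenced in the row above.
URL = ("https://raw.githubusercontent.com/plotly/datasets/"
       "master/finance-charts-apple.csv")

# parse_dates converts the Date column to datetime64, which is what a
# date-typed x-axis (type='date' in the layout above) expects to receive.
df = pd.read_csv(URL, parse_dates=["Date"])

# Quick sanity checks on the columns the original traces read.
print(df[["Date", "AAPL.High", "AAPL.Low"]].head())
print(df["Date"].dtype)  # expected: datetime64[ns]

The call should behave the same across the pandas versions listed for this row, since read_csv with parse_dates is available throughout that range.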
PeterZZQ/scDART_test | [
"61371fa653a585ccc3a981d8d429b7dbd4d89dfc"
] | [
"test/bmk_snare.py"
] | [
"# In[]\nimport sys, os\nsys.path.append('../')\nsys.path.append('../src/')\n\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import sparse\nimport networkx as nx\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nimport time\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import MDS\n\nimport diffusion_dist as diff\nimport dataset as dataset\nimport model as model\nimport loss as loss\nimport train\nimport TI as ti\nimport benchmark as bmk\nimport de_analy as de\nimport utils as utils\nimport post_align as palign\n\nfrom umap import UMAP\nfrom scipy.sparse import load_npz\nfrom scipy.stats import pearsonr, spearmanr\nfrom matplotlib.ticker import FormatStrFormatter\nimport seaborn as sns\n\nfrom adjustText import adjust_text\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nimport scanpy as sc \nimport anndata \n\nplt.rcParams[\"font.size\"] = 20\n\n# In[] scan and find the one with the highest neighborhood overlap score\nseeds = [0, 1, 2]\nlatent_dims = [4, 8, 32]\nreg_ds = [1, 10]\nreg_gs = [0.01, 1, 10]\nreg_mmds = [1, 10, 20, 30]\n\nlatent_dim = latent_dims[0]\nreg_d = reg_ds[0]\nreg_g = reg_gs[1]\n# harder to merge, need to make mmd loss larger\nreg_mmd = reg_mmds[1]\nseed = seeds[0]\n\nlearning_rate = 3e-4\nn_epochs = 500\nuse_anchor = False\nts = [30, 50, 70]\nuse_potential = True\nnorm = \"l1\"\n\ncounts_rna = pd.read_csv(\"../data/snare-seq-1000/counts_rna.csv\", index_col = 0)\ncounts_atac = pd.read_csv(\"../data/snare-seq-1000/counts_atac.csv\", index_col = 0)\nlabel_rna = pd.read_csv(\"../data/snare-seq-1000/anno.txt\", header = None)\nlabel_atac = pd.read_csv(\"../data/snare-seq-1000/anno.txt\", header = None)\nrna_dataset = dataset.dataset(counts = counts_rna.values, anchor = None)\natac_dataset = dataset.dataset(counts = counts_atac.values, anchor = None)\ncoarse_reg = torch.FloatTensor(pd.read_csv(\"../data/snare-seq-1000/region2gene.csv\", sep = \",\", index_col = 0).values).to(device)\n\nbatch_size = int(max([len(rna_dataset),len(atac_dataset)])/4)\n\n\nprint(\"Random seed: \" + str(seed))\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\nnp.random.seed(seed)\n\ntrain_rna_loader = DataLoader(rna_dataset, batch_size = batch_size, shuffle = True)\ntrain_atac_loader = DataLoader(atac_dataset, batch_size = batch_size, shuffle = True)\n\nEMBED_CONFIG = {\n 'gact_layers': [atac_dataset.counts.shape[1], 1024, 512, rna_dataset.counts.shape[1]], \n 'proj_layers': [rna_dataset.counts.shape[1], 512, 128, latent_dim], # number of nodes in each \n 'learning_rate': learning_rate,\n 'n_epochs': n_epochs + 1,\n 'use_anchor': use_anchor,\n 'reg_d': reg_d,\n 'reg_g': reg_g,\n 'reg_mmd': reg_mmd,\n 'l_dist_type': 'kl',\n 'device': device\n}\n\n# initialize the model\ngene_act = model.gene_act(features = EMBED_CONFIG[\"gact_layers\"], dropout_rate = 0.0, negative_slope = 0.2).to(device)\nencoder = model.Encoder(features = EMBED_CONFIG[\"proj_layers\"], dropout_rate = 0.0, negative_slope = 0.2).to(device)\nmodel_dict = {\"gene_act\": gene_act, \"encoder\": encoder}\n\nmodel_dict = torch.load(\"../test/results_snare/models_1000/model_\" + str(latent_dim) + \"_\" + str(reg_d) + \"_\" + str(reg_g) + \"_\" + str(reg_mmd) + \"_\" + str(seed) + \"_\" + norm + \".pth\", map_location = device)\n\nwith torch.no_grad():\n z_rna = model_dict[\"encoder\"](rna_dataset.counts.to(device))\n z_atac = 
model_dict[\"encoder\"](model_dict[\"gene_act\"](atac_dataset.counts.to(device)))\n\npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna.cpu().numpy(), z_atac.cpu().numpy()), axis = 0))\nz_rna_pca = z[:z_rna.shape[0],:]\nz_atac_pca = z[z_rna.shape[0]:,:]\nutils.plot_latent(z1 = z_rna_pca, z2 = z_atac_pca, anno1 = label_rna, \n anno2 = label_atac, mode = \"joint\", save = \"results_snare/z_joint.png\", \n figsize = (15,7), axis_label = \"PCA\")\nutils.plot_latent(z1 = z_rna_pca, z2 = z_atac_pca, anno1 = label_rna, \n anno2 = label_atac, mode = \"modality\", save = \"results_snare/z_mod.png\", \n figsize = (15,7), axis_label = \"PCA\")\n\n\nz_rna, z_atac = palign.match_alignment(z_rna = z_rna.cpu(), z_atac = z_atac.cpu(), k = 10)\nz_atac, z_rna = palign.match_alignment(z_rna = z_atac.cpu(), z_atac = z_rna.cpu(), k = 10)\nz_rna = z_rna.cpu().numpy()\nz_atac = z_atac.cpu().numpy()\n\n# post-maching\npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna, z_atac), axis = 0))\nz_rna_pca = z[:z_rna.shape[0],:]\nz_atac_pca = z[z_rna.shape[0]:,:]\nutils.plot_latent(z1 = z_rna_pca, z2 = z_atac_pca, anno1 = label_rna, \n anno2 = label_atac, mode = \"joint\", save = \"results_snare/z_joint_post.png\", \n figsize = (15,7), axis_label = \"PCA\")\nutils.plot_latent(z1 = z_rna_pca, z2 = z_atac_pca, anno1 = label_rna, \n anno2 = label_atac, mode = \"modality\", save = \"results_snare/z_mod_post.png\", \n figsize = (15,7), axis_label = \"PCA\")\n\numap_op = UMAP(n_components = 2, min_dist = 0.8, random_state = 0)\nz = umap_op.fit_transform(np.concatenate((z_rna, z_atac), axis = 0))\nz_rna_umap = z[:z_rna.shape[0],:]\nz_atac_umap = z[z_rna.shape[0]:,:]\nutils.plot_latent(z1 = z_rna_umap, z2 = z_atac_umap, anno1 = label_rna, \n anno2 = label_atac, mode = \"joint\", save = \"results_snare/z_joint_post_umap.png\", \n figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(z1 = z_rna_umap, z2 = z_atac_umap, anno1 = label_rna, \n anno2 = label_atac, mode = \"modality\", save = \"results_snare/z_mod_post_umap.png\", \n figsize = (15,7), axis_label = \"UMAP\")\n\n\n# # run diffusion map\n# adata_scdart = anndata.AnnData(X = np.concatenate((z_rna,z_atac), axis = 0))\n# # adata_scdart = anndata.AnnData(X = np.concatenate((z_rna_pca,z_atac_pca), axis = 0))\n\n# sc.pp.neighbors(adata_scdart, use_rep = 'X', n_neighbors = 30, random_state = 0)\n# sc.tl.diffmap(adata_scdart, random_state = 0)\n# diffmap_latent = adata_scdart.obsm[\"X_diffmap\"]\n# utils.plot_latent(diffmap_latent[:z_rna.shape[0],:], diffmap_latent[z_rna.shape[0]:,:], anno1 = rna_dataset.cell_labels, anno2 = atac_dataset.cell_labels, \n# mode = \"joint\", save = \"results_snare/z_joint_post_diffmap.png\", figsize = (15,7), axis_label = \"Diffmap\")\n# utils.plot_latent(diffmap_latent[:z_rna.shape[0],:], diffmap_latent[z_rna.shape[0]:,:], anno1 = rna_dataset.cell_labels, anno2 = atac_dataset.cell_labels, \n# mode = \"modality\", save = \"results_snare/z_mod_post_diffmap.png\", figsize = (15,7), axis_label = \"Diffmap\")\n\nz_destiny = np.load(\"results_snare/models_1000/z_diffmap.npy\")\nutils.plot_latent(z_destiny[:z_rna.shape[0],:], z_destiny[z_rna.shape[0]:,:], anno1 = label_rna, anno2 = label_atac, \nmode = \"joint\", save = \"results_snare/z_joint_post_destiny.png\", figsize = (15,7), axis_label = \"Diffmap\")\nutils.plot_latent(z_destiny[:z_rna.shape[0],:], z_destiny[z_rna.shape[0]:,:], anno1 = label_rna, anno2 = label_atac, \nmode = \"modality\", save = \"results_snare/z_mod_post_destiny.png\", 
figsize = (15,7), axis_label = \"Diffmap\")\n\n# In[] Plot backbone\ndef plot_backbone(z1, z2, T, mean_cluster, groups, anno, mode = \"joint\", save = None, figsize = (20,10), axis_label = \"Latent\", **kwargs):\n _kwargs = {\n \"s\": 10,\n \"alpha\": 0.7,\n \"markerscale\": 6,\n \"fontsize\": 20\n }\n _kwargs.update(kwargs)\n\n fig = plt.figure(figsize = figsize)\n\n if mode == \"joint\":\n ax = fig.add_subplot()\n cluster_types = np.sort(np.unique(groups))\n cmap = plt.cm.get_cmap(\"Paired\", len(np.unique(anno)))\n z = np.concatenate((z1, z2), axis = 0)\n\n for i, cat in enumerate(np.sort(np.unique(anno))):\n idx = np.where(anno == cat)[0]\n ax.scatter(z[idx,0], z[idx,1], color = cmap(i), label = cat, alpha = _kwargs[\"alpha\"], s = _kwargs[\"s\"])\n\n for i in range(T.shape[0]):\n for j in range(T.shape[1]):\n if T[i,j] != 0:\n ax.plot([mean_cluster[i, 0], mean_cluster[j, 0]], [mean_cluster[i, 1], mean_cluster[j, 1]], 'r-')\n \n ax.scatter(mean_cluster[:,0], mean_cluster[:,1], color = \"red\", s = 30)\n \n texts = []\n for i in range(mean_cluster.shape[0]):\n # marker = cluster_types[i].split(\"_\")[0] + \"\\_\" + cluster_types[i].split(\"_\")[1] \n # ax.plot(mean_cluster[i,0] - 0.007, mean_cluster[i,1] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 70)\n texts.append(ax.text(mean_cluster[i,0] - 0.007, mean_cluster[i,1] + 0.001, color = \"black\", s = cluster_types[i], size = 'small', weight = 'bold', in_layout = True))\n\n ax.tick_params(axis = \"both\", which = \"major\", labelsize = 15)\n\n ax.set_xlabel(axis_label + \" 1\", fontsize = 19)\n ax.set_ylabel(axis_label + \" 2\", fontsize = 19)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n ax.legend(bbox_to_anchor=(1.04,1), loc=\"upper left\", fontsize = _kwargs[\"fontsize\"], frameon=False, markerscale = _kwargs[\"markerscale\"])\n\n adjust_text(texts, only_move={'points':'y', 'texts':'y'})\n plt.tight_layout()\n if save:\n fig.savefig(save, bbox_inches = \"tight\")\n \n print(save)\n\ndef backbone_inf(z_rna, z_atac, groups):\n import networkx as nx\n X = np.concatenate((z_rna, z_atac), axis = 0)\n n_clust = np.unique(groups).shape[0]\n\n mean_cluster = [[] for x in range(n_clust)]\n\n for i, cat in enumerate(np.sort(np.unique(groups))):\n idx = np.where(groups == cat)[0]\n mean_cluster[i] = np.mean(X[idx,:], axis = 0)\n\n mst = np.zeros((n_clust,n_clust))\n\n for i in range(n_clust):\n for j in range(n_clust):\n mst[i,j] = np.linalg.norm(np.array(mean_cluster[i]) - np.array(mean_cluster[j]), ord = 2)\n\n G = nx.from_numpy_matrix(-mst)\n T = nx.maximum_spanning_tree(G, weight = 'weight', algorithm = 'kruskal')\n T = nx.to_numpy_matrix(T)\n # conn is the adj of the MST.\n\n return groups, mean_cluster, T\n\n\n# root, manually found\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna, z_atac), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna.shape[0]:]\n\n\npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna, z_atac), axis = 0))\nz_rna_pca = z[:z_rna.shape[0],:]\nz_atac_pca = z[z_rna.shape[0]:,:]\n\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna, z_atac, cell_labels)\nmean_cluster_pca = pca_op.transform(np.array(mean_cluster))\n# # mean_cluster_pca\n# 
mean_cluster_pca = [[] for x in range(len(mean_cluster))]\n# for i, cat in enumerate(np.sort(np.unique(cell_labels))):\n# idx = np.where(cell_labels == cat)[0]\n# mean_cluster_pca[i] = np.mean(z[idx,:], axis = 0)\n# mean_cluster_pca = np.array(mean_cluster_pca)\n\n\nplot_backbone(z_rna_pca, z_atac_pca, mode = \"joint\", mean_cluster = mean_cluster_pca, groups = groups, T = T, figsize=(15,7), save = \"results_snare/backbone.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_pca, z2 = z_atac_pca, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = \"./results_snare/z_pt.png\", figsize = (15,7), axis_label = \"PCA\")\n\n\n# In[] Infer pseudotime\n# infer backbone with leiden clustering\ngroups, mean_cluster, T = ti.backbone_inf(np.concatenate((z_rna, z_atac), axis = 0), resolution = 0.05)\ngroups_rna = groups[:counts_rna.shape[0]]\ngroups_atac = groups[counts_rna.shape[0]:]\nroot_clust = groups[root_cell]\n\n# infer all trajectories\nG = nx.from_numpy_matrix(T, create_using=nx.DiGraph)\nG = nx.dfs_tree(G, source = root_clust)\npaths = []\nfor node in G:\n if G.out_degree(node)==0: #it's a leaf\n paths.append(nx.shortest_path(G, root_clust, node))\n \npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna, z_atac), axis = 0))\nz_rna_pca = z[:z_rna.shape[0],:]\nz_atac_pca = z[z_rna.shape[0]:,:] \nmean_cluster = pca_op.transform(np.array(mean_cluster))\nutils.plot_backbone(z_rna_pca, z_atac_pca, groups = groups, T = T, mean_cluster = mean_cluster, mode = \"joint\", figsize=(10,7), save = None, axis_label = \"PCA\")\n\npseudo_order_rna = np.empty((groups_rna.shape[0], len(paths)))\npseudo_order_rna[:] = np.nan\npseudo_order_rna = pd.DataFrame(data = pseudo_order_rna, index = counts_rna.index.values, columns = np.array([\"traj_\" + str(x) for x in range(len(paths))]))\nfor i, path in enumerate(paths):\n selected_cells = np.concatenate([np.where(groups_rna == x)[0] for x in path], axis = 0)\n pseudo_order_rna.iloc[selected_cells, i] = pt_infer_rna[selected_cells]\n\npseudo_order_atac = np.empty((groups_atac.shape[0], len(paths)))\npseudo_order_atac[:] = np.nan\npseudo_order_atac = pd.DataFrame(data = pseudo_order_atac, index = counts_atac.index.values, columns = np.array([\"traj_\" + str(x) for x in range(len(paths))]))\nfor i, path in enumerate(paths):\n selected_cells = np.concatenate([np.where(groups_atac == x)[0] for x in path], axis = 0)\n pseudo_order_atac.iloc[selected_cells, i] = pt_infer_atac[selected_cells]\n\n# Overall pseudo-order\npseudo_order = pd.concat((pseudo_order_rna, pseudo_order_atac), axis = 0, ignore_index = False)\npseudo_order.to_csv(\"results_snare/pseudo_order.csv\")\n\n# In[] Find de genes\nde_genes = de.de_analy_para(X = counts_rna, pseudo_order = pseudo_order_rna, p_val_t = 0.05, verbose = False, distri = \"normal\", fdr_correct = True, n_jobs = 4)\nfor traj in de_genes.keys():\n genes = np.array([x[\"feature\"] for x in de_genes[traj]])\n p_val = np.array([x[\"p_val\"] for x in de_genes[traj]])\n genes= genes[np.argsort(p_val)]\n p_val = p_val[np.argsort(p_val)]\n de_list = pd.DataFrame.from_dict({\"feature\": genes, \"p-val\": p_val})\n de_list.to_csv(\"./results_snare/de_snare/de_gene_\" + str(traj) + \".csv\")\n\ngenes = [\"Mki67\", \"Fabp7\", \"Eomes\", \"Unc5d\", \"Cux1\", \"Foxp1\"]\nncols = 2\nnrows = np.ceil(len(genes)/2).astype('int32')\nfigsize = (20,15)\nX = counts_rna\nde_feats = de_genes\npseudo_order = pseudo_order_rna\n\nfigs = []\nfor traj_i in de_feats.keys():\n # ordering of genes\n 
sorted_pt = pseudo_order[traj_i].dropna(axis = 0).sort_values()\n # ordering = [int(x.split(\"_\")[1]) for x in sorted_pt.index]\n ordering = sorted_pt.index.values.squeeze()\n X_traj = X.loc[ordering, :]\n\n # make plot\n fig, axs = plt.subplots(nrows = nrows, ncols = ncols, figsize = figsize)\n colormap = plt.cm.get_cmap('tab20b', 20)\n idx = 0\n for feat in de_feats[traj_i]:\n if feat[\"feature\"] in genes:\n # plot log transformed version\n y = np.squeeze(X_traj.loc[:,feat[\"feature\"]].values)\n y_null = feat['null']\n y_pred = feat['regression']\n\n axs[idx%nrows, idx//nrows].scatter(np.arange(y.shape[0]), y, color = colormap(1), alpha = 0.5)\n axs[idx%nrows, idx//nrows].plot(np.arange(y.shape[0]), y_pred, color = \"black\", alpha = 1, linewidth = 4)\n axs[idx%nrows, idx//nrows].plot(np.arange(y.shape[0]), y_null, color = \"red\", alpha = 1, linewidth = 4)\n axs[idx%nrows, idx//nrows].set_title(feat[\"feature\"]) \n idx += 1 \n \n plt.tight_layout()\n figs.append(fig)\n fig.savefig(\"results_snare/de_snare/de_snare_\" + str(traj_i) + \".png\", bbox_inches = \"tight\")\n\ndef plot_gene(z, counts, gene, save = None, figsize = (20,10), axis_label = \"Latent\", **kwargs):\n _kwargs = {\n \"s\": 10,\n \"alpha\": 0.9,\n }\n _kwargs.update(kwargs)\n\n fig = plt.figure(figsize = figsize)\n\n ax = fig.add_subplot()\n sct = ax.scatter(z[:,0], z[:,1], c = counts.loc[:, gene].values.squeeze(), cmap = plt.get_cmap('gnuplot'), **_kwargs)\n \n\n ax.tick_params(axis = \"both\", which = \"major\", labelsize = 15)\n\n ax.set_xlabel(axis_label + \" 1\", fontsize = 19)\n ax.set_ylabel(axis_label + \" 2\", fontsize = 19)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False) \n\n cbar = fig.colorbar(sct,fraction=0.046, pad=0.04, ax = ax)\n cbar.ax.tick_params(labelsize = 20)\n\n if save:\n fig.savefig(save, bbox_inches = \"tight\")\n \n print(save)\n\nfor gene in genes:\n plot_gene(z_rna_pca, counts_rna, gene, save = None, figsize = (20,10), axis_label = \"Latent\")\n\n\n# In[] Find de motif\ncounts_motif = pd.read_csv(\"../data/snare-seq-1000/chromVAR/motif_z.csv\", index_col = 0)\n\n# Find de regions, binomial distribution\nde_motifs = de.de_analy_para(X = counts_motif, pseudo_order = pseudo_order_atac, p_val_t = 0.05, verbose = False, distri = \"normal\", fdr_correct = True)\nfor traj in de_motifs.keys():\n motifs = np.array([x[\"feature\"] for x in de_motifs[traj]])\n p_val = np.array([x[\"p_val\"] for x in de_motifs[traj]])\n motifs= motifs[np.argsort(p_val)]\n p_val = p_val[np.argsort(p_val)]\n de_list = pd.DataFrame.from_dict({\"feature\": motifs, \"p-val\": p_val})\n de_list.to_csv(\"./results_snare/de_snare/de_motif_\" + str(traj) + \".csv\")\n\nfigs = de.de_plot(X = counts_motif, pseudo_order = pseudo_order_atac, de_feats = de_motifs, figsize = (20,50), n_feats = 20)\n\n# In[] Motif by gene matrix\n# extract gene activity matrix (region, gene)\nGACT = train.infer_gact(model_dict[\"gene_act\"], mask = (coarse_reg != 0), thresh = 1e-6).cpu().numpy()\n# transform into (motif, gene)\n# read in the region2motif matrix, fill in the empty regions\nregion2motif = pd.read_csv(\"../data/snare-seq-1000/chromVAR/region2motif.csv\", sep = \",\", index_col = 0)\nregion2motif_full = pd.DataFrame(index = counts_atac.columns.values, columns = region2motif.columns.values, data = 0)\nregion2motif_full.loc[region2motif.index.values, region2motif.columns.values] = region2motif.values\n\n\nmotif2gene = region2motif_full.values.T @ GACT\n# check which motif is regulating which gene\nmotif2gene 
= pd.DataFrame(data = motif2gene, index = region2motif.columns.values, columns = counts_rna.columns.values)\nmotif2gene.to_csv(\"results_snare/de_snare/motif2gene_\" + str(latent_dim) + \"_\" + str(reg_d) + \"_\" + str(reg_g) + \"_\" + str(reg_mmd) + \"_\" + str(seed) + \"_\" + norm + \".csv\")\n\n# # In[]\n# # First check the Sox6\n# gene = \"Sox6\"\n# ordering = np.argsort(motif2gene[gene].values.squeeze())[::-1]\n# motif2gene_ordered = motif2gene.iloc[ordering, :]\n# motif2gene_ordered = motif2gene_ordered.loc[:, [gene]]\n# motif2gene_ordered.to_csv(\"results_snare/de_snare/1000/motifs_\" + gene + \".csv\")\n\n# In[]\nfrom scipy.stats import zscore\npseudo_rna = model_dict[\"gene_act\"](atac_dataset.counts.to(device)).detach().cpu().numpy()\npseudo_rna = zscore(pseudo_rna, axis = 0)\ngenes = [\"Ptprd\", \"Mki67\", \"Fabp7\", \"Top2a\", \"Mef2c\", \"Macrod2\", \"Tenm2\", \"Dab1\", \"Tnc\", \"Frmd4a\", \"Celf2\"]\ngenes = pd.read_csv(\"results_snare/de_snare/1000/de_gene_traj_0.csv\", index_col = 0)\ngenes = genes.iloc[:50,0].values.squeeze()\npseudo_rna = pd.DataFrame(data = pseudo_rna, index = counts_rna.index.values, columns = counts_rna.columns.values)\npseudo_rna_sorted = pseudo_rna.iloc[np.argsort(pt_infer_rna), :]\nrna_sorted = counts_rna.iloc[np.argsort(pt_infer_rna), :]\npseudo_rna_sorted = pseudo_rna_sorted.loc[:, genes]\nrna_sorted = rna_sorted.loc[:, genes]\nrna_sorted = zscore(rna_sorted, axis = 0)\n\nfig = plt.figure(figsize = (20, 7))\naxs = fig.subplots(1, 2)\nsns.heatmap(pseudo_rna_sorted.T, ax = axs[0])\nsns.heatmap(rna_sorted.T, ax = axs[1])\n# fig.savefig(\"results_snare/de_snare/predict_rna.png\", bbox_inches = \"tight\")\n\nscore_gact = pd.DataFrame(columns = [\"Method\", \"Gene\", \"Spearman\", \"Pearson\"])\nfor i, gene in enumerate(genes):\n spearman,_ = spearmanr(pseudo_rna_sorted.T.loc[gene,:], rna_sorted.T.loc[gene,:])\n if np.isnan(spearman):\n spearman = 0\n pearson,_ = pearsonr(pseudo_rna_sorted.T.loc[gene,:], rna_sorted.T.loc[gene,:])\n if np.isnan(pearson):\n pearson = 0\n # print(\"gene: {:s}, spearman: {:.4f}, pearson: {:.4f}\".format(gene, spearman, pearson))\n score_gact = score_gact.append({\"Method\": \"scDART\", \"Gene\": gene, \"Spearman\": spearman, \"Pearson\": pearson}, ignore_index = True)\n\n# linear method\npseudo_rna2 = (atac_dataset.counts.to(device) @ coarse_reg).detach().cpu().numpy()\npseudo_rna2 = zscore(pseudo_rna2, axis = 0)\npseudo_rna2 = pd.DataFrame(data = pseudo_rna2, index = counts_rna.index.values, columns = counts_rna.columns.values)\npseudo_rna2_sorted = pseudo_rna2.iloc[np.argsort(pt_infer_rna), :]\npseudo_rna2_sorted = pseudo_rna2_sorted.loc[:, genes]\n\nfor i, gene in enumerate(genes):\n spearman,_ = spearmanr(pseudo_rna2_sorted.T.loc[gene,:], rna_sorted.T.loc[gene,:])\n if np.isnan(spearman):\n spearman = 0\n pearson,_ = pearsonr(pseudo_rna2_sorted.T.loc[gene,:], rna_sorted.T.loc[gene,:])\n if np.isnan(pearson):\n pearson = 0\n # print(\"gene: {:s}, spearman: {:.4f}, pearson: {:.4f}\".format(gene, spearman, pearson))\n score_gact = score_gact.append({\"Method\": \"Linear\", \"Gene\": gene, \"Spearman\": spearman, \"Pearson\": pearson}, ignore_index = True)\n\n# Signac method\npseudo_rna_signac = pd.read_csv(\"results_snare/pseudoRNA/counts_rna_signac.csv\", index_col = 0)\npseudo_rna_signac_sorted = pseudo_rna_signac.loc[rna_sorted.index.values,:]\npseudo_rna_signac_sorted = zscore(pseudo_rna_signac_sorted, axis = 0)\npseudo_rna_signac_sorted = pseudo_rna_signac_sorted.loc[:, genes]\npseudo_rna_signac_sorted = 
pseudo_rna_signac_sorted.fillna(0)\n\nfor i, gene in enumerate(genes):\n spearman,_ = spearmanr(pseudo_rna_signac_sorted.T.loc[gene,:], rna_sorted.T.loc[gene,:])\n if np.isnan(spearman):\n spearman = 0\n pearson,_ = pearsonr(pseudo_rna_signac_sorted.T.loc[gene,:], rna_sorted.T.loc[gene,:])\n if np.isnan(pearson):\n pearson = 0\n # print(\"gene: {:s}, spearman: {:.4f}, pearson: {:.4f}\".format(gene, spearman, pearson))\n score_gact = score_gact.append({\"Method\": \"Signac\", \"Gene\": gene, \"Spearman\": spearman, \"Pearson\": pearson}, ignore_index = True)\n\n# Cicero method\npseudo_rna_cicero = pd.read_csv(\"results_snare/pseudoRNA/counts_rna_cicero.csv\", index_col = 0)\npseudo_rna_cicero = zscore(pseudo_rna_cicero, axis = 0)\npseudo_rna_signac = pd.DataFrame(data = pseudo_rna_cicero, index = counts_rna.index.values, columns = counts_rna.columns.values)\npseudo_rna_cicero_sorted = pseudo_rna_cicero.iloc[np.argsort(pt_infer_rna), :]\npseudo_rna_cicero_sorted = pseudo_rna_cicero_sorted.loc[:, genes]\npseudo_rna_cicero_sorted = pseudo_rna_cicero_sorted.fillna(0)\n\nfor i, gene in enumerate(genes):\n spearman,_ = spearmanr(pseudo_rna_cicero_sorted.T.loc[gene,:], rna_sorted.T.loc[gene,:])\n if np.isnan(spearman):\n spearman = 0\n pearson,_ = pearsonr(pseudo_rna_cicero_sorted.T.loc[gene,:], rna_sorted.T.loc[gene,:])\n if np.isnan(pearson):\n pearson = 0\n # print(\"gene: {:s}, spearman: {:.4f}, pearson: {:.4f}\".format(gene, spearman, pearson))\n score_gact = score_gact.append({\"Method\": \"Cicero\", \"Gene\": gene, \"Spearman\": spearman, \"Pearson\": pearson}, ignore_index = True)\n\n\nscore_gact.to_csv(\"results_snare/de_snare/gact_acc.csv\")\n\nfig = plt.figure(figsize = (7, 7))\nax = fig.add_subplot()\nx = score_gact.loc[score_gact[\"Method\"] == \"Linear\", \"Pearson\"].values\ny = score_gact.loc[score_gact[\"Method\"] == \"scDART\", \"Pearson\"].values\nax.scatter(x, y)\n\nprint(\"proportion above: {:.2f}\".format(np.sum((x < y).astype(int))/x.shape[0]) )\n# for i in range(x.shape[0]):\n# marker = genes[i]\n# if len(marker) <= 3:\n# ax.plot(x[i] + 0.02, y[i] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 20)\n# elif len(marker) <= 5:\n# ax.plot(x[i] + 0.02, y[i] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 30)\n# else:\n# ax.plot(x[i] + 0.02, y[i] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 45)\n\n# ax.set_yscale('log')\n# ax.set_xscale('log')\nax.set_xlim([0.001, 0.4])\nax.set_ylim([0.001, 0.4])\n# ax.set_xticks([0.01, 0.1])\n# ax.set_yticks([0.01, 0.1])\nax.plot([0, 0.4], [0, 0.4], \"r:\")\nax.set_xlabel(\"Linear\")\nax.set_ylabel(\"scDART\")\n\nfig.savefig(\"results_snare/de_snare/Pearson_linear.png\", bbox_inches = \"tight\")\n\n\nfig = plt.figure(figsize = (7, 7))\nax = fig.add_subplot()\nx = score_gact.loc[score_gact[\"Method\"] == \"Signac\", \"Pearson\"].values\ny = score_gact.loc[score_gact[\"Method\"] == \"scDART\", \"Pearson\"].values\nax.scatter(x, y)\n\nprint(\"proportion above: {:.2f}\".format(np.sum((x < y).astype(int))/x.shape[0]) )\n# for i in range(x.shape[0]):\n# marker = genes[i]\n# if len(marker) <= 3:\n# ax.plot(x[i] + 0.02, y[i] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 20)\n# elif len(marker) <= 5:\n# ax.plot(x[i] + 0.02, y[i] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 30)\n# else:\n# ax.plot(x[i] + 0.02, y[i] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 45)\n\n# ax.set_yscale('log')\n# 
ax.set_xscale('log')\nax.set_xlim([0.001, 0.4])\nax.set_ylim([0.001, 0.4])\n# ax.set_xticks([0.01, 0.1])\n# ax.set_yticks([0.01, 0.1])\nax.plot([0, 0.4], [0, 0.4], \"r:\")\nax.set_xlabel(\"Signac\")\nax.set_ylabel(\"scDART\")\n\nfig.savefig(\"results_snare/de_snare/Pearson_signac.png\", bbox_inches = \"tight\")\n\n\nfig = plt.figure(figsize = (7, 7))\nax = fig.add_subplot()\nx = score_gact.loc[score_gact[\"Method\"] == \"Cicero\", \"Pearson\"].values\ny = score_gact.loc[score_gact[\"Method\"] == \"scDART\", \"Pearson\"].values\nax.scatter(x, y)\n\nprint(\"proportion above: {:.2f}\".format(np.sum((x < y).astype(int))/x.shape[0]) )\n# for i in range(x.shape[0]):\n# marker = genes[i]\n# if len(marker) <= 3:\n# ax.plot(x[i] + 0.02, y[i] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 20)\n# elif len(marker) <= 5:\n# ax.plot(x[i] + 0.02, y[i] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 30)\n# else:\n# ax.plot(x[i] + 0.02, y[i] + 0.001, color = \"black\", marker= \"$\" + marker + \"$\", markersize = 45)\n\n# ax.set_yscale('log')\n# ax.set_xscale('log')\nax.set_xlim([0.001, 0.4])\nax.set_ylim([0.001, 0.4])\n# ax.set_xticks([0.01, 0.1])\n# ax.set_yticks([0.01, 0.1])\nax.plot([0, 0.4], [0, 0.4], \"r:\")\nax.set_xlabel(\"Cicero\")\nax.set_ylabel(\"scDART\")\n\nfig.savefig(\"results_snare/de_snare/Pearson_cicero.png\", bbox_inches = \"tight\")\n\n# # average barplot\n# spearman_scdart = np.mean(score_gact.loc[score_gact[\"Method\"] == \"scDART\", \"Spearman\"].values)\n# pearson_scdart = np.mean(score_gact.loc[score_gact[\"Method\"] == \"scDART\", \"Pearson\"].values)\n# spearman_linear = np.mean(score_gact.loc[score_gact[\"Method\"] == \"Linear\", \"Spearman\"].values)\n# pearson_linear = np.mean(score_gact.loc[score_gact[\"Method\"] == \"Linear\", \"Pearson\"].values)\n# spearman_signac = np.mean(score_gact.loc[score_gact[\"Method\"] == \"Signac\", \"Spearman\"].values)\n# pearson_signac = np.mean(score_gact.loc[score_gact[\"Method\"] == \"Signac\", \"Pearson\"].values)\n# spearman_cicero = np.mean(score_gact.loc[score_gact[\"Method\"] == \"Cicero\", \"Spearman\"].values)\n# pearson_cicero = np.mean(score_gact.loc[score_gact[\"Method\"] == \"Cicero\", \"Pearson\"].values)\n\nfig = plt.figure(figsize = (7,5))\nax = fig.subplots(nrows = 1, ncols = 1)\n# ax = sns.barplot(data = score_gact, x = \"Method\", y = \"Pearson\", ax = ax, color = \"blue\", alpha = 0.7, estimator=np.median, ci='sd', capsize=.2)\nax = sns.boxplot(data = score_gact, x = \"Method\", y = \"Pearson\", ax = ax)\nplt.tight_layout()\nax.set_xticklabels(labels = [\"scDART\", \"Linear\", \"Signac\", \"Cicero\"], rotation = 45)\nax.set_ylabel(\"Pearson\")\nnewwidth = 0.5\nfor bar1 in ax.patches:\n x = bar1.get_x()\n width = bar1.get_width()\n centre = x+width/2.\n\n bar1.set_x(centre-newwidth/2.)\n bar1.set_width(newwidth)\n\nshow_values_on_bars(ax)\nfig.savefig(\"results_snare/de_snare/Pearson.png\", bbox_inches = \"tight\")\n\n# # In[]\n# pseudo_rna = model_dict[\"gene_act\"](atac_dataset.counts.to(device)).detach().cpu().numpy()\n# pseudo_rna = zscore(pseudo_rna, axis = 0)\n# genes = [\"Ptprd\", \"Mki67\", \"Fabp7\", \"Top2a\", \"Mef2c\", \"Macrod2\", \"Tenm2\", \"Dab1\", \"Tnc\", \"Frmd4a\", \"Celf2\"]\n# # genes = pd.read_csv(\"results_snare/de_snare/1000/de_gene_traj_0.csv\", index_col = 0)\n# # genes = genes.iloc[:50,0].values.squeeze()\n# pseudo_rna = pd.DataFrame(data = pseudo_rna, index = counts_rna.index.values, columns = counts_rna.columns.values)\n# pseudo_rna_sorted = 
pseudo_rna.iloc[np.argsort(pt_infer_rna), :]\n# rna_sorted = counts_rna.iloc[np.argsort(pt_infer_rna), :]\n# pseudo_rna_sorted = pseudo_rna_sorted.loc[:, genes]\n# rna_sorted = rna_sorted.loc[:, genes]\n# # rna_sorted = zscore(rna_sorted, axis = 0)\n\n\n# score_gact = pd.DataFrame(columns = [\"Method\", \"Gene\", \"Spearman\", \"Pearson\"])\n# # loop through cells\n# for i, barcode in enumerate(pseudo_rna_sorted.index.values.squeeze()):\n# spearman,_ = spearmanr(pseudo_rna_sorted.loc[barcode, :], rna_sorted.loc[barcode, :])\n# if np.isnan(spearman):\n# spearman = 0\n# pearson,_ = pearsonr(pseudo_rna_sorted.loc[barcode, :], rna_sorted.loc[barcode, :])\n# if np.isnan(pearson):\n# pearson = 0\n# # print(\"gene: {:s}, spearman: {:.4f}, pearson: {:.4f}\".format(gene, spearman, pearson))\n# score_gact = score_gact.append({\"Method\": \"scDART\", \"Cell\": barcode, \"Spearman\": spearman, \"Pearson\": pearson}, ignore_index = True)\n\n# # linear method\n# pseudo_rna2 = (atac_dataset.counts.to(device) @ coarse_reg).detach().cpu().numpy()\n# pseudo_rna2 = zscore(pseudo_rna2, axis = 0)\n# pseudo_rna2 = pd.DataFrame(data = pseudo_rna2, index = counts_rna.index.values, columns = counts_rna.columns.values)\n# pseudo_rna2_sorted = pseudo_rna2.iloc[np.argsort(pt_infer_rna), :]\n# pseudo_rna2_sorted = pseudo_rna2_sorted.loc[:, genes]\n\n# for i, barcode in enumerate(pseudo_rna_sorted.index.values.squeeze()):\n# spearman,_ = spearmanr(pseudo_rna2_sorted.loc[barcode,:], rna_sorted.loc[barcode,:])\n# if np.isnan(spearman):\n# spearman = 0\n# pearson,_ = pearsonr(pseudo_rna2_sorted.loc[barcode,:], rna_sorted.loc[barcode,:])\n# if np.isnan(pearson):\n# pearson = 0\n# # print(\"gene: {:s}, spearman: {:.4f}, pearson: {:.4f}\".format(gene, spearman, pearson))\n# score_gact = score_gact.append({\"Method\": \"Linear\", \"Cell\": barcode, \"Spearman\": spearman, \"Pearson\": pearson}, ignore_index = True)\n\n# # Signac method\n# pseudo_rna_signac = pd.read_csv(\"results_snare/pseudoRNA/counts_rna_signac.csv\", index_col = 0)\n# pseudo_rna_signac_sorted = pseudo_rna_signac.loc[rna_sorted.index.values,:]\n# pseudo_rna_signac_sorted = zscore(pseudo_rna_signac_sorted, axis = 0)\n# pseudo_rna_signac_sorted = pseudo_rna_signac_sorted.loc[:, genes]\n# pseudo_rna_signac_sorted = pseudo_rna_signac_sorted.fillna(0)\n\n# for i, barcode in enumerate(pseudo_rna_sorted.index.values.squeeze()):\n# spearman,_ = spearmanr(pseudo_rna_signac_sorted.loc[barcode,:], rna_sorted.loc[barcode,:])\n# if np.isnan(spearman):\n# spearman = 0\n# pearson,_ = pearsonr(pseudo_rna_signac_sorted.loc[barcode,:], rna_sorted.loc[barcode,:])\n# if np.isnan(pearson):\n# pearson = 0\n# # print(\"gene: {:s}, spearman: {:.4f}, pearson: {:.4f}\".format(gene, spearman, pearson))\n# score_gact = score_gact.append({\"Method\": \"Signac\", \"Cell\": barcode, \"Spearman\": spearman, \"Pearson\": pearson}, ignore_index = True)\n\n# # Cicero method\n# pseudo_rna_cicero = pd.read_csv(\"results_snare/pseudoRNA/counts_rna_cicero.csv\", index_col = 0)\n# pseudo_rna_cicero = zscore(pseudo_rna_cicero, axis = 0)\n# pseudo_rna_signac = pd.DataFrame(data = pseudo_rna_cicero, index = counts_rna.index.values, columns = counts_rna.columns.values)\n# pseudo_rna_cicero_sorted = pseudo_rna_cicero.iloc[np.argsort(pt_infer_rna), :]\n# pseudo_rna_cicero_sorted = pseudo_rna_cicero_sorted.loc[:, genes]\n# pseudo_rna_cicero_sorted = pseudo_rna_cicero_sorted.fillna(0)\n\n# for i, barcode in enumerate(pseudo_rna_sorted.index.values.squeeze()):\n# spearman,_ = 
spearmanr(pseudo_rna_cicero_sorted.loc[barcode,:], rna_sorted.loc[barcode,:])\n# if np.isnan(spearman):\n# spearman = 0\n# pearson,_ = pearsonr(pseudo_rna_cicero_sorted.loc[barcode,:], rna_sorted.loc[barcode,:])\n# if np.isnan(pearson):\n# pearson = 0\n# # print(\"gene: {:s}, spearman: {:.4f}, pearson: {:.4f}\".format(gene, spearman, pearson))\n# score_gact = score_gact.append({\"Method\": \"Cicero\", \"Cell\": barcode, \"Spearman\": spearman, \"Pearson\": pearson}, ignore_index = True)\n\n\n# fig = plt.figure(figsize = (7,5))\n# ax = fig.subplots(nrows = 1, ncols = 1)\n# # ax = sns.barplot(data = score_gact, x = \"Method\", y = \"Pearson\", ax = ax, color = \"blue\", alpha = 0.7, estimator=np.median, ci='sd', capsize=.2)\n# ax = sns.boxplot(data = score_gact, x = \"Method\", y = \"Pearson\", ax = ax)\n# plt.tight_layout()\n# ax.set_xticklabels(labels = [\"scDART\", \"Linear\", \"Signac\", \"Cicero\"], rotation = 45)\n# ax.set_ylabel(\"Pearson\")\n# newwidth = 0.5\n# for bar1 in ax.patches:\n# x = bar1.get_x()\n# width = bar1.get_width()\n# centre = x+width/2.\n\n# bar1.set_x(centre-newwidth/2.)\n# bar1.set_width(newwidth)\n\n# show_values_on_bars(ax)\n# fig.savefig(\"results_snare/de_snare/Pearson.png\", bbox_inches = \"tight\")\n\n\n# In[] Other baseline methods\n# 1. Liger\npath = \"results_snare/liger/\"\nz_rna_liger = pd.read_csv(path + \"H1_full.csv\", index_col = 0)\nz_atac_liger = pd.read_csv(path + \"H2_full.csv\", index_col = 0)\nintegrated_data = (z_rna_liger.values, z_atac_liger.values)\n\npca_op = PCA(n_components = 2)\numap_op = UMAP(n_components = 2)\n\npca_latent = pca_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\numap_latent = umap_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\n\nfig, axs = utils.plot_latent(umap_latent[:z_rna.shape[0],:], umap_latent[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"liger_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nfig, axs = utils.plot_latent(umap_latent[:z_rna.shape[0],:], umap_latent[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"liger_batches_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nfig, axs = utils.plot_latent(pca_latent[:z_rna.shape[0],:], pca_latent[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = None, figsize = (15,7), axis_label = \"PCA\")\naxs.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))\nfig.savefig(path + \"liger_pca.png\", bbox_inches = \"tight\")\nfig, axs = utils.plot_latent(pca_latent[:z_rna.shape[0],:], pca_latent[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"liger_batches_pca.png\", figsize = (15,7), axis_label = \"PCA\")\naxs.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))\nfig.savefig(path + \"liger_batches_pca.png\", bbox_inches = \"tight\")\n\nz_destiny = np.load(path + \"z_diffmap.npy\")\nutils.plot_latent(z_destiny[:z_rna.shape[0],:], z_destiny[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"liger_destiny.png\", figsize = (15,7), axis_label = \"Diffmap\")\nutils.plot_latent(z_destiny[:z_rna.shape[0],:], z_destiny[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"liger_batches_destiny.png\", figsize = (15,7), axis_label = \"Diffmap\")\n\n# Infer 
backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_liger, z_atac_liger), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_liger.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_liger.shape[0]:]\n\n\npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna_liger, z_atac_liger), axis = 0))\nz_rna_pca = z[:z_rna_liger.shape[0],:]\nz_atac_pca = z[z_rna_liger.shape[0]:,:]\n\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna_liger, z_atac_liger, cell_labels)\nmean_cluster = pca_op.transform(np.array(mean_cluster))\n\nplot_backbone(z_rna_pca, z_atac_pca, mode = \"joint\", mean_cluster = mean_cluster, groups = groups, T = T, figsize=(15,7), save = path + \"backbone.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_pca, z2 = z_atac_pca, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt.png\", figsize = (15,7), axis_label = \"PCA\")\n\n# In[]\n# 2. Seurat\npath = \"results_snare/seurat/\"\n\ncoembed = pd.read_csv(path + \"umap_embedding.txt\", sep = \"\\t\").values\nz_rna_seurat = coembed[:label_rna.values.shape[0],:]\nz_atac_seurat = coembed[label_rna.values.shape[0]:,:]\nutils.plot_latent(z_rna_seurat, z_atac_seurat, label_rna.values, label_atac.values, mode = \"modality\", figsize = (15,7), axis_label = \"PCA\", save = path + \"seurat_batches_umap.png\")\nutils.plot_latent(z_rna_seurat, z_atac_seurat, label_rna.values, label_atac.values, mode = \"joint\", figsize = (15,7), axis_label = \"PCA\", save = path + \"seurat_umap.png\")\n\ncoembed = pd.read_csv(path + \"pca_embedding.txt\", sep = \"\\t\").values\nz_rna_seurat = coembed[:label_rna.values.shape[0],:]\nz_atac_seurat = coembed[label_rna.values.shape[0]:,:]\nutils.plot_latent(z_rna_seurat, z_atac_seurat, label_rna.values, label_atac.values, mode = \"modality\", figsize = (15,7), axis_label = \"PCA\", save = path + \"seurat_batches_pca.png\")\nutils.plot_latent(z_rna_seurat, z_atac_seurat, label_rna.values, label_atac.values, mode = \"joint\", figsize = (15,7), axis_label = \"PCA\", save = path + \"seurat_pca.png\")\n\nz_destiny = np.load(path + \"z_diffmap.npy\")\nutils.plot_latent(z_destiny[:z_rna.shape[0],:], z_destiny[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"destiny_joint.png\", figsize = (15,7), axis_label = \"Diffmap\")\nutils.plot_latent(z_destiny[:z_rna.shape[0],:], z_destiny[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"destiny.png\", figsize = (15,7), axis_label = \"Diffmap\")\n\n# Infer backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_seurat, z_atac_seurat), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_seurat.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_seurat.shape[0]:]\n\n\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna_seurat, z_atac_seurat, cell_labels)\n\nplot_backbone(z_rna_seurat, z_atac_seurat, mode = \"joint\", mean_cluster = np.array(mean_cluster), groups = groups, T = T, 
figsize=(15,7), save = path + \"backbone.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_seurat, z2 = z_atac_seurat, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt.png\", figsize = (15,7), axis_label = \"PCA\")\n\n\npath = \"results_snare/seurat/\"\n\ncoembed = pd.read_csv(path + \"umap_embedding_full.txt\", sep = \"\\t\").values\nz_rna_seurat = coembed[:label_rna.values.shape[0],:]\nz_atac_seurat = coembed[label_rna.values.shape[0]:,:]\nutils.plot_latent(z_rna_seurat, z_atac_seurat, label_rna.values, label_atac.values, mode = \"modality\", figsize = (15,7), axis_label = \"PCA\", save = path + \"seurat_batches_umap_full.png\")\nutils.plot_latent(z_rna_seurat, z_atac_seurat, label_rna.values, label_atac.values, mode = \"joint\", figsize = (15,7), axis_label = \"PCA\", save = path + \"seurat_umap_full.png\")\n\ncoembed = pd.read_csv(path + \"pca_embedding_full.txt\", sep = \"\\t\").values\nz_rna_seurat = coembed[:label_rna.values.shape[0],:]\nz_atac_seurat = coembed[label_rna.values.shape[0]:,:]\n\nutils.plot_latent(z_rna_seurat, z_atac_seurat, label_rna.values, label_atac.values, mode = \"modality\", figsize = (15,7), axis_label = \"PCA\", save = path + \"seurat_batches_pca_full.png\")\nutils.plot_latent(z_rna_seurat, z_atac_seurat, label_rna.values, label_atac.values, mode = \"joint\", figsize = (15,7), axis_label = \"PCA\", save = path + \"seurat_pca_full.png\")\n\n# Infer backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_seurat, z_atac_seurat), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_seurat.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_seurat.shape[0]:]\n\n\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna_seurat, z_atac_seurat, cell_labels)\n\nplot_backbone(z_rna_seurat, z_atac_seurat, mode = \"joint\", mean_cluster = np.array(mean_cluster), groups = groups, T = T, figsize=(15,7), save = path + \"backbone_full.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_seurat, z2 = z_atac_seurat, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt_full.png\", figsize = (15,7), axis_label = \"PCA\")\n\n\n# Include post-processing\nz_rna_seurat_post = torch.FloatTensor(z_rna_seurat)\nz_atac_seurat_post = torch.FloatTensor(z_atac_seurat)\nz_rna_seurat_post, z_atac_seurat_post = palign.match_alignment(z_rna = z_rna_seurat_post, z_atac = z_atac_seurat_post, k = 10)\nz_atac_seurat_post, z_rna_seurat_post = palign.match_alignment(z_rna = z_atac_seurat_post, z_atac = z_rna_seurat_post, k = 10)\nz_rna_seurat_post = z_rna_seurat_post.numpy()\nz_atac_seurat_post = z_atac_seurat_post.numpy()\nutils.plot_latent(z_rna_seurat_post, z_atac_seurat_post, label_rna.values, label_atac.values, mode = \"modality\", figsize = (15,7), axis_label = \"PCA\", save = path + \"pca_post.png\")\nutils.plot_latent(z_rna_seurat_post, z_atac_seurat_post, label_rna.values, label_atac.values, mode = \"joint\", figsize = (15,7), axis_label = \"PCA\", save = path + \"pca_joint_post.png\")\n\n# Infer backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_seurat_post, z_atac_seurat_post), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = 
pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_seurat_post.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_seurat_post.shape[0]:]\n\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna_seurat, z_atac_seurat, cell_labels)\n\nplot_backbone(z_rna_seurat_post, z_atac_seurat_post, mode = \"joint\", mean_cluster = np.array(mean_cluster), groups = groups, T = T, figsize=(15,7), save = path + \"backbone_post.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_seurat_post, z2 = z_atac_seurat_post, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt_post.png\", figsize = (15,7), axis_label = \"PCA\")\n\n\n# In[]\n# 3. unioncom\npath = \"results_snare/unioncom/\"\nz_rna_unioncom = np.load(path + \"unioncom_rna_32.npy\")\nz_atac_unioncom = np.load(path + \"unioncom_atac_32.npy\")\nintegrated_data = (z_rna_unioncom, z_atac_unioncom)\n\npca_op = PCA(n_components = 2)\numap_op = UMAP(n_components = 2)\n\npca_latent = pca_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\numap_latent = umap_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\n\nutils.plot_latent(umap_latent[:z_rna.shape[0],:], umap_latent[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"unioncom_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(umap_latent[:z_rna.shape[0],:], umap_latent[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"unioncom_batches_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(pca_latent[:z_rna.shape[0],:], pca_latent[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"unioncom_pca.png\", figsize = (15,7), axis_label = \"PCA\")\nutils.plot_latent(pca_latent[:z_rna.shape[0],:], pca_latent[z_rna.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"unioncom_batches_pca.png\", figsize = (15,7), axis_label = \"PCA\")\n\nz_destiny = np.load(path + \"z_diffmap.npy\")\nutils.plot_latent(z_destiny[:z_rna_unioncom.shape[0],:], z_destiny[z_rna_unioncom.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"unioncom_destiny.png\", figsize = (15,7), axis_label = \"Diffmap\")\nutils.plot_latent(z_destiny[:z_rna_unioncom.shape[0],:], z_destiny[z_rna_unioncom.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"unioncom_batches_destiny.png\", figsize = (15,7), axis_label = \"Diffmap\")\n\n# Infer backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_unioncom, z_atac_unioncom), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_unioncom.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_unioncom.shape[0]:]\n\n\npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna_unioncom, z_atac_unioncom), axis = 0))\nz_rna_pca = z[:z_rna_unioncom.shape[0],:]\nz_atac_pca = z[z_rna_unioncom.shape[0]:,:]\n\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = 
backbone_inf(z_rna_unioncom, z_atac_unioncom, cell_labels)\nmean_cluster = pca_op.transform(np.array(mean_cluster))\n\nplot_backbone(z_rna_pca, z_atac_pca, mode = \"joint\", mean_cluster = mean_cluster, groups = groups, T = T, figsize=(15,7), save = path + \"backbone.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_pca, z2 = z_atac_pca, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt.png\", figsize = (15,7), axis_label = \"PCA\")\n\n# Include post-processing\nz_rna_unioncom_post = torch.FloatTensor(z_rna_unioncom)\nz_atac_unioncom_post = torch.FloatTensor(z_atac_unioncom)\nz_rna_unioncom_post, z_atac_unioncom_post = palign.match_alignment(z_rna = z_rna_unioncom_post, z_atac = z_atac_unioncom_post, k = 10)\nz_atac_unioncom_post, z_rna_unioncom_post = palign.match_alignment(z_rna = z_atac_unioncom_post, z_atac = z_rna_unioncom_post, k = 10)\nz_rna_unioncom_post = z_rna_unioncom_post.numpy()\nz_atac_unioncom_post = z_atac_unioncom_post.numpy()\n\nintegrated_data = (z_rna_unioncom_post, z_atac_unioncom_post)\npca_op = PCA(n_components = 2)\numap_op = UMAP(n_components = 2)\npca_latent = pca_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\numap_latent = umap_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\n\n\nutils.plot_latent(umap_latent[:z_rna_unioncom.shape[0],:], umap_latent[z_rna_unioncom.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values,\nmode = \"joint\", save = path + \"unioncom_umap_post.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(umap_latent[:z_rna_unioncom.shape[0],:], umap_latent[z_rna_unioncom.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"unioncom_batches_umap_post.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(pca_latent[:z_rna_unioncom.shape[0],:], pca_latent[z_rna_unioncom.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values,\nmode = \"joint\", save = path + \"unioncom_pca_post.png\", figsize = (15,7), axis_label = \"PCA\")\nutils.plot_latent(pca_latent[:z_rna_unioncom.shape[0],:], pca_latent[z_rna_unioncom.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values,\nmode = \"modality\", save = path + \"unioncom_batches_pca_post.png\", figsize = (15,7), axis_label = \"PCA\")\n\n# Infer backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_unioncom_post, z_atac_unioncom_post), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_unioncom_post.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_unioncom_post.shape[0]:]\npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna_unioncom_post, z_atac_unioncom_post), axis = 0))\nz_rna_pca = z[:z_rna_unioncom_post.shape[0],:]\nz_atac_pca = z[z_rna_unioncom_post.shape[0]:,:]\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna_unioncom_post, z_atac_unioncom_post, cell_labels)\nmean_cluster = pca_op.transform(np.array(mean_cluster))\n\nplot_backbone(z_rna_pca, z_atac_pca, mode = \"joint\", mean_cluster = mean_cluster, groups = groups, T = T, figsize=(15,7), save = path + \"backbone_post.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_pca, z2 = z_atac_pca, pt1 = 
pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt_post.png\", figsize = (15,7), axis_label = \"PCA\")\n\n# In[]\n# 4. scJoint\npath = \"results_snare/scJoint_snare_traj/\"\nz_atac_scJoint = pd.read_csv(path + \"counts_atac_embeddings.txt\", sep = \" \", header = None).values\nz_rna_scJoint = pd.read_csv(path + \"counts_rna_embeddings.txt\", sep = \" \", header = None).values\n\nintegrated_data = [z_rna_scJoint, z_atac_scJoint]\npca_op = PCA(n_components = 2)\numap_op = UMAP(n_components = 2)\n\npca_latent = pca_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\numap_latent = umap_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\n\n\nutils.plot_latent(umap_latent[:z_rna_scJoint.shape[0],:], umap_latent[z_rna_scJoint.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"scjoint_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(umap_latent[:z_rna_scJoint.shape[0],:], umap_latent[z_rna_scJoint.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"scjoint_batches_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(pca_latent[:z_rna_scJoint.shape[0],:], pca_latent[z_rna_scJoint.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"scjoint_pca.png\", figsize = (15,7), axis_label = \"PCA\")\nutils.plot_latent(pca_latent[:z_rna_scJoint.shape[0],:], pca_latent[z_rna_scJoint.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"scjoint_batches_pca.png\", figsize = (15,7), axis_label = \"PCA\")\n\nz_destiny = np.load(path + \"z_diffmap.npy\")\nutils.plot_latent(z_destiny[:z_rna_unioncom.shape[0],:], z_destiny[z_rna_unioncom.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"scjoint_destiny.png\", figsize = (15,7), axis_label = \"Diffmap\")\nutils.plot_latent(z_destiny[:z_rna_unioncom.shape[0],:], z_destiny[z_rna_unioncom.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"scjoint_batches_destiny.png\", figsize = (15,7), axis_label = \"Diffmap\")\n\n# Infer backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_scJoint, z_atac_scJoint), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_scJoint.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_scJoint.shape[0]:]\n\npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna_scJoint, z_atac_scJoint), axis = 0))\nz_rna_pca = z[:z_rna_scJoint.shape[0],:]\nz_atac_pca = z[z_rna_scJoint.shape[0]:,:]\n\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna_scJoint, z_atac_scJoint, cell_labels)\nmean_cluster = pca_op.transform(np.array(mean_cluster))\n\nplot_backbone(z_rna_pca, z_atac_pca, mode = \"joint\", mean_cluster = mean_cluster, groups = groups, T = T, figsize=(15,7), save = path + \"backbone_full.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_pca, z2 = z_atac_pca, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt_full.png\", figsize = (15,7), axis_label = \"PCA\")\n\n# In[]\npath = 
\"results_snare/scJoint_snare_raw_traj/\"\nz_atac_scJoint = pd.read_csv(path + \"counts_atac_embeddings.txt\", sep = \" \", header = None).values\nz_rna_scJoint = pd.read_csv(path + \"counts_rna_embeddings.txt\", sep = \" \", header = None).values\n\nintegrated_data = [z_rna_scJoint, z_atac_scJoint]\npca_op = PCA(n_components = 2)\numap_op = UMAP(n_components = 2)\n\npca_latent = pca_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\numap_latent = umap_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\nprint(pca_op.explained_variance_ratio_)\n\nutils.plot_latent(umap_latent[:z_rna_scJoint.shape[0],:], umap_latent[z_rna_scJoint.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"scjoint_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(umap_latent[:z_rna_scJoint.shape[0],:], umap_latent[z_rna_scJoint.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"scjoint_batches_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(pca_latent[:z_rna_scJoint.shape[0],:], pca_latent[z_rna_scJoint.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"scjoint_pca.png\", figsize = (15,7), axis_label = \"PCA\")\nutils.plot_latent(pca_latent[:z_rna_scJoint.shape[0],:], pca_latent[z_rna_scJoint.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"scjoint_batches_pca.png\", figsize = (15,7), axis_label = \"PCA\")\n\n# Infer backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_scJoint, z_atac_scJoint), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_scJoint.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_scJoint.shape[0]:]\n\nz = pca_op.fit_transform(np.concatenate((z_rna_scJoint, z_atac_scJoint), axis = 0))\nz_rna_pca = z[:z_rna_scJoint.shape[0],:]\nz_atac_pca = z[z_rna_scJoint.shape[0]:,:]\n\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna_scJoint, z_atac_scJoint, cell_labels)\nmean_cluster_pca = pca_op.transform(np.array(mean_cluster))\n\nplot_backbone(z_rna_pca, z_atac_pca, mode = \"joint\", mean_cluster = mean_cluster_pca, groups = groups, T = T, figsize=(15,7), save = path + \"backbone_full.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_pca, z2 = z_atac_pca, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt_full.png\", figsize = (15,7), axis_label = \"PCA\")\n\nz = umap_op.fit_transform(np.concatenate((z_rna_scJoint, z_atac_scJoint), axis = 0))\nz_rna_umap = z[:z_rna_scJoint.shape[0],:]\nz_atac_umap = z[z_rna_scJoint.shape[0]:,:]\n\nmean_cluster_umap = umap_op.transform(np.array(mean_cluster))\n\nplot_backbone(z_rna_umap, z_atac_umap, mode = \"joint\", mean_cluster = mean_cluster_umap, groups = groups, T = T, figsize=(15,7), save = path + \"backbone_full_umap.png\", anno = cell_labels, axis_label = \"UMAP\")\nutils.plot_latent_pt(z1 = z_rna_umap, z2 = z_atac_umap, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt_full_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\n\n\n# In[]\n# Include post-processing\nz_rna_scJoint_post = 
torch.FloatTensor(z_rna_scJoint)\nz_atac_scJoint_post = torch.FloatTensor(z_atac_scJoint)\nz_rna_scJoint_post, z_atac_scJoint_post = palign.match_alignment(z_rna = z_rna_scJoint_post, z_atac = z_atac_scJoint_post, k = 10)\nz_atac_scJoint_post, z_rna_scJoint_post = palign.match_alignment(z_rna = z_atac_scJoint_post, z_atac = z_rna_scJoint_post, k = 10)\nz_rna_scJoint_post = z_rna_scJoint_post.numpy()\nz_atac_scJoint_post = z_atac_scJoint_post.numpy()\n\nintegrated_data = (z_rna_scJoint_post, z_atac_scJoint_post)\npca_op = PCA(n_components = 2)\numap_op = UMAP(n_components = 2)\npca_latent = pca_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\numap_latent = umap_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\n\n\nutils.plot_latent(umap_latent[:z_rna_scJoint_post.shape[0],:], umap_latent[z_rna_scJoint_post.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values,\nmode = \"joint\", save = path + \"scjoint_umap_post.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(umap_latent[:z_rna_scJoint_post.shape[0],:], umap_latent[z_rna_scJoint_post.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values,\nmode = \"modality\", save = path + \"scjoint_batches_umap_post.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(pca_latent[:z_rna_scJoint_post.shape[0],:], pca_latent[z_rna_scJoint_post.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values,\nmode = \"joint\", save = path + \"scjoint_pca_post.png\", figsize = (15,7), axis_label = \"PCA\")\nutils.plot_latent(pca_latent[:z_rna_scJoint_post.shape[0],:], pca_latent[z_rna_scJoint_post.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values,\nmode = \"modality\", save = path + \"scjoint_batches_pca_post.png\", figsize = (15,7), axis_label = \"PCA\")\n\n# Infer backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_scJoint_post, z_atac_scJoint_post), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_scJoint_post.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_scJoint_post.shape[0]:]\npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna_scJoint_post, z_atac_scJoint_post), axis = 0))\nz_rna_pca = z[:z_rna_scJoint_post.shape[0],:]\nz_atac_pca = z[z_rna_scJoint_post.shape[0]:,:]\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna_scJoint_post, z_rna_scJoint_post, cell_labels)\nmean_cluster = pca_op.transform(np.array(mean_cluster))\n\nplot_backbone(z_rna_pca, z_atac_pca, mode = \"joint\", mean_cluster = mean_cluster, groups = groups, T = T, figsize=(15,7), save = path + \"backbone_full_post.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_pca, z2 = z_atac_pca, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt_full_post.png\", figsize = (15,7), axis_label = \"PCA\")\n\n\n# In[] MMD-MA\npath = \"results_snare/mmd_ma/\"\nz_rna_mmdma = np.load(path + \"mmd_ma_rna.npy\")\nz_atac_mmdma = np.load(path + \"mmd_ma_atac.npy\")\n\nintegrated_data = [z_rna_mmdma, z_atac_mmdma]\npca_op = PCA(n_components = 2)\numap_op = UMAP(n_components = 2)\n\npca_latent = pca_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\numap_latent = 
umap_op.fit_transform(np.concatenate((integrated_data[0],integrated_data[1]), axis = 0))\nprint(pca_op.explained_variance_ratio_)\n\nutils.plot_latent(umap_latent[:z_rna_mmdma.shape[0],:], umap_latent[z_rna_mmdma.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"mmdma_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(umap_latent[:z_rna_mmdma.shape[0],:], umap_latent[z_rna_mmdma.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"mmdma_batches_umap.png\", figsize = (15,7), axis_label = \"UMAP\")\nutils.plot_latent(pca_latent[:z_rna_mmdma.shape[0],:], pca_latent[z_rna_mmdma.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"joint\", save = path + \"mmdma_pca.png\", figsize = (15,7), axis_label = \"PCA\")\nutils.plot_latent(pca_latent[:z_rna_mmdma.shape[0],:], pca_latent[z_rna_mmdma.shape[0]:,:], anno1 = label_rna.values, anno2 = label_atac.values, \nmode = \"modality\", save = path + \"mmdma_batches_pca.png\", figsize = (15,7), axis_label = \"PCA\")\n\n# Infer backbone\nroot_cell = 450\ndpt_mtx = ti.dpt(np.concatenate((z_rna_mmdma, z_atac_mmdma), axis = 0), n_neigh = 10)\npt_infer = dpt_mtx[root_cell, :]\npt_infer[pt_infer.argsort()] = np.arange(len(pt_infer))\npt_infer = pt_infer/np.max(pt_infer)\n# for scRNA-Seq batch\npt_infer_rna = pt_infer[:z_rna_mmdma.shape[0]]\n# for scATAC-Seq batch\npt_infer_atac = pt_infer[z_rna_mmdma.shape[0]:]\n\npca_op = PCA(n_components = 2)\nz = pca_op.fit_transform(np.concatenate((z_rna_mmdma, z_atac_mmdma), axis = 0))\nz_rna_pca = z[:z_rna_mmdma.shape[0],:]\nz_atac_pca = z[z_rna_mmdma.shape[0]:,:]\n\ncell_labels = np.concatenate((label_rna, label_atac), axis = 0).squeeze()\ngroups, mean_cluster, T = backbone_inf(z_rna_mmdma, z_atac_mmdma, cell_labels)\nmean_cluster = pca_op.transform(np.array(mean_cluster))\n\nplot_backbone(z_rna_pca, z_atac_pca, mode = \"joint\", mean_cluster = mean_cluster, groups = groups, T = T, figsize=(15,7), save = path + \"backbone_full.png\", anno = cell_labels, axis_label = \"PCA\")\nutils.plot_latent_pt(z1 = z_rna_pca, z2 = z_atac_pca, pt1 = pt_infer_rna, pt2 = pt_infer_atac, mode = \"joint\", save = path + \"z_pt_full.png\", figsize = (15,7), axis_label = \"PCA\")\n\n\n# In[] Neighborhood overlap score, use also the unionCom, LIGER and Seurat\n# Both the neighborhood overlap and pseudotime alignment are higher for scDART when the number of genes increase\npath = \"results_snare/liger/\"\nz_rna_liger = pd.read_csv(path + \"H1_full.csv\", index_col = 0).values\nz_atac_liger = pd.read_csv(path + \"H2_full.csv\", index_col = 0).values\n\npath = \"results_snare/seurat/\"\ncoembed = pd.read_csv(path + \"pca_embedding.txt\", sep = \"\\t\").values\nz_rna_seurat = coembed[:label_rna.values.shape[0],:]\nz_atac_seurat = coembed[label_rna.values.shape[0]:,:]\n\npath = \"results_snare/unioncom/\"\nz_rna_unioncom = np.load(path + \"unioncom_rna_32.npy\")\nz_atac_unioncom = np.load(path + \"unioncom_atac_32.npy\")\n\npath = \"results_snare/mmd_ma/\"\nz_rna_mmdma = np.load(path + \"mmd_ma_rna.npy\")\nz_atac_mmdma = np.load(path + \"mmd_ma_atac.npy\")\n\npath = \"results_snare/scJoint_snare_raw_traj/\"\nz_rna_scJoint = np.loadtxt(path + \"counts_rna_embeddings.txt\")\nz_atac_scJoint = np.loadtxt(path + \"counts_atac_embeddings.txt\")\n\npath = \"results_snare/models_1000/\"\nz_rna_scdart = np.load(file = path + \"z_rna_\" + str(latent_dim) + \"_\" + str(reg_d) + \"_\" + str(reg_g) + \"_\" + 
str(reg_mmd) + \"_\" + str(seed) + \"_\" + norm + \".npy\")\nz_atac_scdart = np.load(file = path + \"z_atac_\" + str(latent_dim) + \"_\" + str(reg_d) + \"_\" + str(reg_g) + \"_\" + str(reg_mmd) + \"_\" + str(seed) + \"_\" + norm + \".npy\")\nz_rna_scdart = torch.FloatTensor(z_rna_scdart)\nz_atac_scdart = torch.FloatTensor(z_atac_scdart)\nz_rna_scdart, z_atac_scdart = palign.match_alignment(z_rna = z_rna_scdart, z_atac = z_atac_scdart, k = 10)\nz_atac_scdart, z_rna_scdart = palign.match_alignment(z_rna = z_atac_scdart, z_atac = z_rna_scdart, k = 10)\nz_rna_scdart = z_rna_scdart.numpy()\nz_atac_scdart = z_atac_scdart.numpy()\n\nscore_liger = []\nscore_seurat = []\nscore_unioncom = []\nscore_scdart = []\nscore_mmdma = []\nscore_scJoint = []\n\nfor k in range(10, 1000, 10):\n score_liger.append(bmk.neigh_overlap(z_rna_liger, z_atac_liger, k = k))\n score_unioncom.append(bmk.neigh_overlap(z_rna_unioncom, z_atac_unioncom, k = k))\n score_seurat.append(bmk.neigh_overlap(z_rna_seurat, z_atac_seurat, k = k))\n score_scdart.append(bmk.neigh_overlap(z_rna_scdart, z_atac_scdart, k = k))\n score_mmdma.append(bmk.neigh_overlap(z_rna_mmdma, z_atac_mmdma, k = k))\n score_scJoint.append(bmk.neigh_overlap(z_rna_scJoint, z_atac_scJoint, k = k))\n\nscore_liger = np.array(score_liger)\nscore_seurat = np.array(score_seurat)\nscore_unioncom = np.array(score_unioncom)\nscore_scdart = np.array(score_scdart)\nscore_mmdma = np.array(score_mmdma)\nscore_scJoint = np.array(score_scJoint)\n\nfig = plt.figure(figsize = (10, 7))\nax = fig.add_subplot()\nax.plot(np.arange(10, 1000, 10), score_liger, label = \"LIGER\")\nax.plot(np.arange(10, 1000, 10), score_unioncom, label = \"UnionCom\")\nax.plot(np.arange(10, 1000, 10), score_seurat, label = \"Seurat\")\nax.plot(np.arange(10, 1000, 10), score_scdart, label = \"scDART\")\nax.plot(np.arange(10, 1000, 10), score_mmdma, label = \"MMD-MA\")\nax.plot(np.arange(10, 1000, 10), score_scJoint, label = \"scJoint\")\nax.legend()\nax.set_xlabel(\"Neighborhood size\")\nax.set_ylabel(\"Neighborhood overlap\")\nax.set_xticks([0, 200, 400, 600, 800, 1000])\nfig.savefig(\"results_snare/neigh_ov.png\", bbox_inches = \"tight\")\n\n# In[] check pseudotime correlation\n\n# root, manually found\nroot_cell = 450\n# scdart\ndpt_mtx = ti.dpt(np.concatenate((z_rna_scdart, z_atac_scdart), axis = 0), n_neigh = 10)\npt_infer_scdart = dpt_mtx[root_cell, :]\npt_infer_scdart[pt_infer_scdart.argsort()] = np.arange(len(pt_infer_scdart))\npt_infer_scdart = pt_infer_scdart/np.max(pt_infer_scdart)\nspearman_scdart, _ = spearmanr(pt_infer_scdart[:z_rna_scdart.shape[0]], pt_infer_scdart[z_rna_scdart.shape[0]:])\npearson_scdart, _ = pearsonr(pt_infer_scdart[:z_rna_scdart.shape[0]], pt_infer_scdart[z_rna_scdart.shape[0]:])\n\n# liger\ndpt_mtx = ti.dpt(np.concatenate((z_rna_liger, z_atac_liger), axis = 0), n_neigh = 10)\npt_infer_liger = dpt_mtx[root_cell, :]\npt_infer_liger[pt_infer_liger.argsort()] = np.arange(len(pt_infer_liger))\npt_infer_liger = pt_infer_liger/np.max(pt_infer_liger)\nspearman_liger, _ = spearmanr(pt_infer_liger[:z_rna_liger.shape[0]], pt_infer_liger[z_rna_liger.shape[0]:])\npearson_liger, _ = pearsonr(pt_infer_liger[:z_rna_liger.shape[0]], pt_infer_liger[z_rna_liger.shape[0]:])\n\n# unioncom\ndpt_mtx = ti.dpt(np.concatenate((z_rna_unioncom, z_atac_unioncom), axis = 0), n_neigh = 10)\npt_infer_unioncom = dpt_mtx[root_cell, :]\npt_infer_unioncom[pt_infer_unioncom.argsort()] = np.arange(len(pt_infer_unioncom))\npt_infer_unioncom = pt_infer_unioncom/np.max(pt_infer_unioncom)\nspearman_unioncom, 
_ = spearmanr(pt_infer_unioncom[:z_rna_unioncom.shape[0]], pt_infer_unioncom[z_rna_unioncom.shape[0]:])\npearson_unioncom, _ = pearsonr(pt_infer_unioncom[:z_rna_unioncom.shape[0]], pt_infer_unioncom[z_rna_unioncom.shape[0]:])\n\n# seurat\ndpt_mtx = ti.dpt(np.concatenate((z_rna_seurat, z_atac_seurat), axis = 0), n_neigh = 10)\npt_infer_seurat = dpt_mtx[root_cell, :]\npt_infer_seurat[pt_infer_seurat.argsort()] = np.arange(len(pt_infer_seurat))\npt_infer_seurat = pt_infer_seurat/np.max(pt_infer_seurat)\nspearman_seurat, _ = spearmanr(pt_infer_seurat[:z_rna_seurat.shape[0]], pt_infer_seurat[z_rna_seurat.shape[0]:])\npearson_seurat, _ = pearsonr(pt_infer_seurat[:z_rna_seurat.shape[0]], pt_infer_seurat[z_rna_seurat.shape[0]:])\n\n# mmd-ma\ndpt_mtx = ti.dpt(np.concatenate((z_rna_mmdma, z_atac_mmdma), axis = 0), n_neigh = 10)\npt_infer_mmdma = dpt_mtx[root_cell, :]\npt_infer_mmdma[pt_infer_mmdma.argsort()] = np.arange(len(pt_infer_mmdma))\npt_infer_mmdma = pt_infer_mmdma/np.max(pt_infer_mmdma)\nspearman_mmdma, _ = spearmanr(pt_infer_mmdma[:z_rna_mmdma.shape[0]], pt_infer_mmdma[z_rna_mmdma.shape[0]:])\npearson_mmdma, _ = pearsonr(pt_infer_mmdma[:z_rna_mmdma.shape[0]], pt_infer_mmdma[z_rna_mmdma.shape[0]:])\n\n# scJoint\ndpt_mtx = ti.dpt(np.concatenate((z_rna_scJoint, z_atac_scJoint), axis = 0), n_neigh = 10)\npt_infer_scjoint = dpt_mtx[root_cell, :]\npt_infer_scjoint[pt_infer_scjoint.argsort()] = np.arange(len(pt_infer_scjoint))\npt_infer_scjoint = pt_infer_scjoint/np.max(pt_infer_scjoint)\nspearman_scjoint, _ = spearmanr(pt_infer_scjoint[:z_rna_scJoint.shape[0]], pt_infer_scjoint[z_rna_scJoint.shape[0]:])\npearson_scjoint, _ = pearsonr(pt_infer_scjoint[:z_rna_scJoint.shape[0]], pt_infer_scjoint[z_rna_scJoint.shape[0]:])\n\n\n# correlation smaller than 0.87, the one reported in the paper.\nprint(\"scDART: spearman: {:.4f}, pearson: {:.4f}\".format(spearman_scdart, pearson_scdart))\nprint(\"LIGER: spearman: {:.4f}, pearson: {:.4f}\".format(spearman_liger, pearson_liger))\nprint(\"Seurat: spearman: {:.4f}, pearson: {:.4f}\".format(spearman_seurat, pearson_seurat))\nprint(\"UnionCom: spearman: {:.4f}, pearson: {:.4f}\".format(spearman_unioncom, pearson_unioncom))\nprint(\"MMD-MA: spearman: {:.4f}, pearson: {:.4f}\".format(spearman_mmdma, pearson_mmdma))\nprint(\"scJoint: spearman: {:.4f}, pearson: {:.4f}\".format(spearman_scjoint, pearson_scjoint))\n\n# plot barplot\n\ndef show_values_on_bars(axs):\n def _show_on_single_plot(ax): \n for p in ax.patches:\n _x = p.get_x() + p.get_width() / 2\n _y = p.get_y() + p.get_height()\n value = '{:.4f}'.format(p.get_height())\n ax.text(_x, _y, value, ha=\"center\") \n\n if isinstance(axs, np.ndarray):\n for idx, ax in np.ndenumerate(axs):\n _show_on_single_plot(ax)\n else:\n _show_on_single_plot(axs)\n\nscores = pd.DataFrame(columns = [\"Method\", \"Spearman\", \"Pearson\"])\nscores[\"Method\"] = np.array([\"scDART\", \"LIGER\", \"Seurat\", \"UnionCom\", \"MMD-MA\", \"scJoint\"])\nscores[\"Spearman\"] = np.array([spearman_scdart, spearman_liger, spearman_seurat, spearman_unioncom, spearman_mmdma, spearman_scjoint])\nscores[\"Pearson\"] = np.array([pearson_scdart, pearson_liger, pearson_seurat, pearson_unioncom, pearson_mmdma, pearson_scjoint])\nimport seaborn as sns\nfig = plt.figure(figsize = (15,7))\nax = fig.subplots(nrows = 1, ncols = 2)\nax[0] = sns.barplot(data = scores, x = \"Method\", y = \"Spearman\", ax = ax[0], color = \"blue\", alpha = 0.7)\nax[1] = sns.barplot(data = scores, x = \"Method\", y = \"Pearson\", ax = ax[1], color = \"blue\", alpha = 
0.7)\nplt.tight_layout()\nax[0].set_xticklabels(labels = [\"scDART\", \"LIGER\", \"Seurat\", \"UnionCom\", \"MMD-MA\", \"scJoint\"], rotation = 45)\nax[1].set_xticklabels(labels = [\"scDART\", \"LIGER\", \"Seurat\", \"UnionCom\", \"MMD-MA\", \"scJoint\"], rotation = 45)\nnewwidth = 0.5\nfor bar1, bar2 in zip(ax[0].patches, ax[1].patches):\n x = bar1.get_x()\n width = bar1.get_width()\n centre = x+width/2.\n\n bar1.set_x(centre-newwidth/2.)\n bar1.set_width(newwidth)\n\n x = bar2.get_x()\n width = bar2.get_width()\n centre = x+width/2.\n\n bar2.set_x(centre-newwidth/2.)\n bar2.set_width(newwidth)\n\nshow_values_on_bars(ax)\nfig.savefig(\"results_snare/correlation.png\", bbox_inches = \"tight\")\n\n# In[] Clustering label consistency\ndef clust_consist(z_rna, z_atac, nclust = 3):\n # leiden, cannot fix cluster numbers\n # conn, _ = ti.nearest_neighbor(z_rna, k = k)\n # groups_rna, _ = ti.leiden(conn, resolution = resolution)\n # conn, _ = ti.nearest_neighbor(z_atac, k = k)\n # groups_atac, _ = ti.leiden(conn, resolution = resolution)\n\n # k-means\n from sklearn.cluster import KMeans\n groups_rna = KMeans(n_clusters = nclust, random_state = 0).fit(z_rna).labels_\n groups_atac = KMeans(n_clusters = nclust, random_state = 0).fit(z_atac).labels_\n\n # TODO: measuring the alignment of clustering, including ARI, NMI, Silhouette Score\n ari_score = bmk.ari(group1 = groups_rna, group2 = groups_atac)\n nmi_score = bmk.nmi(group1 = groups_rna, group2 = groups_atac)\n # print(\"number of clusters in RNA: {:d}\".format(np.max(groups_rna)))\n # print(\"number of clusters in ATAC: {:d}\".format(np.max(groups_atac)))\n # print(\"ARI: {:.3f}, NMI: {:.3f}\".format(ari_score, nmi_score))\n # Silhouette Score cannot be used for cluster label alignment\n return ari_score, nmi_score\n\nk = 15\nnclusts = [5]\nprint(\"method: scDART\")\nari_scdarts = []\nnmi_scdarts = []\nfor nclust in nclusts:\n ari_scdart, nmi_scdart = clust_consist(z_rna_scdart, z_atac_scdart, nclust = nclust)\n ari_scdarts.append(ari_scdart)\n nmi_scdarts.append(nmi_scdart)\n\nprint(\"method: LIGER\")\nari_ligers = []\nnmi_ligers = []\nfor nclust in nclusts:\n ari_liger, nmi_liger = clust_consist(z_rna_liger, z_atac_liger, nclust = nclust)\n ari_ligers.append(ari_liger)\n nmi_ligers.append(nmi_liger)\n\nprint(\"method: Seurat\")\nari_seurats = []\nnmi_seurats = []\nfor nclust in nclusts:\n ari_seurat, nmi_seurat = clust_consist(z_rna_seurat, z_atac_seurat, nclust = nclust)\n ari_seurats.append(ari_seurat)\n nmi_seurats.append(nmi_seurat)\n\nprint(\"method: UnionCom\")\nari_unioncoms = []\nnmi_unioncoms = []\nfor nclust in nclusts:\n ari_unioncom, nmi_unioncom = clust_consist(z_rna_unioncom, z_atac_unioncom, nclust = nclust)\n ari_unioncoms.append(ari_unioncom)\n nmi_unioncoms.append(nmi_unioncom)\n\nprint(\"method: MMD-MA\")\nari_mmdmas = []\nnmi_mmdmas = []\nfor nclust in nclusts:\n ari_mmdma, nmi_mmdma = clust_consist(z_rna_mmdma, z_atac_mmdma, nclust = nclust)\n ari_mmdmas.append(ari_mmdma)\n nmi_mmdmas.append(nmi_mmdma)\n\nprint(\"method: scJoint\")\nari_scjoints = []\nnmi_scjoints = []\nfor nclust in nclusts:\n ari_scjoint, nmi_scjoint = clust_consist(z_rna_scJoint, z_atac_scJoint, nclust = nclust)\n ari_scjoints.append(ari_scjoint)\n nmi_scjoints.append(nmi_scjoint)\n\nari_scdarts = np.array(ari_scdarts)\nari_ligers = np.array(ari_ligers)\nari_seurats = np.array(ari_seurats)\nari_unioncoms = np.array(ari_unioncoms)\nari_mmdmas = np.array(ari_mmdmas)\nari_scjoints = np.array(ari_scjoints)\n\nnmi_scdarts = 
np.array(nmi_scdarts)\nnmi_ligers = np.array(nmi_ligers)\nnmi_seurats = np.array(nmi_seurats)\nnmi_unioncoms = np.array(nmi_unioncoms)\nnmi_mmdmas = np.array(nmi_mmdmas)\nnmi_scjoints = np.array(nmi_scjoints)\n\nari_scdart = np.nanmax(ari_scdarts)\nari_liger = np.nanmax(ari_ligers)\nari_seurat = np.nanmax(ari_seurats)\nari_unioncom = np.nanmax(ari_unioncoms)\nari_mmdma = np.nanmax(ari_mmdmas)\nari_scjoint = np.nanmax(ari_scjoints)\n\nnmi_scdarts = np.nanmax(nmi_scdarts)\nnmi_ligers = np.nanmax(nmi_ligers)\nnmi_seurats = np.nanmax(nmi_seurats)\nnmi_unioncoms = np.nanmax(nmi_unioncoms)\nnmi_mmdmas = np.nanmax(nmi_mmdmas)\nnmi_scjoints = np.nanmax(nmi_scjoints)\n\nscores = pd.DataFrame(columns = [\"Method\", \"ARI\", \"NMI\"])\nscores[\"Method\"] = np.array([\"scDART\", \"LIGER\", \"Seurat\", \"UnionCom\", \"MMD-MA\", \"scJoint\"])\nscores[\"ARI\"] = np.array([ari_scdart, ari_liger, ari_seurat, ari_unioncom, ari_mmdma, ari_scjoint])\nscores[\"NMI\"] = np.array([nmi_scdart, nmi_liger, nmi_seurat, nmi_unioncom, nmi_mmdma, nmi_scjoint])\nimport seaborn as sns\nfig = plt.figure(figsize = (15,7))\nax = fig.subplots(nrows = 1, ncols = 2)\nax[0] = sns.barplot(data = scores, x = \"Method\", y = \"ARI\", ax = ax[0], color = \"blue\", alpha = 0.7)\nax[1] = sns.barplot(data = scores, x = \"Method\", y = \"NMI\", ax = ax[1], color = \"blue\", alpha = 0.7)\nplt.tight_layout()\nax[0].set_xticklabels(labels = [\"scDART\", \"LIGER\", \"Seurat\", \"UnionCom\", \"MMD-MA\", \"scJoint\"], rotation = 45)\nax[1].set_xticklabels(labels = [\"scDART\", \"LIGER\", \"Seurat\", \"UnionCom\", \"MMD-MA\", \"scJoint\"], rotation = 45)\nnewwidth = 0.5\nfor bar1, bar2 in zip(ax[0].patches, ax[1].patches):\n x = bar1.get_x()\n width = bar1.get_width()\n centre = x+width/2.\n\n bar1.set_x(centre-newwidth/2.)\n bar1.set_width(newwidth)\n\n x = bar2.get_x()\n width = bar2.get_width()\n centre = x+width/2.\n\n bar2.set_x(centre-newwidth/2.)\n bar2.set_width(newwidth)\n\nshow_values_on_bars(ax)\nfig.savefig(\"results_snare/cluster_consistency.png\", bbox_inches = \"tight\")\n\n# In[] Difference of distances\nfrom scipy.spatial.distance import pdist, squareform, cosine\ndef dist_diff(z_rna, z_atac):\n mse = 1/z_rna.shape[0] * np.sum(np.sqrt(np.sum((z_rna - z_atac) ** 2, axis = 1)))\n cos = 0\n for i in range(z_rna.shape[0]):\n cos += 1 - cosine(z_rna[i, :], z_atac[i, :])\n cos /= z_rna.shape[0]\n return mse, cos\n\nmse_scdart, cos_scdart = dist_diff(z_rna = z_rna_scdart, z_atac = z_atac_scdart)\nmse_seurat, cos_seurat = dist_diff(z_rna = z_rna_seurat, z_atac = z_atac_seurat)\nmse_liger, cos_liger = dist_diff(z_rna = z_rna_liger, z_atac = z_atac_liger)\nmse_mmdma, cos_mmdma = dist_diff(z_rna = z_rna_mmdma, z_atac = z_atac_mmdma)\nmse_unioncom, cos_unioncom = dist_diff(z_rna = z_rna_unioncom, z_atac = z_atac_unioncom)\nmse_scjoint, cos_scjoint = dist_diff(z_rna = z_rna_scJoint, z_atac = z_atac_scJoint)\n\nscores = pd.DataFrame(columns = [\"Method\", \"MSE\", \"cos_sim\"])\nscores[\"Method\"] = np.array([\"scDART\", \"LIGER\", \"Seurat\", \"UnionCom\", \"MMD-MA\", \"scJoint\"])\nscores[\"MSE\"] = np.array([mse_scdart, mse_liger, mse_seurat, mse_unioncom, mse_mmdma, mse_scjoint])\nscores[\"cos_sim\"] = np.array([cos_scdart, cos_liger, cos_seurat, cos_unioncom, cos_mmdma, cos_scjoint])\nimport seaborn as sns\nfig = plt.figure(figsize = (15,7))\naxs = fig.subplots(nrows = 1, ncols = 2)\naxs[0] = sns.barplot(data = scores, x = \"Method\", y = \"MSE\", ax = axs[0], color = \"blue\", alpha = 
0.7)\nplt.tight_layout()\naxs[0].set_xticklabels(labels = [\"scDART\", \"LIGER\", \"Seurat\", \"UnionCom\", \"MMD-MA\", \"scJoint\"], rotation = 45)\naxs[0].set_ylabel(\"MSE\")\nnewwidth = 0.5\nfor bar1 in axs[0].patches:\n x = bar1.get_x()\n width = bar1.get_width()\n centre = x+width/2.\n\n bar1.set_x(centre-newwidth/2.)\n bar1.set_width(newwidth)\n\nshow_values_on_bars(axs[0])\n\naxs[1] = sns.barplot(data = scores, x = \"Method\", y = \"cos_sim\", ax = axs[1], color = \"blue\", alpha = 0.7)\nplt.tight_layout()\naxs[1].set_xticklabels(labels = [\"scDART\", \"LIGER\", \"Seurat\", \"UnionCom\", \"MMD-MA\", \"scJoint\"], rotation = 45)\naxs[1].set_ylabel(\"cosine\")\nnewwidth = 0.5\nfor bar1 in axs[1].patches:\n x = bar1.get_x()\n width = bar1.get_width()\n centre = x+width/2.\n\n bar1.set_x(centre-newwidth/2.)\n bar1.set_width(newwidth)\n\nshow_values_on_bars(axs[1])\nfig.savefig(\"results_snare/pdist_consistency.png\", bbox_inches = \"tight\")\n\n\n# In[] good neighborhood overlap with mmd larger than 15\n'''\nscores = pd.read_csv(\"results_snare/scores_l1.csv\")\nimport seaborn as sns\nfig = plt.figure(figsize = (30,7))\naxs = fig.subplots(nrows = 1, ncols = 3)\nsns.boxplot(data = scores, x = \"reg_g\", y = \"mse\", hue = \"reg_mmd\", ax = axs[0])\nsns.boxplot(data = scores, x = \"reg_g\", y = \"mse_norm\", hue = \"reg_mmd\", ax = axs[1])\nsns.boxplot(data = scores, x = \"reg_g\", y = \"pearson\", hue = \"reg_mmd\", ax = axs[2])\nplt.tight_layout()\n\nfig = plt.figure(figsize = (10,7))\nax = fig.add_subplot()\nsns.boxplot(data = scores, x = \"reg_g\", y = \"neigh_overlap\", hue = \"reg_mmd\", ax = ax)\nplt.tight_layout()\n\n\nscores = pd.read_csv(\"results_snare/scores_l2.csv\")\nimport seaborn as sns\nfig = plt.figure(figsize = (30,7))\naxs = fig.subplots(nrows = 1, ncols = 3)\nsns.boxplot(data = scores, x = \"reg_g\", y = \"mse\", hue = \"reg_mmd\", ax = axs[0])\nsns.boxplot(data = scores, x = \"reg_g\", y = \"mse_norm\", hue = \"reg_mmd\", ax = axs[1])\nsns.boxplot(data = scores, x = \"reg_g\", y = \"pearson\", hue = \"reg_mmd\", ax = axs[2])\nplt.tight_layout()\n\n\nfig = plt.figure(figsize = (10,7))\nax = fig.add_subplot()\nsns.boxplot(data = scores, x = \"reg_g\", y = \"neigh_overlap\", hue = \"reg_mmd\", ax = ax)\nplt.tight_layout()\n'''\n\n\n# %%\n"
] | [
[
"numpy.nanmax",
"sklearn.cluster.KMeans",
"numpy.squeeze",
"torch.utils.data.DataLoader",
"scipy.stats.zscore",
"pandas.DataFrame",
"matplotlib.pyplot.get_cmap",
"numpy.concatenate",
"numpy.max",
"torch.FloatTensor",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.mean",
"scipy.stats.spearmanr",
"numpy.where",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"numpy.arange",
"numpy.load",
"matplotlib.ticker.FormatStrFormatter",
"numpy.zeros",
"matplotlib.pyplot.figure",
"pandas.concat",
"matplotlib.pyplot.cm.get_cmap",
"numpy.isnan",
"scipy.stats.pearsonr",
"pandas.DataFrame.from_dict",
"numpy.ndenumerate",
"numpy.argsort",
"numpy.array",
"sklearn.decomposition.PCA",
"numpy.sum",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.manual_seed",
"scipy.spatial.distance.cosine",
"matplotlib.pyplot.subplots",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
mjgm97/opengamedata-core | [
"b6b2aa6e7c80d1bf50f71ffba917672f80f7f948"
] | [
"models/FeatSeqPercent.py"
] | [
"## @module FeatSeqPercentModel\n# Feature to output the percentile that a session is at along a progression of gameplay timestamp features. Depends\n# heavily on the logic used to produce the quantiles file. The following exemplifies current implementation:\n#\n# Example: A game has 10 checkpoints. Joey has reached 3/10 checkpoints. His total playtime so far is 45 seconds.\n# The model will return the percentile of reaching checkpoint 4 at 45 seconds. If historically 70% of students take\n# longer than 45 seconds to reach checkpoint 4, the model will return 30%.\n#\n# Example: A game has 10 checkpoints. Joey has reached 10/10 checkpoints. His total playtime so far is 190 seconds.\n# He reached checkpoint 10 at 185 seconds. The model will return the percentile of reaching checkpoint 10 at 185\n# seconds, and will continue returning that same value for this session.\n#\n# @param feature_sequence: sequence of timedelta features that are to be in strictly ascending order\n# @param levels: levels applicable to this model\n# @param time_feat: \"sessDuration\" or \"sess_time_active\", depending if the model should be used for active time or\n# overall time. Make sure that the quantiles file uses the same time feature.\n# @param quantile_json_path: Path to a quantiles JSON file constructed by the _FeatureQuantiles private class\n\n\nfrom typing import List, Optional, Dict, Any\nimport pandas as pd\nfrom bisect import bisect_left\nimport numpy as np\nimport json\nfrom datetime import timedelta\nfrom models.FeatureModel import FeatureModel\nimport utils\n\n_POP_ACHS = \"exist group town city\".split()\n_FARM_ACHS = \"farmer farmers farmtown megafarm\".split()\n_MONEY_ACHS = \"paycheck thousandair stability riches\".split()\n_BLOOM_ACHS = \"bloom bigbloom hugebloom massivebloom\".split()\n_REQ_TUTORIALS = \"buy_food build_a_farm timewarp \\\nsuccessful_harvest sell_food buy_fertilizer buy_livestock \\\nlivestock poop rain\".split() # skip build_a_house - it comes at 0.0\n\n\ndef _get_sess_active_time_to_achievement_list(achs: List[str]) -> List[str]:\n return [f'sess_time_active_to_{a}_achievement' for a in achs]\n\n\ndef _get_sess_active_time_to_tutorial_list(tuts: List[str]) -> List[str]:\n return [f'sess_time_active_to_{t}_tutorial' for t in tuts]\n\n\ndef _get_quantiles(df: pd.DataFrame, feats: List[str], filter_debug:bool=True, filter_continue:bool=True) -> Dict[str, List[float]]:\n filter_strings = []\n if filter_debug:\n filter_strings += ['(debug==0)']\n if filter_continue:\n filter_strings += ['(c==0)']\n if filter_strings:\n df = df.rename({\"continue\": \"c\"}, axis=1).query(' & '.join(filter_strings)).rename({\"c\": \"continue\"}, axis=1)\n df = df[feats].replace(0.0, pd.NA)\n df = df.quantile(np.arange(0, 1, .01))\n quantiles = df.to_dict('list')\n return quantiles\n\n## @class _FeatureQuantiles\n# A private class used to create singleton access to quantile data used by the FeatSeqPercentModel class.\nclass _FeatureQuantiles(object):\n\n def __init__(self, arg, filter_continue=True):\n if type(arg) is str:\n json_path = arg\n with open(json_path) as f:\n self._quantiles = json.load(f)\n return\n\n df = arg\n cols = df.select_dtypes(include=\"number\").columns\n self._quantiles = _get_quantiles(df, cols, filter_continue=filter_continue)\n\n @classmethod\n def fromDF(cls, df: pd.DataFrame, filter_continue=True) -> 'FeatureQuantiles':\n return cls(df, filter_continue=filter_continue)\n\n @classmethod\n def fromCSV(cls, csv_path: str, filter_continue=True) -> 'FeatureQuantiles':\n df = 
pd.read_csv(csv_path, index_col='sessID')\n return cls(df, filter_continue=filter_continue)\n\n @classmethod\n def fromJSON(cls, quantile_json_path: str) -> 'FeatureQuantiles':\n return cls(quantile_json_path)\n\n def get_quantile(self, feat: str, value, verbose: bool = False,\n lo_to_hi: bool = True) -> int:\n quantile = bisect_left(self._quantiles[feat], value)\n if verbose:\n compare_str = \"higher\" if lo_to_hi else \"lower\"\n # print(quantile, len(self._quantiles[feat]))\n high_quant = self._quantiles[feat][quantile] if quantile < len(self._quantiles[feat]) else None\n low_quant = self._quantiles[feat][quantile - 1] if quantile > 0 else None\n quantile = quantile if lo_to_hi else 100 - quantile\n low_quant_offset = -1 if lo_to_hi else +1\n quant_low_str = f'{quantile + low_quant_offset}%={low_quant}'\n quant_high_str = f'{quantile}%={high_quant}'\n quant_str = f\"{quant_low_str} and {quant_high_str}\" if lo_to_hi else f\"{quant_high_str} and {quant_low_str}\"\n utils.Logger.toStdOut(\n f'A {feat} of {value} units is {compare_str} than {quantile}% (between {quant_str}) of sessions.')\n return quantile\n\n def _export_quantiles(self, path):\n with open(path, 'w+') as f:\n json.dump(self._quantiles, f, indent=4)\n\n\n\n## @class FeatSeqPercentModel\n# Feature to output the percentile that a session is at along a progression of gameplay timestamp features. Depends\n# heavily on the logic used to produce the quantiles file. The following exemplifies current implementation:\n#\n# Example: A game has 10 checkpoints. Joey has reached 3/10 checkpoints. His total playtime so far is 45 seconds.\n# The model will return the percentile of reaching checkpoint 4 at 45 seconds. If historically 70% of students take\n# longer than 45 seconds to reach checkpoint 4, the model will return 30%.\n#\n# Example: A game has 10 checkpoints. Joey has reached 10/10 checkpoints. His total playtime so far is 190 seconds.\n# He reached checkpoint 10 at 185 seconds. The model will return the percentile of reaching checkpoint 10 at 185\n# seconds, and will continue returning that same value for this session.\n#\n# @param feature_sequence: sequence of timedelta features that are to be in strictly ascending order\n# @param levels: levels applicable to this model\n# @param time_feat: \"sessDuration\" or \"sess_time_active\", depending if the model should be used for active time or\n# overall time. 
Make sure that the quantiles file uses the same time feature.\n# @param quantile_json_path: Path to a quantiles JSON file constructed by the _FeatureQuantiles private class\n\nclass FeatSeqPercentModel(FeatureModel):\n def __init__(self, feature_sequence: List[str], levels: List[int] = [], time_feat: str = 'sess_time_active',\n quantile_json_path: str = \"models/lakeland_data/quantiles_no_continue.json\"):\n self._quantile_json_path = quantile_json_path\n self._feature_sequence = feature_sequence\n self._time_feat = time_feat\n self._featureQuantiles = _FeatureQuantiles.fromJSON(\n quantile_json_path=quantile_json_path)\n\n super().__init__()\n\n def _eval(self, sess: dict, verbose: bool = False) -> Optional[float]:\n if sess['continue'] or sess['debug']:\n return None\n time_to_vals = [sess[f] for f in self._feature_sequence]\n idx_next = sum(bool(v) for v in time_to_vals)\n if idx_next < len(self._feature_sequence):\n next_feat = self._feature_sequence[idx_next]\n cur_time = sess[self._time_feat]\n else:\n next_feat = self._feature_sequence[-1]\n cur_time = time_to_vals[-1]\n if type(cur_time) is timedelta: # sessions features give float, but cgi might give timedelta\n cur_time = cur_time.seconds\n\n percentile_if_next_feat_now = self._featureQuantiles.get_quantile(next_feat, cur_time, verbose=verbose)\n\n return percentile_if_next_feat_now\n\n def __repr__(self):\n return f\"FeatSeqPercentModel(feature_sequence={self._feature_sequence}, time_feat='{self._time_feat}'\" \\\n f\"quantile_json_path='{self._quantile_json_path}'\" \\\n f\", levels={self._levels}, input_type={self._input_type})\"\n\n\n"
] | [
[
"numpy.arange",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
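The FeatSeqPercentModel code in the row above maps a raw time value to a percentile by running bisect_left over precomputed quantile cut points. A minimal, self-contained sketch of that lookup, assuming a hypothetical quantile table of 101 ascending cut points per feature (the feature name and values are illustrative, not taken from the row's quantiles JSON):

from bisect import bisect_left

# Hypothetical quantile table: feature name -> 101 ascending cut points
# (index i holds the i-th percentile value), mirroring the structure the
# _FeatureQuantiles class loads from its quantiles file.
quantiles = {"time_to_first_checkpoint": [float(v) for v in range(101)]}

def percentile_of(feat, value, table):
    # bisect_left counts how many cut points fall strictly below `value`,
    # which is the percentile rank returned by get_quantile above.
    return bisect_left(table[feat], value)

print(percentile_of("time_to_first_checkpoint", 45.0, quantiles))  # -> 45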
clembou/rasterio | [
"57169c31dae04e1319b4c4b607345475a7122910"
] | [
"examples/concurrent-cpu-bound.py"
] | [
"\"\"\"concurrent-cpu-bound.py\n\nOperate on a raster dataset window-by-window using a ThreadPoolExecutor.\n\nSimulates a CPU-bound thread situation where multiple threads can improve performance.\n\nWith -j 4, the program returns in about 1/4 the time as with -j 1.\n\"\"\"\n\nimport concurrent.futures\nimport multiprocessing\nimport time\n\nimport numpy\nimport rasterio\nfrom rasterio._example import compute\n\ndef main(infile, outfile, num_workers=4):\n\n with rasterio.drivers():\n\n # Open the source dataset.\n with rasterio.open(infile) as src:\n\n # Create a destination dataset based on source params.\n # The destination will be tiled, and we'll \"process\" the tiles\n # concurrently.\n meta = src.meta\n del meta['transform']\n meta.update(affine=src.affine)\n meta.update(blockxsize=256, blockysize=256, tiled='yes')\n with rasterio.open(outfile, 'w', **meta) as dst:\n\n # Define a generator for data, window pairs.\n # We use the new read() method here to a 3D array with all\n # bands, but could also use read_band().\n def jobs():\n for ij, window in dst.block_windows():\n data = src.read(window=window)\n result = numpy.zeros(data.shape, dtype=data.dtype)\n yield data, result, window\n\n # Submit the jobs to the thread pool executor.\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=num_workers) as executor:\n\n # Map the futures returned from executor.submit()\n # to their destination windows.\n #\n # The _example.compute function modifies no Python\n # objects and releases the GIL. It can execute\n # concurrently.\n future_to_window = {\n executor.submit(compute, data, res): (res, window)\n for data, res, window in jobs()}\n\n # As the processing jobs are completed, get the\n # results and write the data to the appropriate\n # destination window.\n for future in concurrent.futures.as_completed(\n future_to_window):\n\n result, window = future_to_window[future]\n\n # Since there's no multiband write() method yet in\n # Rasterio, we use write_band for each part of the\n # 3D data array.\n for i, arr in enumerate(result, 1):\n dst.write_band(i, arr, window=window)\n\n\nif __name__ == '__main__':\n\n import argparse\n\n parser = argparse.ArgumentParser(\n description=\"Concurrent raster processing demo\")\n parser.add_argument(\n 'input',\n metavar='INPUT',\n help=\"Input file name\")\n parser.add_argument(\n 'output',\n metavar='OUTPUT',\n help=\"Output file name\")\n parser.add_argument(\n '-j',\n metavar='NUM_JOBS',\n type=int,\n default=multiprocessing.cpu_count(),\n help=\"Number of concurrent jobs\")\n args = parser.parse_args()\n\n main(args.input, args.output, args.j)\n\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
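The rasterio example in the row above distributes window-sized chunks across a ThreadPoolExecutor and maps each future back to its destination window. The same future-to-window pattern, stripped of the rasterio I/O so it runs standalone (the compute stand-in and the window keys below are hypothetical):

import concurrent.futures
import numpy

def compute(data):
    # Stand-in for rasterio._example.compute; any routine that releases the
    # GIL (heavy NumPy math, C extensions) benefits from the thread pool.
    return data * 2

# Hypothetical 256x256 windows, keyed the same way the example keys futures.
windows = [((0, 256), (0, 256)), ((0, 256), (256, 512))]
tiles = {w: numpy.ones((3, 256, 256), dtype="uint8") for w in windows}

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    future_to_window = {executor.submit(compute, tiles[w]): w for w in windows}
    for future in concurrent.futures.as_completed(future_to_window):
        window = future_to_window[future]
        result = future.result()
        print(window, result.shape, result.max())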
bturner1273/HeathHackathon2018Winner | [
"87990299a2f0d168f6b33aa4c912419e89be853d"
] | [
"venv/lib/python3.6/site-packages/folium/utilities.py"
] | [
"from __future__ import (absolute_import, division, print_function)\n\nimport base64\nimport io\nimport json\nimport math\nimport os\nimport struct\nimport zlib\n\nfrom six import binary_type, text_type\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\ntry:\n from urllib.parse import uses_relative, uses_netloc, uses_params, urlparse\nexcept ImportError:\n from urlparse import uses_relative, uses_netloc, uses_params, urlparse\n\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _validate_location(location):\n \"\"\"Validates and formats location values before setting.\"\"\"\n if _isnan(location):\n raise ValueError('Location values cannot contain NaNs, '\n 'got {!r}'.format(location))\n if type(location) not in [list, tuple]:\n raise TypeError('Expected tuple/list for location, got '\n '{!r}'.format(location))\n\n if len(location) != 2:\n raise ValueError('Expected two values for location [lat, lon], '\n 'got {}'.format(len(location)))\n location = _locations_tolist(location)\n return location\n\n\ndef _validate_coordinates(coordinates):\n \"\"\"Validates multiple coordinates for the various markers in folium.\"\"\"\n if _isnan(coordinates):\n raise ValueError('Location values cannot contain NaNs, '\n 'got:\\n{!r}'.format(coordinates))\n coordinates = _locations_tolist(coordinates)\n return coordinates\n\n\ndef _locations_tolist(x):\n \"\"\"Transforms recursively a list of iterables into a list of list.\"\"\"\n if hasattr(x, '__iter__'):\n return list(map(_locations_tolist, x))\n else:\n return x\n\n\ndef _flatten(container):\n for i in container:\n if isinstance(i, (list, tuple)):\n for j in _flatten(i):\n yield j\n else:\n yield i\n\n\ndef _isnan(values):\n \"\"\"Check if there are NaNs values in the iterable.\"\"\"\n return any(math.isnan(value) for value in _flatten(values))\n\n\ndef _parse_path(**kw):\n \"\"\"\n Parse leaflet `Path` options.\n http://leafletjs.com/reference-1.2.0.html#path\n\n \"\"\"\n color = kw.pop('color', '#3388ff')\n return {\n 'stroke': kw.pop('stroke', True),\n 'color': color,\n 'weight': kw.pop('weight', 3),\n 'opacity': kw.pop('opacity', 1.0),\n 'lineCap': kw.pop('line_cap', 'round'),\n 'lineJoin': kw.pop('line_join', 'round'),\n 'dashArray': kw.pop('dash_array', None),\n 'dashOffset': kw.pop('dash_offset', None),\n 'fill': kw.pop('fill', False),\n 'fillColor': kw.pop('fill_color', color),\n 'fillOpacity': kw.pop('fill_opacity', 0.2),\n 'fillRule': kw.pop('fill_rule', 'evenodd'),\n 'bubblingMouseEvents': kw.pop('bubbling_mouse_events', True),\n }\n\n\ndef _parse_wms(**kw):\n \"\"\"\n Parse leaflet TileLayer.WMS options.\n http://leafletjs.com/reference-1.2.0.html#tilelayer-wms\n\n \"\"\"\n return {\n 'layers': kw.pop('layers', ''),\n 'styles': kw.pop('styles', ''),\n 'format': kw.pop('fmt', 'image/jpeg'),\n 'transparent': kw.pop('transparent', False),\n 'version': kw.pop('version', '1.1.1'),\n 'crs': kw.pop('crs', None),\n 'uppercase': kw.pop('uppercase', False),\n }\n\n\ndef image_to_url(image, colormap=None, origin='upper'):\n \"\"\"\n Infers the type of an image argument and transforms it into a URL.\n\n Parameters\n ----------\n image: string, file or array-like object\n * If string, it will be written directly in the output file.\n * If file, it's content will be converted as embedded in the\n output file.\n * If array-like, it will be converted to PNG base64 string and\n embedded in the output.\n origin: ['upper' | 'lower'], optional, default 'upper'\n Place the [0, 0] index of the array in 
the upper left or\n lower left corner of the axes.\n colormap: callable, used only for `mono` image.\n Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]\n for transforming a mono image into RGB.\n It must output iterables of length 3 or 4, with values between\n 0. and 1. You can use colormaps from `matplotlib.cm`.\n\n \"\"\"\n if isinstance(image, (text_type, binary_type)) and not _is_url(image):\n fileformat = os.path.splitext(image)[-1][1:]\n with io.open(image, 'rb') as f:\n img = f.read()\n b64encoded = base64.b64encode(img).decode('utf-8')\n url = 'data:image/{};base64,{}'.format(fileformat, b64encoded)\n elif 'ndarray' in image.__class__.__name__:\n img = write_png(image, origin=origin, colormap=colormap)\n b64encoded = base64.b64encode(img).decode('utf-8')\n url = 'data:image/png;base64,{}'.format(b64encoded)\n else:\n # Round-trip to ensure a nice formatted json.\n url = json.loads(json.dumps(image))\n return url.replace('\\n', ' ')\n\n\ndef _is_url(url):\n \"\"\"Check to see if `url` has a valid protocol.\"\"\"\n try:\n return urlparse(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef write_png(data, origin='upper', colormap=None):\n \"\"\"\n Transform an array of data into a PNG string.\n This can be written to disk using binary I/O, or encoded using base64\n for an inline PNG like this:\n\n >>> png_str = write_png(array)\n >>> \"data:image/png;base64,\"+png_str.encode('base64')\n\n Inspired from\n https://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image\n\n Parameters\n ----------\n data: numpy array or equivalent list-like object.\n Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)\n\n origin : ['upper' | 'lower'], optional, default 'upper'\n Place the [0,0] index of the array in the upper left or lower left\n corner of the axes.\n\n colormap : callable, used only for `mono` image.\n Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]\n for transforming a mono image into RGB.\n It must output iterables of length 3 or 4, with values between\n 0. and 1. 
Hint: you can use colormaps from `matplotlib.cm`.\n\n Returns\n -------\n PNG formatted byte string\n\n \"\"\"\n if np is None:\n raise ImportError('The NumPy package is required '\n ' for this functionality')\n\n if colormap is None:\n def colormap(x):\n return (x, x, x, 1)\n\n arr = np.atleast_3d(data)\n height, width, nblayers = arr.shape\n\n if nblayers not in [1, 3, 4]:\n raise ValueError('Data must be NxM (mono), '\n 'NxMx3 (RGB), or NxMx4 (RGBA)')\n assert arr.shape == (height, width, nblayers)\n\n if nblayers == 1:\n arr = np.array(list(map(colormap, arr.ravel())))\n nblayers = arr.shape[1]\n if nblayers not in [3, 4]:\n raise ValueError('colormap must provide colors of r'\n 'length 3 (RGB) or 4 (RGBA)')\n arr = arr.reshape((height, width, nblayers))\n assert arr.shape == (height, width, nblayers)\n\n if nblayers == 3:\n arr = np.concatenate((arr, np.ones((height, width, 1))), axis=2)\n nblayers = 4\n assert arr.shape == (height, width, nblayers)\n assert nblayers == 4\n\n # Normalize to uint8 if it isn't already.\n if arr.dtype != 'uint8':\n with np.errstate(divide='ignore', invalid='ignore'):\n arr = arr * 255./arr.max(axis=(0, 1)).reshape((1, 1, 4))\n arr[~np.isfinite(arr)] = 0\n arr = arr.astype('uint8')\n\n # Eventually flip the image.\n if origin == 'lower':\n arr = arr[::-1, :, :]\n\n # Transform the array to bytes.\n raw_data = b''.join([b'\\x00' + arr[i, :, :].tobytes()\n for i in range(height)])\n\n def png_pack(png_tag, data):\n chunk_head = png_tag + data\n return (struct.pack('!I', len(data)) +\n chunk_head +\n struct.pack('!I', 0xFFFFFFFF & zlib.crc32(chunk_head)))\n\n return b''.join([\n b'\\x89PNG\\r\\n\\x1a\\n',\n png_pack(b'IHDR', struct.pack('!2I5B', width, height, 8, 6, 0, 0, 0)),\n png_pack(b'IDAT', zlib.compress(raw_data, 9)),\n png_pack(b'IEND', b'')])\n\n\ndef mercator_transform(data, lat_bounds, origin='upper', height_out=None):\n \"\"\"\n Transforms an image computed in (longitude,latitude) coordinates into\n the a Mercator projection image.\n\n Parameters\n ----------\n\n data: numpy array or equivalent list-like object.\n Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)\n\n lat_bounds : length 2 tuple\n Minimal and maximal value of the latitude of the image.\n Bounds must be between -85.051128779806589 and 85.051128779806589\n otherwise they will be clipped to that values.\n\n origin : ['upper' | 'lower'], optional, default 'upper'\n Place the [0,0] index of the array in the upper left or lower left\n corner of the axes.\n\n height_out : int, default None\n The expected height of the output.\n If None, the height of the input is used.\n\n See https://en.wikipedia.org/wiki/Web_Mercator for more details.\n\n \"\"\"\n import numpy as np\n\n def mercator(x):\n return np.arcsinh(np.tan(x*np.pi/180.))*180./np.pi\n\n array = np.atleast_3d(data).copy()\n height, width, nblayers = array.shape\n\n lat_min = max(lat_bounds[0], -85.051128779806589)\n lat_max = min(lat_bounds[1], 85.051128779806589)\n if height_out is None:\n height_out = height\n\n # Eventually flip the image\n if origin == 'upper':\n array = array[::-1, :, :]\n\n lats = (lat_min + np.linspace(0.5/height, 1.-0.5/height, height) *\n (lat_max-lat_min))\n latslats = (mercator(lat_min) +\n np.linspace(0.5/height_out, 1.-0.5/height_out, height_out) *\n (mercator(lat_max)-mercator(lat_min)))\n\n out = np.zeros((height_out, width, nblayers))\n for i in range(width):\n for j in range(nblayers):\n out[:, i, j] = np.interp(latslats, mercator(lats), array[:, i, j])\n\n # Eventually flip the image.\n if origin 
== 'upper':\n out = out[::-1, :, :]\n return out\n"
] | [
[
"numpy.linspace",
"numpy.isfinite",
"numpy.ones",
"numpy.tan",
"numpy.atleast_3d",
"numpy.errstate",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
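image_to_url in the folium utilities above embeds a local image by base64-encoding its bytes into a data URI. A minimal stdlib-only sketch of that branch, assuming a placeholder file path rather than folium's own API:

import base64
import io

def file_to_data_url(path, fmt="png"):
    # Same idea as the local-file branch of image_to_url: read the bytes,
    # base64-encode them, and wrap them in a data URI.
    with io.open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    return "data:image/{};base64,{}".format(fmt, encoded)

# url = file_to_data_url("tile.png")  # "tile.png" is a placeholder path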
yjs1224/TextSteg | [
"ba5847d6835d35f3b08fa2dc598933c5db821478"
] | [
"utils.py"
] | [
"import collections\r\nimport random\r\nimport numpy as np\r\nimport configparser\r\nimport json\r\nimport datasets\r\n#\r\nclass MyConfigParser(configparser.ConfigParser):\r\n\tdef optionxform(self, optionstr):\r\n\t\treturn optionstr\r\n\r\nclass MyDict(dict):\r\n\t__setattr__ = dict.__setitem__\r\n\t__getattr__ = dict.__getitem__\r\n\r\n\r\nclass RemovedConfig(object):\r\n\tdef __init__(self, config_path):\r\n\t\tconfig = MyConfigParser()\r\n\t\tconfig.read(config_path, encoding=\"utf-8\")\r\n\t\tself.configs = self.dictobj2obj(config.__dict__[\"_sections\"])\r\n\r\n\r\n\tdef dictobj2obj(self, dictobj):\r\n\t\tif not isinstance(dictobj, dict):\r\n\t\t\treturn dictobj\r\n\t\td = MyDict()\r\n\t\tfor k,v in dictobj.items():\r\n\t\t\td[k] = self.dictobj2obj(v)\r\n\t\treturn d\r\n\r\n\tdef get_configs(self):\r\n\t\treturn self.configs\r\n\r\n\r\nclass Config(object):\r\n\tdef __init__(self, config_path):\r\n\t\tconfigs = json.load(open(config_path, \"r\", encoding=\"utf-8\"))\r\n\t\tself.configs = self.dictobj2obj(configs)\r\n\r\n\tdef dictobj2obj(self, dictobj):\r\n\t\tif not isinstance(dictobj, dict):\r\n\t\t\treturn dictobj\r\n\t\td = MyDict()\r\n\t\tfor k,v in dictobj.items():\r\n\t\t\td[k] = self.dictobj2obj(v)\r\n\t\treturn d\r\n\r\n\tdef get_configs(self):\r\n\t\treturn self.configs\r\n\r\n\r\nclass Vocabulary(object):\r\n\tdef __init__(self, data_path, max_len=200, min_len=5, word_drop=5, encoding='utf8'):\r\n\t\tif type(data_path) == str:\r\n\t\t\tdata_path = [data_path]\r\n\t\tself._data_path = data_path\r\n\t\tself._max_len = max_len\r\n\t\tself._min_len = min_len\r\n\t\tself._word_drop = word_drop\r\n\t\tself._encoding = encoding\r\n\t\tself.token_num = 0\r\n\t\tself.vocab_size_raw = 0\r\n\t\tself.vocab_size = 0\r\n\t\tself.w2i = {}\r\n\t\tself.i2w = {}\r\n\t\tself.start_words = []\r\n\t\tself._build_vocabulary()\r\n\r\n\tdef _build_vocabulary(self):\r\n\t\tself.w2i['_PAD'] = 0\r\n\t\tself.w2i['_UNK'] = 1\r\n\t\tself.w2i['_BOS'] = 2\r\n\t\tself.w2i['_EOS'] = 3\r\n\t\tself.i2w[0] = '_PAD'\r\n\t\tself.i2w[1] = '_UNK'\r\n\t\tself.i2w[2] = '_BOS'\r\n\t\tself.i2w[3] = '_EOS'\r\n\t\twords_all = []\r\n\t\tstart_words = []\r\n\t\tfor data_path in self._data_path:\r\n\t\t\twith open(data_path, 'r', encoding=self._encoding) as f:\r\n\t\t\t\tsentences = f.readlines()\r\n\t\t\tfor sentence in sentences:\r\n\t\t\t\t# _ = list(filter(lambda x: x not in [None, ''], sentence.split()))\r\n\t\t\t\t_ = sentence.split()\r\n\t\t\t\tif (len(_) >= self._min_len) and (len(_) <= self._max_len):\r\n\t\t\t\t\twords_all.extend(_)\r\n\t\t\t\t\tstart_words.append(_[0])\r\n\t\tself.token_num = len(words_all)\r\n\t\tword_distribution = sorted(collections.Counter(words_all).items(), key=lambda x: x[1], reverse=True)\r\n\t\tself.vocab_size_raw = len(word_distribution)\r\n\t\tfor (word, value) in word_distribution:\r\n\t\t\tif value > self._word_drop:\r\n\t\t\t\tself.w2i[word] = len(self.w2i)\r\n\t\t\t\tself.i2w[len(self.i2w)] = word\r\n\t\tself.vocab_size = len(self.i2w)\r\n\t\tstart_word_distribution = sorted(collections.Counter(start_words).items(), key=lambda x: x[1], reverse=True)\r\n\t\tself.start_words = [_[0] for _ in start_word_distribution]\r\n\r\n\r\nclass UNK_Vocabulary(object):\r\n\tdef __init__(self, data_path, max_len=200, min_len=5, word_drop=5, encoding='utf8'):\r\n\t\tif type(data_path) == str:\r\n\t\t\tdata_path = [data_path]\r\n\t\tself._data_path = data_path\r\n\t\tself._max_len = max_len\r\n\t\tself._min_len = min_len\r\n\t\tself._word_drop = word_drop\r\n\t\tself._encoding = 
encoding\r\n\t\tself.token_num = 0\r\n\t\tself.vocab_size_raw = 0\r\n\t\tself.vocab_size = 0\r\n\t\tself.w2i = {}\r\n\t\tself.i2w = {}\r\n\t\tself.start_words = []\r\n\t\tself._build_vocabulary()\r\n\r\n\tdef _build_vocabulary(self):\r\n\t\t# self.w2i['_PAD'] = 0\r\n\t\t# self.w2i['_UNK'] = 1\r\n\t\t# self.w2i['_BOS'] = 2\r\n\t\t# self.w2i['_EOS'] = 3\r\n\t\t# self.i2w[0] = '_PAD'\r\n\t\t# self.i2w[1] = '_UNK'\r\n\t\t# self.i2w[2] = '_BOS'\r\n\t\t# self.i2w[3] = '_EOS'\r\n\t\twords_all = []\r\n\t\tstart_words = []\r\n\t\tfor data_path in self._data_path:\r\n\t\t\twith open(data_path, 'r', encoding=self._encoding) as f:\r\n\t\t\t\tsentences = f.readlines()\r\n\t\t\tfor sentence in sentences:\r\n\t\t\t\t# _ = list(filter(lambda x: x not in [None, ''], sentence.split()))\r\n\t\t\t\t_ = sentence.split()\r\n\t\t\t\tif (len(_) >= self._min_len) and (len(_) <= self._max_len):\r\n\t\t\t\t\twords_all.extend(_)\r\n\t\t\t\t\tstart_words.append(_[0])\r\n\t\tself.token_num = len(words_all)\r\n\t\tword_distribution = sorted(collections.Counter(words_all).items(), key=lambda x: x[1], reverse=True)\r\n\r\n\t\tself.vocab_size_raw = len(word_distribution)\r\n\t\tfor (word, value) in word_distribution:\r\n\t\t\tif value <= self._word_drop:\r\n\t\t\t\tself.w2i[word] = len(self.w2i)\r\n\t\t\t\tself.i2w[len(self.i2w)] = word\r\n\t\tself.vocab_size = len(self.i2w)\r\n\t\tself.unk_distribution = np.zeros(self.vocab_size)\r\n\t\tfor (w, c) in word_distribution:\r\n\t\t\tif c <= self._word_drop:\r\n\t\t\t\tself.unk_distribution[self.w2i[w]] = c\r\n\t\tself.unk_distribution = self.unk_distribution/np.sum(self.unk_distribution)\r\n\t\tstart_word_distribution = sorted(collections.Counter(start_words).items(), key=lambda x: x[1], reverse=True)\r\n\t\tself.start_unk_distribution = []\r\n\t\tfor (w,c) in start_word_distribution:\r\n\t\t\tif c <= self._word_drop:\r\n\t\t\t\tself.start_unk_distribution.append(c)\r\n\t\tself.start_unk_distribution = np.array(self.start_unk_distribution)\r\n\t\tself.start_unk_distribution = self.start_unk_distribution/np.sum(self.start_unk_distribution)\r\n\r\n\t\tself.start_words = [_[0] for _ in start_word_distribution]\r\n\r\n\r\n\tdef sample(self):\r\n\t\tcand_ = [i for i in range(self.vocab_size)]\r\n\t\tid = np.random.choice(cand_,1, p=self.unk_distribution)[0]\r\n\t\treturn id\r\n\r\n\tdef start_sample(self):\r\n\t\tcand_ = [i for i in range(len(self.start_unk_distribution))]\r\n\t\tid = np.random.choice(cand_,1, p=self.start_unk_distribution)[0]\r\n\t\treturn id\r\n\r\nclass Corpus(object):\r\n\tdef __init__(self, data_path, vocabulary, max_len=200, min_len=5):\r\n\t\tif type(data_path) == str:\r\n\t\t\tdata_path = [data_path]\r\n\t\tself._data_path = data_path\r\n\t\tself._vocabulary = vocabulary\r\n\t\tself._max_len = max_len\r\n\t\tself._min_len = min_len\r\n\t\tself.corpus = []\r\n\t\tself.corpus_length = []\r\n\t\tself.labels = []\r\n\t\tself.sentence_num = 0\r\n\t\tself.max_sentence_length = 0\r\n\t\tself.min_sentence_length = 0\r\n\t\tself._build_corpus()\r\n\r\n\tdef _build_corpus(self):\r\n\t\tdef _transfer(word):\r\n\t\t\ttry:\r\n\t\t\t\treturn self._vocabulary.w2i[word]\r\n\t\t\texcept:\r\n\t\t\t\treturn self._vocabulary.w2i['_UNK']\r\n\t\tlabel = -1\r\n\t\tfor data_path in self._data_path:\r\n\t\t\tlabel += 1\r\n\t\t\twith open(data_path, 'r', encoding='utf8') as f:\r\n\t\t\t\tsentences = f.readlines()\r\n\t\t\t# sentences = list(filter(lambda x: x not in [None, ''], sentences))\r\n\t\t\tfor sentence in sentences:\r\n\t\t\t\t# sentence = list(filter(lambda x: x not in 
[None, ''], sentence.split()))\r\n\t\t\t\tsentence = sentence.split()\r\n\t\t\t\tif (len(sentence) >= self._min_len) and (len(sentence) <= self._max_len):\r\n\t\t\t\t\tsentence = ['_BOS'] + sentence + ['_EOS']\r\n\t\t\t\t\tself.corpus.append(list(map(_transfer, sentence)))\r\n\t\t\t\t\tself.labels.append(label)\r\n\t\tself.corpus_length = [len(i) for i in self.corpus]\r\n\t\tself.max_sentence_length = max(self.corpus_length)\r\n\t\tself.min_sentence_length = min(self.corpus_length)\r\n\t\tself.sentence_num = len(self.corpus)\r\n\r\n\r\ndef split_corpus(data_path, train_path, test_path, max_len=200, min_len=5, ratio=0.8, seed=0, encoding='utf8',is_inverse=False, inverse_mode=0):\r\n\twith open(data_path, 'r', encoding=encoding) as f:\r\n\t\tsentences = f.readlines()\r\n\tsentences = [_ for _ in filter(lambda x: x not in [None, ''], sentences)\r\n\t if len(_.split()) <= max_len and len(_.split()) >= min_len]\r\n\tnp.random.seed(seed)\r\n\tnp.random.shuffle(sentences)\r\n\ttrain = sentences[:int(len(sentences) * ratio)]\r\n\ttest = sentences[int(len(sentences) * ratio):]\r\n\tif is_inverse:\r\n\t\tif inverse_mode == 0:\r\n\t\t\twith open(train_path, 'w', encoding='utf8') as f:\r\n\t\t\t\tfor sentence in train:\r\n\t\t\t\t\tf.write(\" \".join(sentence.split()[::-1]) + \"\\n\")\r\n\t\t\twith open(test_path, 'w', encoding='utf8') as f:\r\n\t\t\t\tfor sentence in test:\r\n\t\t\t\t\tf.write(\" \".join(sentence.split()[::-1]) + \"\\n\")\r\n\t\tif inverse_mode == 1:\r\n\t\t\tnew_sentences = []\r\n\t\t\tfor sentence in sentences:\r\n\t\t\t\twords = sentence.split()\r\n\t\t\t\tfor i in range(len(words)):\r\n\t\t\t\t\tnew_sentences.append(\" \".join(words[:i+1][::-1]) + \"\\n\")\r\n\t\t\tnp.random.shuffle(new_sentences)\r\n\t\t\tnew_sentences = new_sentences[:2000000] # down sampling\r\n\t\t\ttrain = new_sentences[:int(len(new_sentences) * ratio)]\r\n\t\t\ttest = new_sentences[int(len(new_sentences) * ratio):]\r\n\t\t\twith open(train_path, 'w', encoding='utf8') as f:\r\n\t\t\t\tfor sentence in train:\r\n\t\t\t\t\tf.write(sentence)\r\n\t\t\twith open(test_path, 'w', encoding='utf8') as f:\r\n\t\t\t\tfor sentence in test:\r\n\t\t\t\t\tf.write(sentence)\r\n\telse:\r\n\t\twith open(train_path, 'w', encoding='utf8') as f:\r\n\t\t\tfor sentence in train:\r\n\t\t\t\tf.write(sentence)\r\n\t\twith open(test_path, 'w', encoding='utf8') as f:\r\n\t\t\tfor sentence in test:\r\n\t\t\t\tf.write(sentence)\r\n\r\n\r\nclass Generator(object):\r\n\tdef __init__(self, data):\r\n\t\tself._data = data\r\n\r\n\tdef build_generator(self, batch_size, sequence_len, shuffle=True):\r\n\t\tif shuffle:\r\n\t\t\tnp.random.shuffle(self._data)\r\n\t\tdata_ = []\r\n\t\tfor _ in self._data:\r\n\t\t\tdata_.extend(_)\r\n\t\tbatch_num = len(data_) // (batch_size * sequence_len)\r\n\t\tdata = data_[:batch_size * batch_num * sequence_len]\r\n\t\tdata = np.array(data).reshape(batch_num * batch_size, sequence_len)\r\n\t\twhile True:\r\n\t\t\tbatch_data = data[0:batch_size] # 产生一个batch的index\r\n\t\t\tdata = data[batch_size:] # 去掉本次index\r\n\t\t\tif len(batch_data) == 0:\r\n\t\t\t\treturn True\r\n\t\t\tyield batch_data\r\n\r\n\r\ndef get_corpus_distribution(data_path):\r\n\twith open(data_path, \"r\", encoding=\"utf-8\") as f:\r\n\t\tlines = f.readlines()\r\n\tlengths = dict()\r\n\tfor line in lines:\r\n\t\twords = line.split(\"\\n\")[0].split()\r\n\t\tlength = len(words)\r\n\t\tif lengths.get(length, None) is None:\r\n\t\t\tlengths[length]=1\r\n\t\telse:\r\n\t\t\tlengths[length] += 1\r\n\tlengths_tmp = 
np.zeros(max(list(lengths.keys()))+1)\r\n\tfor k,v in lengths.items():\r\n\t\tlengths_tmp[k] = v\r\n\tlengths_norm = lengths_tmp\r\n\tlengths_norm = lengths_norm/np.sum(lengths_norm)\r\n\tlength_sum = 0\r\n\tlengths_cdf = np.zeros_like(lengths_norm)\r\n\tfor i,_ in enumerate(lengths_norm.tolist()):\r\n\t\tlength_sum += lengths_norm[i]\r\n\t\tlengths_cdf[i] = length_sum\r\n\treturn lengths_cdf\r\n\r\n\r\ndef location(cdf, key=42, num=100):\r\n\t'''\r\n\tlocation\r\n\tstart from 0\r\n\tend with 49\r\n\t'''\r\n\tnp.random.seed(key)\r\n\trandom.seed(key)\r\n\tuniform_data = np.random.uniform(size=(num,1))\r\n\tidxes = []\r\n\tfor u in uniform_data:\r\n\t\tidx = np.argwhere(cdf>=u)[0][0]\r\n\t\tidxes.append(int(idx))\r\n\treturn idxes\r\n\r\n\r\ndef sample_secret_message(data_path, key=42,num=100):\r\n\twith open(data_path, \"r\", encoding=\"utf-8\") as f:\r\n\t\tlines = f.readlines()\r\n\tnp.random.seed(key)\r\n\trandom.seed(key)\r\n\tsampled_message = []\r\n\tinds = np.random.randint(low=1,high=len(lines),size=num)\r\n\tfor i in inds:\r\n\t\tsampled_message.append(lines[i])\r\n\treturn sampled_message\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tvocabulary = Vocabulary('./data/corpus.txt', word_drop=10)\r\n\tsplit_corpus('./data/corpus.txt', './data/train_clothes', './data/test_clothes')\r\n\t# corpus = Corpus('F:/code/python/__data/dataset2020/news2020.txt', vocabulary)\r\n\ttest = Corpus('./data/test_clothes', vocabulary)\r\n\ttest_generator = Generator(test.corpus)\r\n\ttest_g = test_generator.build_generator(64, 50)\r\n\ttext = test_g.__next__()\r\n\tpass"
] | [
[
"numpy.random.seed",
"numpy.random.choice",
"numpy.random.shuffle",
"numpy.argwhere",
"numpy.zeros_like",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
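The Vocabulary class in the row above builds its word-to-index table by counting tokens and keeping only words whose frequency exceeds word_drop, after reserving the _PAD/_UNK/_BOS/_EOS slots. A small sketch of that step on toy sentences (the sentences and threshold are illustrative):

import collections

sentences = ["the cat sat", "the dog sat", "the cat ran"]
word_drop = 1  # drop words seen word_drop times or fewer

words = [w for s in sentences for w in s.split()]
counts = sorted(collections.Counter(words).items(), key=lambda x: x[1], reverse=True)

# Special tokens occupy the first indices, then surviving words in
# descending frequency order, exactly as in _build_vocabulary above.
w2i = {"_PAD": 0, "_UNK": 1, "_BOS": 2, "_EOS": 3}
for word, count in counts:
    if count > word_drop:
        w2i[word] = len(w2i)
i2w = {i: w for w, i in w2i.items()}
print(w2i)  # 'the', 'cat', 'sat' survive; 'dog', 'ran' fall below the threshold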
Taosheng-ty/ULTRA | [
"2541982cb21e0acccbe66cd4437194e40e0828ef"
] | [
"ultra/learning_algorithm/dla_attention.py"
] | [
"\"\"\"Training and testing the dual learning algorithm for unbiased learning to rank.\n\nSee the following paper for more information on the dual learning algorithm.\n \n * Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18\n \n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport random\nimport sys\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_ranking as tfr\nimport copy\nimport itertools\nfrom six.moves import zip\nfrom tensorflow import dtypes\n\nfrom ultra.learning_algorithm.base_algorithm import BaseAlgorithm\nimport ultra.utils as utils\n\n\ndef sigmoid_prob(logits):\n return tf.sigmoid(logits - tf.reduce_mean(logits, -1, keep_dims=True))\n\nclass DLA_atten(BaseAlgorithm):\n \"\"\"The Dual Learning Algorithm for unbiased learning to rank.\n\n This class implements the Dual Learning Algorithm (DLA) based on the input layer \n feed. See the following paper for more information on the simulation data.\n \n * Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18\n \n \"\"\"\n\n def __init__(self, data_set, exp_settings, forward_only=False):\n \"\"\"Create the model.\n \n Args:\n data_set: (Raw_data) The dataset used to build the input layer.\n exp_settings: (dictionary) The dictionary containing the model settings.\n forward_only: Set true to conduct prediction only, false to conduct training.\n \"\"\"\n print('Build DLA atten')\n\n self.hparams = tf.contrib.training.HParams(\n learning_rate=0.05, # Learning rate.\n max_gradient_norm=5.0, # Clip gradients to this norm.\n loss_func='click_weighted_softmax_cross_entropy', # Select Loss function\n logits_to_prob='softmax', # the function used to convert logits to probability distributions\n ranker_learning_rate=-1.0, # The learning rate for ranker (-1 means same with learning_rate).\n ranker_loss_weight=1.0, # Set the weight of unbiased ranking loss\n l2_loss=0.0, # Set strength for L2 regularization.\n l1_loss=0.0,\n max_propensity_weight = -1, # Set maximum value for propensity weights\n constant_propensity_initialization = False, # Set true to initialize propensity with constants.\n grad_strategy='ada', # Select gradient strategy\n )\n print(exp_settings['learning_algorithm_hparams'])\n self.model=None\n self.hparams.parse(exp_settings['learning_algorithm_hparams'])\n self.exp_settings = exp_settings\n\n self.max_candidate_num = exp_settings['max_candidate_num']\n self.feature_size = data_set.feature_size\n if self.hparams.ranker_learning_rate < 0:\n self.ranker_learning_rate = tf.Variable(float(self.hparams.learning_rate), trainable=False)\n else:\n self.ranker_learning_rate = tf.Variable(float(self.hparams.ranker_learning_rate), trainable=False)\n self.learning_rate = self.ranker_learning_rate\n# self.weighs_propen=\n # Feeds for inputs.\n self.is_training = tf.placeholder(tf.bool, name=\"is_train\")\n self.docid_inputs = [] # a list of top documents\n self.letor_features = tf.placeholder(tf.float32, shape=[None, self.feature_size], \n name=\"letor_features\") # the letor features for the documents\n self.labels = [] # the labels for the documents (e.g., clicks)\n self.types=[]\n for i in range(self.max_candidate_num):\n self.docid_inputs.append(tf.placeholder(tf.int64, shape=[None],\n 
name=\"docid_input{0}\".format(i)))\n self.labels.append(tf.placeholder(tf.float32, shape=[None],\n name=\"label{0}\".format(i)))\n self.types.append(tf.placeholder(tf.float32, shape=[None],\n name=\"type{0}\".format(i)))\n self.global_step = tf.Variable(0, trainable=False)\n\n # Select logits to prob function\n self.logits_to_prob = tf.nn.softmax\n if self.hparams.logits_to_prob == 'sigmoid':\n self.logits_to_prob = sigmoid_prob\n\n self.output = self.ranking_model(self.max_candidate_num, scope='ranking_model')\n pad_removed_output = self.remove_padding_for_metric_eval(self.docid_inputs, self.output)\n reshaped_labels = tf.transpose(tf.convert_to_tensor(self.labels)) # reshape from [max_candidate_num, ?] to [?, max_candidate_num]\n \n for metric in self.exp_settings['metrics']:\n for topn in self.exp_settings['metrics_topn']:\n metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_labels, pad_removed_output, None)\n tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['eval'])\n\n if not forward_only:\n # Build model\n self.rank_list_size = exp_settings['train_list_cutoff']\n train_output = self.ranking_model(self.rank_list_size, scope='ranking_model')\n self.propensity = self.DenoisingNet(self.rank_list_size, forward_only)\n train_labels = self.labels[:self.rank_list_size]\n\n print('Loss Function is ' + self.hparams.loss_func)\n # Select loss function\n self.loss_func = None\n if self.hparams.loss_func == 'click_weighted_softmax_cross_entropy':\n self.loss_func = self.click_weighted_softmax_cross_entropy_loss\n elif self.hparams.loss_func == 'click_weighted_log_loss':\n self.loss_func = self.click_weighted_log_loss\n elif self.hparams.loss_func == 'click_weighted_pairwise_loss':\n self.loss_func = self.click_weighted_pairwise_loss\n else: # softmax loss without weighting\n self.loss_func = self.softmax_loss\n\n # Compute rank loss\n reshaped_train_labels = tf.transpose(tf.convert_to_tensor(train_labels)) # reshape from [rank_list_size, ?] 
to [?, rank_list_size]\n self.propensity_weights = self.get_normalized_weights(self.logits_to_prob(self.propensity))\n self.rank_loss = self.loss_func(train_output, reshaped_train_labels, self.propensity_weights)\n pw_list = tf.unstack(self.propensity_weights, axis=1) # Compute propensity weights\n self.click_metrics=self.click_loglikelihood(reshaped_train_labels,\\\n self.propensity,train_output)\n tf.summary.scalar('click_metrics',self.click_metrics,collections=['train'])\n for i in range(len(pw_list)):\n tf.summary.scalar('Inverse Propensity weights %d' % i, tf.reduce_mean(pw_list[i]), collections=['train'])\n tf.summary.scalar('Rank Loss', tf.reduce_mean(self.rank_loss), collections=['train'])\n\n # Compute examination loss\n self.relevance_weights = self.get_normalized_weights(self.logits_to_prob(train_output))\n self.exam_loss = self.loss_func(self.propensity, reshaped_train_labels, self.relevance_weights)\n rw_list = tf.unstack(self.relevance_weights, axis=1) # Compute propensity weights\n for i in range(len(rw_list)):\n tf.summary.scalar('Relevance weights %d' % i, tf.reduce_mean(rw_list[i]), collections=['train'])\n tf.summary.scalar('Exam Loss', tf.reduce_mean(self.exam_loss), collections=['train'])\n \n # Gradients and SGD update operation for training the model.\n self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss\n \n # Select optimizer\n self.optimizer_func = tf.train.AdagradOptimizer\n if self.hparams.grad_strategy == 'sgd':\n self.optimizer_func = tf.train.GradientDescentOptimizer\n\n self.separate_gradient_update()\n \n tf.summary.scalar('Gradient Norm', self.norm, collections=['train'])\n tf.summary.scalar('Learning Rate', self.ranker_learning_rate, collections=['train'])\n tf.summary.scalar('Final Loss', tf.reduce_mean(self.loss), collections=['train'])\n \n clipped_labels = tf.clip_by_value(reshaped_train_labels, clip_value_min=0, clip_value_max=1)\n pad_removed_train_output = self.remove_padding_for_metric_eval(self.docid_inputs, train_output)\n for metric in self.exp_settings['metrics']:\n for topn in self.exp_settings['metrics_topn']:\n list_weights = tf.reduce_mean(self.propensity_weights * clipped_labels, axis=1, keep_dims=True)\n metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, None)\n tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['train'])\n weighted_metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, list_weights)\n tf.summary.scalar('Weighted_%s_%d' % (metric, topn), weighted_metric_value, collections=['train'])\n\n self.train_summary = tf.summary.merge_all(key='train')\n self.eval_summary = tf.summary.merge_all(key='eval')\n self.saver = tf.train.Saver(tf.global_variables())\n\n def separate_gradient_update(self):\n denoise_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \"denoising_model\")\n ranking_model_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \"ranking_model\")\n self.weighs_propen=denoise_params\n if self.hparams.l2_loss > 0:\n for p in denoise_params:\n# self.weighs_propen=p\n# p=tf.Print(p,[p],message=\"show the weights\")\n self.exam_loss += self.hparams.l1_loss * tf.reduce_sum(tf.abs(p))\n for p in ranking_model_params:\n self.rank_loss += self.hparams.l2_loss * tf.nn.l2_loss(p)\n self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss\n\n denoise_gradients = tf.gradients(self.exam_loss, denoise_params)\n ranking_model_gradients = 
tf.gradients(self.rank_loss, ranking_model_params)\n if self.hparams.max_gradient_norm > 0:\n denoise_gradients, denoise_norm = tf.clip_by_global_norm(denoise_gradients,\n self.hparams.max_gradient_norm)\n ranking_model_gradients, ranking_model_norm = tf.clip_by_global_norm(ranking_model_gradients,\n self.hparams.max_gradient_norm * self.hparams.ranker_loss_weight)\n self.norm = tf.global_norm(denoise_gradients + ranking_model_gradients)\n\n opt_denoise = self.optimizer_func(self.hparams.learning_rate)\n opt_ranker = self.optimizer_func(self.ranker_learning_rate)\n\n denoise_updates = opt_denoise.apply_gradients(zip(denoise_gradients, denoise_params),\n global_step=self.global_step)\n ranker_updates = opt_ranker.apply_gradients(zip(ranking_model_gradients, ranking_model_params))\n\n self.updates = tf.group(denoise_updates, ranker_updates)\n\n def DenoisingNet(self, list_size, forward_only=False, scope=None):\n with tf.variable_scope(scope or \"denoising_model\"):\n # If we are in testing, do not compute propensity\n if forward_only:\n return tf.ones_like(self.output)#, tf.ones_like(self.output)\n input_vec_size = list_size*4\n\n def propensity_network(input_data, index):\n reuse = None if index < 1 else True\n propensity_initializer = tf.constant_initializer(0.001) if self.hparams.constant_propensity_initialization else None\n with tf.variable_scope(\"propensity_network\", initializer=propensity_initializer,\n reuse=reuse):\n output_data = input_data\n current_size = input_vec_size\n output_sizes = [\n int((list_size+1)/2) + 1, \n int((list_size+1)/4) + 1,\n 1\n ]\n for i in range(len(output_sizes)):\n expand_W = tf.get_variable(\"W_%d\" % i, [current_size, output_sizes[i]])\n expand_b = tf.get_variable(\"b_%d\" % i, [output_sizes[i]])\n output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)\n output_data = tf.nn.elu(output_data)\n current_size = output_sizes[i]\n #expand_W = tf.get_variable(\"final_W\", [current_size, 1])\n #expand_b = tf.get_variable(\"final_b\" , [1])\n #output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)\n return output_data\n\n output_propensity_list = []\n for i in range(list_size):\n # Add position information (one-hot vector)\n click_feature = [tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(4*list_size)]\n click_feature[i] = tf.expand_dims(tf.ones_like(self.labels[i]) , -1)\n# click_feature[list_size:]=[tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(3*list_size)]\n click_feature[list_size:list_size+i] =[tf.expand_dims(self.labels[k] , -1) for k in range(i-1,-1,-1)]\n click_feature[2*list_size:2*list_size+i+1]=[tf.expand_dims(self.types[k] , -1) for k in range(i,-1,-1)]\n click_feature[3*list_size:3*list_size+list_size-i-1]=[tf.expand_dims(self.types[k] , -1) for k in range(i+1,list_size)]\n # Predict propensity with a simple network\n output_propensity_list.append(propensity_network(tf.concat(click_feature, 1), i))\n self.click_show=[click_feature[h][0] for h in range(4*list_size)]\n return tf.concat(output_propensity_list,1)\n\n def step(self, session, input_feed, forward_only):\n \"\"\"Run a step of the model feeding the given inputs.\n\n Args:\n session: (tf.Session) tensorflow session to use.\n input_feed: (dictionary) A dictionary containing all the input feed data.\n forward_only: whether to do the backward step (False) or only forward (True).\n\n Returns:\n A triple consisting of the loss, outputs (None if we do backward),\n and a tf.summary containing related information about the 
step.\n\n \"\"\"\n \n # Output feed: depends on whether we do a backward step or not.\n if not forward_only:\n input_feed[self.is_training.name] = True\n output_feed = [self.updates, # Update Op that does SGD.\n self.loss, # Loss for this batch.\n# self.click_show,\n self.weighs_propen,\n self.global_step,\n self.train_summary # Summarize statistics.\n ] \n else:\n input_feed[self.is_training.name] = False\n output_feed = [\n self.eval_summary, # Summarize statistics.\n self.output # Model outputs\n ] \n\n outputs = session.run(output_feed, input_feed)\n \n if not forward_only:\n# print(outputs[3],\"global step\")\n# if outputs[3]%50==0:\n# print(outputs[2])\n return outputs[1], None, outputs[-1] # loss, no outputs, summary.\n else:\n return None, outputs[1], outputs[0] # no loss, outputs, summary.\n\n def softmax_loss(self, output, labels, propensity=None, name=None):\n \"\"\"Computes listwise softmax loss without propensity weighting.\n\n Args:\n output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is\n the ranking score of the corresponding example.\n labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a\n relevant example.\n propensity: No use. \n name: A string used as the name for this variable scope.\n\n Returns:\n (tf.Tensor) A single value tensor containing the loss.\n \"\"\"\n\n loss = None\n with tf.name_scope(name, \"softmax_loss\",[output]):\n label_dis = labels / tf.reduce_sum(labels, 1, keep_dims=True)\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels, 1)\n return tf.reduce_sum(loss) / tf.reduce_sum(labels)\n\n def get_normalized_weights(self, propensity):\n \"\"\"Computes listwise softmax loss with propensity weighting.\n\n Args:\n propensity: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element. \n\n Returns:\n (tf.Tensor) A tensor containing the propensity weights.\n \"\"\"\n propensity_list = tf.unstack(propensity, axis=1) # Compute propensity weights\n pw_list = []\n for i in range(len(propensity_list)):\n pw_i = propensity_list[0] / propensity_list[i]\n pw_list.append(pw_i)\n propensity_weights = tf.stack(pw_list, axis=1)\n if self.hparams.max_propensity_weight > 0:\n propensity_weights = tf.clip_by_value(propensity_weights, clip_value_min=0, clip_value_max=self.hparams.max_propensity_weight)\n return propensity_weights\n\n def click_weighted_softmax_cross_entropy_loss(self, output, labels, propensity_weights, name=None):\n \"\"\"Computes listwise softmax loss with propensity weighting.\n\n Args:\n output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is\n the ranking score of the corresponding example.\n labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a\n relevant example.\n propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element. 
\n name: A string used as the name for this variable scope.\n\n Returns:\n (tf.Tensor) A single value tensor containing the loss.\n \"\"\"\n loss = None\n with tf.name_scope(name, \"click_softmax_cross_entropy\",[output]):\n label_dis = labels*propensity_weights / tf.reduce_sum(labels*propensity_weights, 1, keep_dims=True)\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels*propensity_weights, 1)\n return tf.reduce_sum(loss) / tf.reduce_sum(labels*propensity_weights)\n\n def click_loglikelihood(self, labels, propensity,train_output, name=None):\n \"\"\"Computes listwise softmax loss with propensity weighting.\n\n Args:\n output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is\n the ranking score of the corresponding example.\n labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a\n relevant example.\n propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element. \n name: A string used as the name for this variable scope.\n\n Returns:\n (tf.Tensor) A single value tensor containing the loss.\n \"\"\"\n\n# loss = None\n with tf.name_scope(name, \"click_loglikelihood\"):\n ob_prob=tf.nn.softmax(propensity)\n rel_prob=tf.nn.softmax(train_output)\n click_prob=ob_prob*rel_prob\n click_prob_norm=click_prob/tf.reduce_sum(click_prob,axis=1,keep_dims=True)\n label_dis = labels/ tf.reduce_sum(labels, 1, keep_dims=True)\n entropy = tf.reduce_sum(tf.math.log(click_prob_norm)*label_dis,1)\n return tf.reduce_mean(entropy)\n def click_weighted_pairwise_loss(self, output, labels, propensity_weights, name=None):\n \"\"\"Computes pairwise entropy loss with propensity weighting.\n\n Args:\n output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is\n the ranking score of the corresponding example.\n labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a\n relevant example.\n propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element. \n name: A string used as the name for this variable scope.\n\n Returns:\n (tf.Tensor) A single value tensor containing the loss.\n (tf.Tensor) A tensor containing the propensity weights.\n \"\"\"\n loss = None\n with tf.name_scope(name, \"click_weighted_pairwise_loss\",[output]):\n sliced_output = tf.unstack(output, axis=1)\n sliced_label = tf.unstack(labels, axis=1)\n sliced_propensity = tf.unstack(propensity_weights, axis=1)\n for i in range(len(sliced_output)):\n for j in range(i+1, len(sliced_output)):\n cur_label_weight = tf.math.sign(sliced_label[i] - sliced_label[j])\n cur_propensity = sliced_propensity[i] * sliced_label[i] + sliced_propensity[j] * sliced_label[j]\n cur_pair_loss = -tf.exp(sliced_output[i]) / (tf.exp(sliced_output[i]) + tf.exp(sliced_output[j]))\n if loss == None:\n loss = cur_label_weight * cur_pair_loss * cur_propensity\n loss += cur_label_weight * cur_pair_loss * cur_propensity\n batch_size = tf.shape(labels[0])[0]\n return tf.reduce_sum(loss) / tf.cast(batch_size, dtypes.float32) #/ (tf.reduce_sum(propensity_weights)+1)\n\n\n def click_weighted_log_loss(self, output, labels, propensity_weights, name=None):\n \"\"\"Computes pointwise sigmoid loss with propensity weighting.\n\n Args:\n output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is\n the ranking score of the corresponding example.\n labels: (tf.Tensor) A tensor of the same shape as `output`. 
A value >= 1 means a\n relevant example.\n propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element. \n name: A string used as the name for this variable scope.\n\n Returns:\n (tf.Tensor) A single value tensor containing the loss.\n \"\"\"\n loss = None\n with tf.name_scope(name, \"click_weighted_log_loss\",[output]):\n click_prob = tf.sigmoid(output)\n loss = tf.losses.log_loss(labels, click_prob, propensity_weights)\n return loss\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.math.sign",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.global_variables",
"tensorflow.cast",
"tensorflow.nn.l2_loss",
"tensorflow.losses.log_loss",
"tensorflow.group",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"tensorflow.get_collection",
"tensorflow.gradients",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.nn.elu",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.exp",
"tensorflow.zeros_like",
"tensorflow.summary.merge_all",
"tensorflow.contrib.training.HParams",
"tensorflow.global_norm",
"tensorflow.clip_by_value",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.sigmoid",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.math.log",
"tensorflow.constant_initializer",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
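get_normalized_weights in the DLA code above turns propensity logits into inverse propensity weights normalized against the first rank position. A NumPy sketch of the same arithmetic (NumPy stands in for the TF1 ops in the file, and the logits are made up):

import numpy as np

logits = np.array([[2.0, 1.0, 0.5, 0.1]])           # one query, 4 positions
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)  # softmax
weights = probs[:, :1] / probs                       # w_i = p_0 / p_i
print(weights.round(2))                              # position 0 is always 1.0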
lh-astro/RM-Tools | [
"ac64cc41b2f696f21ee7dd001303cbad1ff71114"
] | [
"RMtools_3D/do_RMsynth_3D.py"
] | [
"#!/usr/bin/env python\n#=============================================================================#\n# #\n# NAME: do_RMsynth_3D.py #\n# #\n# PURPOSE: Run RM-synthesis on a Stokes Q & U cubes. #\n# #\n# MODIFIED: 7-March-2019 by J. West #\n# MODIFIED: 23-October-2019 by A. Thomson #\n# #\n#=============================================================================#\n# #\n# The MIT License (MIT) #\n# #\n# Copyright (c) 2016 Cormac R. Purcell #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the \"Software\"), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n#=============================================================================#\n\nimport sys\nimport os\nimport time\nimport math as m\nimport numpy as np\nimport astropy.io.fits as pf\n\nfrom RMutils.util_RM import do_rmsynth_planes\nfrom RMutils.util_RM import get_rmsf_planes\nfrom RMutils.util_misc import interp_images\n\n\nif sys.version_info.major == 2:\n print('RM-tools will no longer run with Python 2! Please use Python 3.')\n exit()\n\nC = 2.997924538e8 # Speed of light [m/s]\n\n#-----------------------------------------------------------------------------#\ndef run_rmsynth(dataQ, dataU, freqArr_Hz, dataI=None, rmsArr=None,\n phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0,\n weightType=\"uniform\", fitRMSF=False, nBits=32, verbose=True, not_rmsf = False,\n log = print):\n\n \"\"\"Run RM-synthesis on 2/3D data.\n\n Args:\n dataQ (array_like): Stokes Q intensity cube.\n dataU (array_like): Stokes U intensity cube.\n freqArr_Hz (array_like): Frequency of each channel in Hz.\n\n Kwargs:\n dataI (array_like): Model cube of Stokes I spectra (see do_fitIcube).\n rmsArr (array_like): Cube of RMS spectra.\n phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).\n dPhi_radm2 (float): Faraday depth channel size (rad/m^2).\n nSamples (float): Number of samples across the RMSF.\n weightType (str): Can be \"variance\" or \"uniform\"\n \"variance\" -- Weight by RMS.\n \"uniform\" -- Weight uniformly (i.e. 
with 1s)\n fitRMSF (bool): Fit a Gaussian to the RMSF?\n nBits (int): Precision of floating point numbers.\n verbose (bool): Verbosity.\n not_rmsf (bool): Just do RM synthesis and ignore RMSF?\n log (function): Which logging function to use.\n\n Returns:\n dataArr (list): FDF and RMSF information\n if not_rmsf:\n dataArr = [FDFcube, phiArr_radm2, lam0Sq_m2, lambdaSqArr_m2]\n\n else:\n dataArr = [FDFcube, phiArr_radm2, RMSFcube, phi2Arr_radm2, fwhmRMSFCube,fitStatArr, lam0Sq_m2, lambdaSqArr_m2]\n\n\n \"\"\"\n # Sanity check on header dimensions\n\n if not str(dataQ.shape) == str(dataU.shape):\n log(\"Err: unequal dimensions: Q = \"+str(dataQ.shape)+\", U = \"+str(dataU.shape)+\".\")\n sys.exit()\n\n # Check dimensions of Stokes I cube, if present\n if not dataI is None:\n if not str(dataI.shape) == str(dataQ.shape):\n log(\"Err: unequal dimensions: Q = \"+str(dataQ.shape)+\", I = \"+str(dataI.shape)+\".\")\n sys.exit()\n\n # Default data types\n dtFloat = \"float\" + str(nBits)\n dtComplex = \"complex\" + str(2*nBits)\n\n lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0)\n\n dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))\n lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) -\n np.nanmin(lambdaSqArr_m2) )\n dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))\n dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))\n\n # Set the Faraday depth range\n fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2\n if dPhi_radm2 is None:\n dPhi_radm2 = fwhmRMSF_radm2 / nSamples\n if phiMax_radm2 is None:\n phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2\n phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10 FWHM\n\n # Faraday depth sampling. Zero always centred on middle channel\n nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2))) * 2 + 1\n startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0\n stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0\n phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, int(nChanRM))\n phiArr_radm2 = phiArr_radm2.astype(dtFloat)\n if(verbose): log(\"PhiArr = %.2f to %.2f by %.2f (%d chans).\" % (phiArr_radm2[0],\n phiArr_radm2[-1],\n float(dPhi_radm2),\n nChanRM))\n\n\n # Calculate the weighting as 1/sigma^2 or all 1s (uniform)\n if weightType==\"variance\" and rmsArr is not None:\n weightArr = 1.0 / np.power(rmsArr, 2.0)\n else:\n weightType = \"uniform\"\n weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)\n if(verbose): log(\"Weight type is '%s'.\" % weightType)\n\n startTime = time.time()\n\n # Read the Stokes I model and divide into the Q & U data\n if dataI is not None:\n with np.errstate(divide='ignore', invalid='ignore'):\n qArr = np.true_divide(dataQ, dataI)\n uArr = np.true_divide(dataU, dataI)\n else:\n qArr = dataQ\n uArr = dataU\n\n # Perform RM-synthesis on the cube\n FDFcube, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr,\n dataU = uArr,\n lambdaSqArr_m2 = lambdaSqArr_m2,\n phiArr_radm2 = phiArr_radm2,\n weightArr = weightArr,\n nBits = 32,\n verbose = verbose)\n # Calculate the Rotation Measure Spread Function cube\n if not_rmsf is not True:\n RMSFcube, phi2Arr_radm2, fwhmRMSFCube, fitStatArr = \\\n get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2,\n phiArr_radm2 = phiArr_radm2,\n weightArr = weightArr,\n mskArr = ~np.isfinite(dataQ),\n lam0Sq_m2 = lam0Sq_m2,\n double = True,\n fitRMSF = fitRMSF,\n fitRMSFreal = False,\n nBits = 32,\n verbose = verbose,\n log = log)\n endTime = time.time()\n cputime = (endTime - startTime)\n if(verbose): log(\"> RM-synthesis completed in %.2f seconds.\" % cputime)\n\n\n # 
Determine the Stokes I value at lam0Sq_m2 from the Stokes I model\n # Note: the Stokes I model MUST be continuous throughout the cube,\n # i.e., no NaNs as the amplitude at freq0_Hz is interpolated from the\n # nearest two planes.\n freq0_Hz = C / m.sqrt(lam0Sq_m2)\n if dataI is not None:\n idx = np.abs(freqArr_Hz - freq0_Hz).argmin()\n if freqArr_Hz[idx]<freq0_Hz:\n Ifreq0Arr = interp_images(dataI[idx, :, :], dataI[idx+1, :, :], f=0.5)\n elif freqArr_Hz[idx]>freq0_Hz:\n Ifreq0Arr = interp_images(dataI[idx-1, :, :], dataI[idx, :, :], f=0.5)\n else:\n Ifreq0Arr = dataI[idx, :, :]\n\n # Multiply the dirty FDF by Ifreq0 to recover the PI\n FDFcube *= Ifreq0Arr\n\n\n if not_rmsf:\n dataArr = [FDFcube, phiArr_radm2, lam0Sq_m2, lambdaSqArr_m2]\n\n else:\n dataArr = [FDFcube, phiArr_radm2, RMSFcube, phi2Arr_radm2, fwhmRMSFCube,fitStatArr, lam0Sq_m2, lambdaSqArr_m2]\n\n return dataArr\n\ndef writefits(dataArr, headtemplate, fitRMSF=False, prefixOut=\"\", outDir=\"\",\n nBits = 32, write_seperate_FDF=False, not_rmsf=True, verbose=True, log=print):\n \"\"\"Write data to disk in FITS\n\n\n Output files:\n Default:\n FDF_dirty.fits: FDF, in 3 extensions: Q,U, and PI.\n FDF_maxPI.fits: 2D map of peak polarized intensity per pixel.\n FDF_peakRM.fits: 2D map of Faraday depth of highest peak, per pixel.\n RMSF.fits: 4 extensions; first 3 are RMSF cubes [Q, U, PI]\n 4th is 2D map of RMSF FWHM.\n write_seperate_FDF=True:\n FDF_dirty.fits is split into three constituent components:\n FDF_real_dirty.fits: Stokes Q\n FDF_im_dirty.fits: Stokes U\n FDF_tot_dirty.fits: Polarizd Intensity (sqrt(Q^2+U^2))\n\n Args:\n dataArr (list): FDF and RMSF information\n if not_rmsf:\n dataArr = [FDFcube, phiArr_radm2, lam0Sq_m2, lambdaSqArr_m2]\n else:\n dataArr = [FDFcube, phiArr_radm2, RMSFcube, phi2Arr_radm2, fwhmRMSFCube,fitStatArr, lam0Sq_m2, lambdaSqArr_m2]\n\n headtemplate: FITS header template\n\n Kwargs:\n fitRMSF (bool): Fit a Gaussian to the RMSF?\n prefixOut (str): Prefix for filenames.\n outDir (str): Directory to save files.\n write_seperate_FDF (bool): Write Q, U, and PI separately?\n verbose (bool): Verbosity.\n not_rmsf (bool): Just do RM synthesis and ignore RMSF?\n log (function): Which logging function to use.\n\n \"\"\"\n if not_rmsf:\n FDFcube, phiArr_radm2, lam0Sq_m2, lambdaSqArr_m2 = dataArr\n\n else:\n FDFcube, phiArr_radm2, RMSFcube, phi2Arr_radm2, fwhmRMSFCube,fitStatArr, lam0Sq_m2, lambdaSqArr_m2 = dataArr\n\n # Default data types\n dtFloat = \"float\" + str(nBits)\n dtComplex = \"complex\" + str(2*nBits)\n\n if(verbose): log(\"Saving the dirty FDF, RMSF and ancillary FITS files.\")\n # Make a copy of the Q header and alter frequency-axis as Faraday depth\n header = headtemplate.copy()\n Ndim=header['NAXIS']\n freq_axis=Ndim #If frequency axis not found, assume it's the last one.\n #Check for frequency axes. 
Because I don't know what different formatting\n #I might get ('FREQ' vs 'OBSFREQ' vs 'Freq' vs 'Frequency'), convert to\n #all caps and check for 'FREQ' anywhere in the axis name.\n for i in range(1,Ndim+1):\n try:\n if 'FREQ' in header['CTYPE'+str(i)].upper():\n freq_axis=i\n except:\n pass #The try statement is needed for if the FITS header does not\n # have CTYPE keywords.\n\n\n header[\"NAXIS\"+str(freq_axis)] = phiArr_radm2.size\n header[\"CTYPE\"+str(freq_axis)] = (\"FDEP\", 'Faraday depth (linear)')\n header[\"CDELT\"+str(freq_axis)] = (np.diff(phiArr_radm2)[0], '[rad/m^2] Coordinate increment at reference point')\n header[\"CRPIX\"+str(freq_axis)] = 1.0\n header[\"CRVAL\"+str(freq_axis)] = (phiArr_radm2[0], '[rad/m^2] Coordinate value at reference point')\n header[\"CUNIT\"+str(freq_axis)] = \"rad/m^2\"\n header[\"LAMSQ0\"] = (lam0Sq_m2,'Lambda^2_0, in m^2')\n if \"DATAMAX\" in header:\n del header[\"DATAMAX\"]\n if \"DATAMIN\" in header:\n del header[\"DATAMIN\"]\n\n if outDir=='': #To prevent code breaking if file is in current directory\n outDir='.'\n\n #Re-add any initially removed degenerate axes (to match with FITS header)\n #NOTE THIS HAS NOT BEEN RIGOROUSLY TESTED!!!\n output_axes=[]\n for i in range(1,Ndim+1):\n output_axes.append(header['NAXIS'+str(i)]) #Get FITS dimensions\n del output_axes[freq_axis-1] #Remove frequency axis (since it's first in the array)\n output_axes.reverse() #To get into numpy order.\n #Put frequency axis first, and reshape to add degenerate axes:\n FDFcube=np.reshape(FDFcube,[FDFcube.shape[0]]+output_axes)\n if not_rmsf is not True: RMSFcube=np.reshape(RMSFcube,[RMSFcube.shape[0]]+output_axes)\n\n\n #Move Faraday depth axis to appropriate position to match header.\n FDFcube=np.moveaxis(FDFcube,0,Ndim-freq_axis)\n if not_rmsf is not True: RMSFcube=np.moveaxis(RMSFcube,0,Ndim-freq_axis)\n\n\n if(write_seperate_FDF):\n hdu0 = pf.PrimaryHDU(FDFcube.real.astype(dtFloat), header)\n hdu1 = pf.PrimaryHDU(FDFcube.imag.astype(dtFloat), header)\n hdu2 = pf.PrimaryHDU(np.abs(FDFcube).astype(dtFloat), header)\n fitsFileOut = outDir + \"/\" + prefixOut + \"FDF_real_dirty.fits\"\n if(verbose): log(\"> %s\" % fitsFileOut)\n hdu0.writeto(fitsFileOut, output_verify=\"fix\", overwrite=True)\n\n fitsFileOut = outDir + \"/\" + prefixOut + \"FDF_im_dirty.fits\"\n if(verbose): log(\"> %s\" % fitsFileOut)\n hdu1.writeto(fitsFileOut, output_verify=\"fix\", overwrite=True)\n\n fitsFileOut = outDir + \"/\" + prefixOut + \"FDF_tot_dirty.fits\"\n if(verbose): log(\"> %s\" % fitsFileOut)\n hdu2.writeto(fitsFileOut, output_verify=\"fix\", overwrite=True)\n\n else:\n # Save the dirty FDF\n hdu0 = pf.PrimaryHDU(FDFcube.real.astype(dtFloat), header)\n hdu1 = pf.ImageHDU(FDFcube.imag.astype(dtFloat), header)\n hdu2 = pf.ImageHDU(np.abs(FDFcube).astype(dtFloat), header)\n fitsFileOut = outDir + \"/\" + prefixOut + \"FDF_dirty.fits\"\n if(verbose): log(\"> %s\" % fitsFileOut)\n hduLst = pf.HDUList([hdu0, hdu1, hdu2])\n hduLst.writeto(fitsFileOut, output_verify=\"fix\", overwrite=True)\n hduLst.close()\n\n #Header for outputs that are RM maps (peakRM, RMSF_FWHM)\n\n\n # Save the RMSF\n if not_rmsf is not True:\n header[\"NAXIS\"+str(freq_axis)] = phi2Arr_radm2.size\n header[\"CRVAL\"+str(freq_axis)] = (phi2Arr_radm2[0],'[rad/m^2] Coordinate value at reference point')\n header[\"DATAMAX\"] = np.max(fwhmRMSFCube) + 1\n header[\"DATAMIN\"] = np.max(fwhmRMSFCube) - 1\n rmheader=header.copy()\n rmheader['BUNIT']='rad/m^2'\n #Because there can be problems with different axes having 
different FITS keywords,\n #don't try to remove the FD axis, but just make it degenerate.\n # Also requires np.expand_dims to set the correct NAXIS.\n rmheader[\"NAXIS\"+str(freq_axis)] = 1\n rmheader[\"CRVAL\"+str(freq_axis)] = phiArr_radm2[0]\n\n if(write_seperate_FDF):\n hdu0 = pf.PrimaryHDU(RMSFcube.real.astype(dtFloat), header)\n hdu1 = pf.PrimaryHDU(RMSFcube.imag.astype(dtFloat), header)\n hdu2 = pf.PrimaryHDU(np.abs(RMSFcube).astype(dtFloat), header)\n hdu3 = pf.PrimaryHDU(np.expand_dims(fwhmRMSFCube.astype(dtFloat), axis=0),\n rmheader)\n\n fitsFileOut = outDir + \"/\" + prefixOut + \"RMSF_real.fits\"\n if(verbose): log(\"> %s\" % fitsFileOut)\n hdu0.writeto(fitsFileOut, output_verify=\"fix\", overwrite=True)\n\n fitsFileOut = outDir + \"/\" + prefixOut + \"RMSF_im.fits\"\n if(verbose): log(\"> %s\" % fitsFileOut)\n hdu1.writeto(fitsFileOut, output_verify=\"fix\", overwrite=True)\n\n fitsFileOut = outDir + \"/\" + prefixOut + \"RMSF_tot.fits\"\n if(verbose): log(\"> %s\" % fitsFileOut)\n hdu2.writeto(fitsFileOut, output_verify=\"fix\", overwrite=True)\n\n fitsFileOut = outDir + \"/\" + prefixOut + \"RMSF_FWHM.fits\"\n if(verbose): log(\"> %s\" % fitsFileOut)\n hdu3.writeto(fitsFileOut, output_verify=\"fix\", overwrite=True)\n\n else:\n fitsFileOut = outDir + \"/\" + prefixOut + \"RMSF.fits\"\n hdu0 = pf.PrimaryHDU(RMSFcube.real.astype(dtFloat), header)\n hdu1 = pf.ImageHDU(RMSFcube.imag.astype(dtFloat), header)\n hdu2 = pf.ImageHDU(np.abs(RMSFcube).astype(dtFloat), header)\n hdu3 = pf.ImageHDU(fwhmRMSFCube.astype(dtFloat), rmheader)\n hduLst = pf.HDUList([hdu0, hdu1, hdu2, hdu3])\n if(verbose): log(\"> %s\" % fitsFileOut)\n hduLst.writeto(fitsFileOut, output_verify=\"fix\", overwrite=True)\n hduLst.close()\n\n\n\n #Because there can be problems with different axes having different FITS keywords,\n #don't try to remove the FD axis, but just make it degenerate.\n # Also requires np.expand_dims to set the correct NAXIS.\n header[\"NAXIS\"+str(freq_axis)] = 1\n header[\"CRVAL\"+str(freq_axis)] = (phiArr_radm2[0], '[rad/m^2] Coordinate value at reference point')\n if \"DATAMAX\" in header:\n del header[\"DATAMAX\"]\n if \"DATAMIN\" in header:\n del header[\"DATAMIN\"]\n\n\n\n # Save a maximum polarised intensity map\n fitsFileOut = outDir + \"/\" + prefixOut + \"FDF_maxPI.fits\"\n if(verbose): log(\"> %s\" % fitsFileOut)\n pf.writeto(fitsFileOut,\n np.expand_dims(np.max(np.abs(FDFcube), Ndim-freq_axis).astype(dtFloat), axis=0),\n header,\n overwrite=True, output_verify=\"fix\")\n # Save a peak RM map\n fitsFileOut = outDir + \"/\" + prefixOut + \"FDF_peakRM.fits\"\n header[\"BUNIT\"] = \"rad/m^2\"\n peakFDFmap = np.argmax(np.abs(FDFcube), Ndim-freq_axis).astype(dtFloat)\n peakFDFmap = header[\"CRVAL\"+str(freq_axis)] + (peakFDFmap + 1\n - header[\"CRPIX\"+str(freq_axis)]) * header[\"CDELT\"+str(freq_axis)]\n if(verbose): log(\"> %s\" % fitsFileOut)\n pf.writeto(fitsFileOut, np.expand_dims(peakFDFmap,axis=0), header, overwrite=True,\n output_verify=\"fix\")\n\n# #Cameron: I've removed the moment 1 map for now because I don't think it's properly/robustly defined.\n# # Save an RM moment-1 map\n# fitsFileOut = outDir + \"/\" + prefixOut + \"FDF_mom1.fits\"\n# header[\"BUNIT\"] = \"rad/m^2\"\n# mom1FDFmap = (np.nansum(np.moveaxis(np.abs(FDFcube),FDFcube.ndim-freq_axis,FDFcube.ndim-1) * phiArr_radm2, FDFcube.ndim-1)\n# /np.nansum(np.abs(FDFcube), FDFcube.ndim-freq_axis))\n# mom1FDFmap = mom1FDFmap.astype(dtFloat)\n# if(verbose): log(\"> %s\" % fitsFileOut)\n# pf.writeto(fitsFileOut, 
mom1FDFmap, header, overwrite=True,\n# output_verify=\"fix\")\n\n\n\n\ndef readFitsCube(file, verbose, log = print):\n \"\"\"The old version of this function could only accept 3 or 4 axis input\n (and implicitly assumed that in the 4 axis case that axis 3 was degenerate).\n I'm trying to somewhat generalize this, so that it will accept NAXIS=1..3\n cases and automatically try to identify which axis is the frequency axis,\n and will try to remove the degenerate axis in the 4D case.\n Where it can't find the correct frequency axis, it will assume it is the\n last one. It assumes any fourth or higher dimensions are degenerate (length 1)\n and will remove them. If the higher dimensions are NOT degenerate (e.g., a\n cube with all 4 Stokes), the code will fail (support may be added later?).\n -Cameron (3 April 2019)\n \"\"\"\n if not os.path.exists(file):\n log(\"Err: File not found\")\n\n if(verbose): log(\"Reading \" + file + \" ...\")\n data = pf.getdata(file)\n head = pf.getheader(file)\n if(verbose): log(\"done.\")\n\n N_dim=head['NAXIS'] #Get number of axes\n if verbose:\n print('Dimensions of the input cube are: ',end=' ')\n for i in range(1,N_dim+1):\n print('NAXIS{} = {}'.format(i,head['NAXIS'+str(i)]),end=' ')\n print()\n\n freq_axis=0 #Default for 'frequency axis not identified'\n #Check for frequency axes. Because I don't know what different formatting\n #I might get ('FREQ' vs 'OBSFREQ' vs 'Freq' vs 'Frequency'), convert to\n #all caps and check for 'FREQ' anywhere in the axis name.\n for i in range(1,N_dim+1):\n try:\n if 'FREQ' in head['CTYPE'+str(i)].upper():\n freq_axis=i\n except:\n pass #The try statement is needed for if the FITS header does not\n # have CTYPE keywords.\n\n #If the frequency axis isn't the last one, rotate the array until it is.\n #Recall that pyfits reverses the axis ordering, so we want frequency on\n #axis 0 of the numpy array.\n if freq_axis != 0 and freq_axis != N_dim:\n data=np.moveaxis(data,N_dim-freq_axis,0)\n\n if N_dim >= 4:\n data=np.squeeze(data) #Remove degenerate axes\n\n if verbose:\n print('Dimensions of the input array are: ',data.shape)\n\n if data.ndim > 3:\n raise Exception('Data cube has too many (non-degenerate) axes!')\n\n return head, data\n\n\ndef readFreqFile(file, verbose, log = print):\n # Read the frequency vector and wavelength sampling\n freqArr_Hz = np.loadtxt(file, dtype=float)\n return freqArr_Hz\n\n#-----------------------------------------------------------------------------#\ndef main():\n import argparse\n\n \"\"\"\n Start the function to perform RM-synthesis if called from the command line.\n \"\"\"\n\n # Help string to be shown using the -h option\n descStr = \"\"\"\n Run RM-synthesis on a pair of Stokes Q and U cubes (3D). 
This script\n correctly deals with isolated clumps of flagged voxels in the cubes (NaNs).\n Saves cubes containing the complex Faraday dispersion function (FDF), a\n cube of double-size Rotation Measure Spread Functions, a peak Faraday\n depth map, a first-moment map and a maximum polarised intensity map.\n \"\"\"\n\n epilog_text=\"\"\"\n Output files:\n Default:\n FDF_real_dirty.fits: real (Stokes Q) component of the FDF\n FDF_im_dirty.fits: imaginary (Stokes U) component of the FDF\n FDF_tot_dirty.fits: polarized intnsity (Stokes P) component of the FDF\n FDF_maxPI.fits: 2D map of peak polarized intensity per pixel.\n FDF_peakRM.fits: 2D map of Faraday depth of highest peak, per pixel.\n RMSF_real_dirty.fits: real (Stokes Q) component of the RMSF\n RMSF_im_dirty.fits: imaginary (Stokes U) component of the RMSF\n RMSF_tot_dirty.fits: polarized intnsity (Stokes P) component of the RMSF\n RMSF_FWHM: 2D map of RMSF FWHM per pixel.\n\n With -f flag, the 3 FDF cubes are combined in a single file with 3 extensions.\n and the RMSF files are combined in a single file with 4 extensions.\n\n \"\"\"\n\n # Parse the command line options\n parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"fitsQ\", metavar=\"StokesQ.fits\", nargs=1,\n help=\"FITS cube containing Stokes Q data.\")\n parser.add_argument(\"fitsU\", metavar=\"StokesU.fits\", nargs=1,\n help=\"FITS cube containing Stokes U data.\")\n parser.add_argument(\"freqFile\", metavar=\"freqs_Hz.dat\", nargs=1,\n help=\"ASCII file containing the frequency vector.\")\n parser.add_argument(\"-i\", dest=\"fitsI\", default=None,\n help=\"FITS cube containing Stokes I model [None].\")\n parser.add_argument(\"-n\", dest=\"noiseFile\", default=None,\n help=\"FITS file or cube containing noise values [None].\")\n parser.add_argument(\"-w\", dest=\"weightType\", default=\"uniform\",\n help=\"weighting [uniform] (all 1s) or 'variance'.\")\n parser.add_argument(\"-t\", dest=\"fitRMSF\", action=\"store_true\",\n help=\"Fit a Gaussian to the RMSF [False]\")\n parser.add_argument(\"-l\", dest=\"phiMax_radm2\", type=float, default=None,\n help=\"Absolute max Faraday depth sampled (overrides NSAMPLES) [Auto].\")\n parser.add_argument(\"-d\", dest=\"dPhi_radm2\", type=float, default=None,\n help=\"Width of Faraday depth channel [Auto].\")\n parser.add_argument(\"-o\", dest=\"prefixOut\", default=\"\",\n help=\"Prefix to prepend to output files [None].\")\n parser.add_argument(\"-s\", dest=\"nSamples\", type=float, default=5,\n help=\"Number of samples across the FWHM RMSF.\")\n parser.add_argument(\"-f\", dest=\"write_seperate_FDF\", action=\"store_false\",\n help=\"Store different Stokes as FITS extensions [False, store as seperate files].\")\n parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\",\n help=\"Verbose [False].\")\n parser.add_argument(\"-R\", dest=\"not_RMSF\", action=\"store_true\",\n help=\"Skip calculation of RMSF? 
[False]\")\n args = parser.parse_args()\n\n # Sanity checks\n for f in args.fitsQ + args.fitsU:\n if not os.path.exists(f):\n print(\"File does not exist: '%s'.\" % f)\n sys.exit()\n dataDir, dummy = os.path.split(args.fitsQ[0])\n verbose=args.verbose\n if args.fitsI is not None:\n dataI = readFitsCube(args.fitsI, verbose)[1]\n else:\n dataI=None\n if args.noiseFile is not None:\n rmsArr = readFreqFile(args.noiseFile, verbose)\n else:\n rmsArr=None\n\n header,dataQ = readFitsCube(args.fitsQ[0], verbose)\n\n # Run RM-synthesis on the cubes\n dataArr = run_rmsynth(dataQ = dataQ,\n dataU = readFitsCube(args.fitsU[0], verbose)[1],\n freqArr_Hz = readFreqFile(args.freqFile[0], verbose),\n dataI = dataI,\n rmsArr = rmsArr,\n phiMax_radm2 = args.phiMax_radm2,\n dPhi_radm2 = args.dPhi_radm2,\n nSamples = args.nSamples,\n weightType = args.weightType,\n fitRMSF = args.fitRMSF,\n nBits = 32,\n verbose = verbose,\n not_rmsf = args.not_RMSF)\n\n # Write to files\n writefits(dataArr,\n headtemplate = header,\n fitRMSF = False,\n prefixOut = args.prefixOut,\n outDir = dataDir,\n write_seperate_FDF = args.write_seperate_FDF,\n not_rmsf = args.not_RMSF,\n nBits = 32,\n verbose = verbose)\n\n\n#-----------------------------------------------------------------------------#\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.nanmax",
"numpy.true_divide",
"numpy.expand_dims",
"numpy.abs",
"numpy.isfinite",
"numpy.power",
"numpy.reshape",
"numpy.squeeze",
"numpy.nanmin",
"numpy.ones",
"numpy.max",
"numpy.diff",
"numpy.moveaxis",
"numpy.errstate",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ChiamYu/cameo | [
"13571b93bdd195d6d39a9ec43180916a1ff4490a"
] | [
"cameo/strain_design/heuristic/evolutionary_based.py"
] | [
"# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom __future__ import absolute_import, print_function\n\nimport logging\n\nimport inspyred\nimport numpy\nfrom IProgress.progressbar import ProgressBar\nfrom IProgress.widgets import Bar, Percentage\nfrom pandas import DataFrame\n\nfrom cobra import Model\nfrom cameo.core.strain_design import StrainDesignMethod, StrainDesignMethodResult, StrainDesign\nfrom cameo.core.target import ReactionKnockoutTarget, GeneKnockoutTarget, ReactionCofactorSwapTarget\nfrom cameo.core.manipulation import swap_cofactors\nfrom cobra.exceptions import OptimizationError\nfrom cameo.flux_analysis.analysis import phenotypic_phase_plane\nfrom cameo.flux_analysis.simulation import fba\nfrom cameo.strain_design.heuristic.evolutionary.archives import ProductionStrainArchive\nfrom cameo.strain_design.heuristic.evolutionary.objective_functions import biomass_product_coupled_min_yield, \\\n biomass_product_coupled_yield\nfrom cameo.strain_design.heuristic.evolutionary.optimization import GeneKnockoutOptimization, \\\n ReactionKnockoutOptimization, CofactorSwapOptimization, NADH_NADPH\nfrom cameo.strain_design.heuristic.evolutionary.processing import process_reaction_knockout_solution, \\\n process_gene_knockout_solution, process_reaction_swap_solution\nfrom cameo.util import TimeMachine\nfrom cameo.visualization.plotting import plotter\nfrom cameo.core.utils import get_reaction_for\n\n__all__ = [\"OptGene\"]\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OptGene(StrainDesignMethod):\n def __init__(self, model, evolutionary_algorithm=inspyred.ec.GA, manipulation_type=\"genes\", essential_genes=None,\n essential_reactions=None, plot=True, exclude_non_gene_reactions=True, *args, **kwargs):\n if not isinstance(model, Model):\n raise TypeError(\"Argument 'model' should be of type 'cobra.Model'.\")\n\n super(OptGene, self).__init__(*args, **kwargs)\n\n if exclude_non_gene_reactions:\n essential_reactions = essential_reactions or []\n essential_reactions += [r for r in model.reactions if not r.genes]\n\n self._model = model\n self._algorithm = evolutionary_algorithm\n self._optimization_algorithm = None\n self._manipulation_type = None\n self._essential_genes = essential_genes\n self._essential_reactions = essential_reactions\n self._plot = plot\n self._manipulation_type = manipulation_type\n\n @property\n def manipulation_type(self):\n return self._manipulation_type\n\n @property\n def plot(self):\n return self._plot\n\n @plot.setter\n def plot(self, plot):\n self._plot = plot\n if self._optimization_algorithm is not None:\n self._optimization_algorithm.plot = plot\n\n @manipulation_type.setter\n def manipulation_type(self, manipulation_type):\n self._manipulation_type = manipulation_type\n\n def run(self, target=None, biomass=None, substrate=None, max_knockouts=5, variable_size=True,\n simulation_method=fba, growth_coupled=False, max_evaluations=20000, population_size=200,\n max_results=50, 
use_nullspace_simplification=True, seed=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n target : str, Metabolite or Reaction\n The design target\n biomass : str, Metabolite or Reaction\n The biomass definition in the model\n substrate : str, Metabolite or Reaction\n The main carbon source\n max_knockouts : int\n Max number of knockouts allowed\n variable_size : bool\n If true, all candidates have the same size. Otherwise the candidate size can be from 1 to max_knockouts.\n simulation_method : function\n Any method from cameo.flux_analysis.simulation or equivalent\n growth_coupled : bool\n If true will use the minimum flux rate to compute the fitness\n max_evaluations : int\n Number of evaluations before stop\n population_size : int\n Number of individuals in each generation\n max_results : int\n Max number of different designs to return if found.\n kwargs : dict\n Arguments for the simulation method.\n seed : int\n A seed for random.\n use_nullspace_simplification : Boolean (default True)\n Use a basis for the nullspace to find groups of reactions whose fluxes are multiples of each other and dead\n end reactions. From each of these groups only 1 reaction will be included as a possible knockout.\n\n\n Returns\n -------\n OptGeneResult\n \"\"\"\n\n target = get_reaction_for(self._model, target)\n biomass = get_reaction_for(self._model, biomass)\n substrate = get_reaction_for(self._model, substrate)\n\n if growth_coupled:\n objective_function = biomass_product_coupled_min_yield(biomass, target, substrate)\n else:\n objective_function = biomass_product_coupled_yield(biomass, target, substrate)\n if self.manipulation_type == \"genes\":\n optimization_algorithm = GeneKnockoutOptimization(\n model=self._model,\n heuristic_method=self._algorithm,\n essential_genes=self._essential_genes,\n plot=self.plot,\n objective_function=objective_function,\n use_nullspace_simplification=use_nullspace_simplification)\n elif self.manipulation_type == \"reactions\":\n optimization_algorithm = ReactionKnockoutOptimization(\n model=self._model,\n heuristic_method=self._algorithm,\n essential_reactions=self._essential_reactions,\n plot=self.plot,\n objective_function=objective_function,\n use_nullspace_simplification=use_nullspace_simplification)\n else:\n raise ValueError(\"Invalid manipulation type %s\" % self.manipulation_type)\n optimization_algorithm.simulation_kwargs = kwargs\n optimization_algorithm.simulation_method = simulation_method\n optimization_algorithm.archiver = ProductionStrainArchive()\n\n result = optimization_algorithm.run(max_evaluations=max_evaluations,\n pop_size=population_size,\n max_size=max_knockouts,\n variable_size=variable_size,\n maximize=True,\n max_archive_size=max_results,\n seed=seed,\n **kwargs)\n\n kwargs.update(optimization_algorithm.simulation_kwargs)\n\n return OptGeneResult(self._model, result, objective_function, simulation_method, self.manipulation_type,\n biomass, target, substrate, kwargs)\n\n\nclass OptGeneResult(StrainDesignMethodResult):\n __method_name__ = \"OptGene\"\n\n __aggregation_function = {\n \"genes\": lambda x: tuple(tuple(e for e in elements) for elements in x.values)\n }\n\n def __init__(self, model, knockouts, objective_function, simulation_method, manipulation_type,\n biomass, target, substrate, simulation_kwargs, *args, **kwargs):\n super(OptGeneResult, self).__init__(self._generate_designs(knockouts, manipulation_type), *args, **kwargs)\n assert isinstance(model, Model)\n\n self._model = model\n self._knockouts = knockouts\n 
self._objective_function = objective_function\n self._simulation_method = simulation_method\n self._manipulation_type = manipulation_type\n self._biomass = biomass\n self._target = target\n self._substrate = substrate\n self._processed_solutions = None\n self._simulation_kwargs = simulation_kwargs\n\n @staticmethod\n def _generate_designs(knockouts, manipulation_type):\n designs = []\n if manipulation_type == \"reactions\":\n target_class = ReactionKnockoutTarget\n elif manipulation_type == \"genes\":\n target_class = GeneKnockoutTarget\n else:\n raise ValueError(\"Invalid 'manipulation_type' %s\" % manipulation_type)\n\n for knockout_design, _ in knockouts:\n designs.append(StrainDesign([target_class(ko) for ko in knockout_design]))\n\n return designs\n\n def _repr_html_(self):\n return \"\"\"\n <h3>OptGene Result</h3>\n <ul>\n <li>Simulation: %s<br/></li>\n <li>Objective Function: %s<br/></li>\n </ul>\n %s\n \"\"\" % (self._simulation_method.__name__,\n self._objective_function._repr_latex_(),\n self.data_frame._repr_html_())\n\n @property\n def data_frame(self):\n if self._processed_solutions is None:\n self._process_solutions()\n\n if self._manipulation_type == \"reactions\":\n data_frame = DataFrame(self._processed_solutions)\n else:\n columns = self._processed_solutions.columns.difference([\"reactions\", \"size\"])\n aggregation_functions = {k: self.__aggregation_function.get(k, lambda x: x.values[0]) for k in columns}\n data_frame = self._processed_solutions.groupby([\"reactions\", \"size\"], as_index=False) \\\n .aggregate(aggregation_functions)\n data_frame = data_frame[self._processed_solutions.columns]\n\n data_frame.sort_values(\"size\", inplace=True)\n data_frame.index = [i for i in range(len(data_frame))]\n return data_frame\n\n def _process_solutions(self):\n if self._manipulation_type == \"reactions\":\n self._process_reaction_knockout_solutions()\n elif self._manipulation_type == \"genes\":\n self._process_gene_knockout_solutions()\n\n def _process_gene_knockout_solutions(self):\n processed_solutions = DataFrame(columns=[\"reactions\", \"genes\", \"size\", \"fva_min\", \"fva_max\",\n \"target_flux\", \"biomass_flux\", \"yield\", \"fitness\"])\n\n if len(self._knockouts) == 0:\n logger.warn(\"No solutions found\")\n self._processed_solutions = processed_solutions\n\n else:\n progress = ProgressBar(maxval=len(self._knockouts), widgets=[\"Processing solutions: \", Bar(), Percentage()])\n for i, solution in progress(enumerate(self._knockouts)):\n try:\n processed_solutions.loc[i] = process_gene_knockout_solution(\n self._model, solution[0], self._simulation_method, self._simulation_kwargs, self._biomass,\n self._target, self._substrate, self._objective_function)\n except OptimizationError as e:\n logger.error(e)\n processed_solutions.loc[i] = [numpy.nan for _ in processed_solutions.columns]\n\n self._processed_solutions = processed_solutions\n\n def _process_reaction_knockout_solutions(self):\n processed_solutions = DataFrame(columns=[\"reactions\", \"size\", \"fva_min\", \"fva_max\",\n \"target_flux\", \"biomass_flux\", \"yield\", \"fitness\"])\n\n if len(self._knockouts) == 0:\n logger.warn(\"No solutions found\")\n self._processed_solutions = processed_solutions\n\n else:\n progress = ProgressBar(maxval=len(self._knockouts), widgets=[\"Processing solutions: \", Bar(), Percentage()])\n for i, solution in progress(enumerate(self._knockouts)):\n try:\n processed_solutions.loc[i] = process_reaction_knockout_solution(\n self._model, solution[0], self._simulation_method, 
self._simulation_kwargs, self._biomass,\n self._target, self._substrate, self._objective_function)\n except OptimizationError as e:\n logger.error(e)\n processed_solutions.loc[i] = [numpy.nan for _ in processed_solutions.columns]\n\n self._processed_solutions = processed_solutions\n\n def display_on_map(self, index=0, map_name=None, palette=\"YlGnBu\"):\n with self._model:\n for ko in self.data_frame.loc[index, \"reactions\"]:\n self._model.reactions.get_by_id(ko).knock_out()\n fluxes = self._simulation_method(self._model, **self._simulation_kwargs)\n fluxes.display_on_map(map_name=map_name, palette=palette)\n\n def plot(self, index=0, grid=None, width=None, height=None, title=None, palette=None, **kwargs):\n wt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass])\n with self._model:\n for ko in self.data_frame.loc[index, \"reactions\"]:\n self._model.reactions.get_by_id(ko).knock_out()\n mt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass])\n\n if title is None:\n title = \"Production Envelope\"\n\n dataframe = DataFrame(columns=[\"ub\", \"lb\", \"value\", \"strain\"])\n for _, row in wt_production.iterrows():\n _df = DataFrame([[row['objective_upper_bound'], row['objective_lower_bound'], row[self._biomass.id], \"WT\"]],\n columns=dataframe.columns)\n dataframe = dataframe.append(_df)\n for _, row in mt_production.iterrows():\n _df = DataFrame([[row['objective_upper_bound'], row['objective_lower_bound'], row[self._biomass.id], \"MT\"]],\n columns=dataframe.columns)\n dataframe = dataframe.append(_df)\n\n plot = plotter.production_envelope(dataframe, grid=grid, width=width, height=height, title=title,\n x_axis_label=self._biomass.id, y_axis_label=self._target.id, palette=palette)\n plotter.display(plot)\n\n\nclass HeuristicOptSwap(StrainDesignMethod):\n def __init__(self, model, evolutionary_algorithm=inspyred.ec.GA, plot=True, cofactor_id_swaps=NADH_NADPH,\n exclude_non_gene_reactions=True, *args, **kwargs):\n super(HeuristicOptSwap, self).__init__(*args, **kwargs)\n self._skip_reactions = []\n if exclude_non_gene_reactions:\n self._skip_reactions += [r for r in model.reactions if not r.genes]\n\n self._algorithm = evolutionary_algorithm\n self._swap_pairs = cofactor_id_swaps\n self._optimization_algorithm = None\n self._model = self._optimization_algorithm.model\n self._plot = plot\n\n def run(self, target=None, biomass=None, substrate=None, max_swaps=5, variable_size=True,\n simulation_method=fba, growth_coupled=False, max_evaluations=20000, population_size=200,\n time_machine=None, max_results=50, seed=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n target : str, Metabolite or Reaction\n The design target.\n biomass : str, Metabolite or Reaction\n The biomass definition in the model.\n substrate : str, Metabolite or Reaction\n The main carbon source.\n max_swaps : int\n Max number of swaps allowed.\n variable_size : bool\n If true, all candidates have the same size. 
Otherwise the candidate size can be from 1 to max_knockouts.\n simulation_method : function\n Any method from cameo.flux_analysis.simulation or equivalent.\n growth_coupled : bool\n If true will use the minimum flux rate to compute the fitness.\n max_evaluations : int\n Number of evaluations before stop.\n population_size : int\n Number of individuals in each generation.\n time_machine : TimeMachine\n See TimeMachine.\n max_results : int\n Max number of different designs to return if found.\n kwargs : dict\n Arguments for the simulation method.\n seed : int\n A seed for random.\n\n\n Returns\n -------\n HeuristicOptSwapResult\n \"\"\"\n\n target = get_reaction_for(self._model, target)\n biomass = get_reaction_for(self._model, biomass)\n substrate = get_reaction_for(self._model, substrate)\n\n if growth_coupled:\n objective_function = biomass_product_coupled_min_yield(biomass, target, substrate)\n else:\n objective_function = biomass_product_coupled_yield(biomass, target, substrate)\n\n optimization_algorithm = CofactorSwapOptimization(model=self._model,\n cofactor_id_swaps=self._cofactor_id_swaps,\n skip_reactions=self._skip_reactions,\n objective_function=objective_function)\n\n optimization_algorithm.simulation_kwargs = kwargs\n optimization_algorithm.simulation_method = simulation_method\n optimization_algorithm.archiver = ProductionStrainArchive()\n\n result = optimization_algorithm.run(max_evaluations=max_evaluations,\n pop_size=population_size,\n max_size=max_swaps,\n variable_size=variable_size,\n maximize=True,\n max_archive_size=max_results,\n seed=seed,\n **kwargs)\n\n kwargs.update(optimization_algorithm.simulation_kwargs)\n\n return HeuristicOptSwapResult(self._model, result, self._swap_pairs, objective_function,\n simulation_method, biomass, target, substrate, kwargs)\n\n\nclass HeuristicOptSwapResult(StrainDesignMethodResult):\n __method_name__ = \"HeuristicOptSwap\"\n\n def __init__(self, model, swaps, swap_pairs, objective_function, simulation_method, biomass, target,\n substrate, simulation_kwargs, *args, **kwargs):\n super(HeuristicOptSwapResult, self).__init__(self._generate_designs(swaps, swap_pairs), *args, **kwargs)\n assert isinstance(model, Model)\n\n self._model = model\n self._swaps = swaps\n self._swap_pairs = swap_pairs\n self._objective_function = objective_function\n self._simulation_method = simulation_method\n self._biomass = biomass\n self._target = target\n self._substrate = substrate\n self._processed_solutions = None\n self._simulation_kwargs = simulation_kwargs\n\n @staticmethod\n def _generate_designs(swaps, swap_pair):\n designs = []\n for swap_design, _ in swaps:\n designs.append(StrainDesign([ReactionCofactorSwapTarget(swap, swap_pair) for swap in swap_design]))\n\n return designs\n\n def _repr_html_(self):\n return \"\"\"\n <h3>OptSwap Result</h3>\n <ul>\n <li>Simulation: %s<br/></li>\n <li>Objective Function: %s<br/></li>\n </ul>\n %s\n \"\"\" % (self._simulation_method.__name__,\n self._objective_function._repr_latex_(),\n self.data_frame._repr_html_())\n\n @property\n def data_frame(self):\n if self._processed_solutions is None:\n self._process_solutions()\n\n data_frame = DataFrame(self._processed_solutions)\n\n data_frame.sort_values(\"size\", inplace=True)\n data_frame.index = [i for i in range(len(data_frame))]\n return data_frame\n\n def _process_solutions(self):\n processed_solutions = DataFrame(columns=[\"reactions\", \"size\", \"fva_min\", \"fva_max\",\n \"target_flux\", \"biomass_flux\", \"yield\", \"fitness\"])\n\n if len(self._swaps) 
== 0:\n logger.warn(\"No solutions found\")\n self._processed_solutions = processed_solutions\n\n else:\n progress = ProgressBar(maxval=len(self._swaps), widgets=[\"Processing solutions: \", Bar(), Percentage()])\n for i, solution in progress(enumerate(self._swaps)):\n try:\n processed_solutions.loc[i] = process_reaction_swap_solution(\n self._model, solution[0], self._simulation_method, self._simulation_kwargs, self._biomass,\n self._target, self._substrate, self._objective_function, self._swap_pairs)\n except OptimizationError as e:\n logger.error(e)\n processed_solutions.loc[i] = [numpy.nan for _ in processed_solutions.columns]\n\n self._processed_solutions = processed_solutions\n\n def display_on_map(self, index=0, map_name=None, palette=\"YlGnBu\"):\n with self._model:\n for ko in self.data_frame.loc[index, \"reactions\"]:\n swap_cofactors(self._model.reactions.get_by_id(ko), self._model, self._swap_pairs)\n fluxes = self._simulation_method(self._model, **self._simulation_kwargs)\n fluxes.display_on_map(map_name=map_name, palette=palette)\n\n def plot(self, index=0, grid=None, width=None, height=None, title=None, palette=None, **kwargs):\n wt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass])\n with self._model:\n for ko in self.data_frame.loc[index, \"reactions\"]:\n swap_cofactors(self._model.reactions.get_by_id(ko), self._model, self._swap_pairs)\n mt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass])\n\n if title is None:\n title = \"Production Envelope\"\n\n dataframe = DataFrame(columns=[\"ub\", \"lb\", \"value\", \"strain\"])\n for _, row in wt_production.iterrows():\n _df = DataFrame([[row['objective_upper_bound'], row['objective_lower_bound'], row[self._biomass.id], \"WT\"]],\n columns=dataframe.columns)\n dataframe = dataframe.append(_df)\n for _, row in mt_production.iterrows():\n _df = DataFrame([[row['objective_upper_bound'], row['objective_lower_bound'], row[self._biomass.id], \"MT\"]],\n columns=dataframe.columns)\n dataframe = dataframe.append(_df)\n\n plot = plotter.production_envelope(dataframe, grid=grid, width=width, height=height, title=title,\n x_axis_label=self._biomass.id, y_axis_label=self._target.id, palette=palette)\n plotter.display(plot)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
saketkc/moca_web | [
"38dfbdd9eeb739322ff3722727e43f1f4da07d3f"
] | [
"src/bedoperations/model.py"
] | [
"import os\nfrom ..helpers import MocaException\nimport pandas\nfrom pybedtools import BedTool\n\n__NARROWPEAK_COLUMNS__ = ['chrom', 'chromStart', 'chromEnd',\n 'name', 'score', 'strand',\n 'signalValue', 'p-value', 'q-value']\n\n__BROADPEAK_COLUMNS__ = ['chrom', 'chromStart', 'chromEnd',\n 'name', 'score', 'strand',\n 'signalValue', 'p-value', 'q-value', 'peak']\n\n__BED_TYPES__ = {10: 'narrowPeak',\n 9: 'broadPeak',\n 3: 'questPeak'}\n\n__BED_COLUMN_MAPPING__ = {9: __NARROWPEAK_COLUMNS__,\n 10: __BROADPEAK_COLUMNS__}\n\nclass Bedfile(object):\n \"\"\"Class to crate a bed file object\n Parameters\n ----------\n filepath: string\n Absolute path to bedfile\n\n genome_table: string\n Absolute path to geonme chromosome size file\n \"\"\"\n def __init__(self, filepath, genome_table):\n self.filepath = filepath\n self.bed_format = None\n if not os.path.isfile(filepath):\n raise MocaException('Bed file {} not found'.format(self.filepath))\n self._read()\n self.bed_format = self.guess_bedformat()\n self.sort_bed()\n self.bed = BedTool(filepath)\n self.genome_table = genome_table\n assert self.bed_Format is not None\n\n def _read(self):\n try:\n self.bed_df = pandas.read_table(self.filepath,\n header=None)\n except Exception as e:\n raise MocaException('Error reading bed file {}'.format(self.filepath),\n 'Traceback: {}'.format(e))\n\n def guess_bedformat(self):\n \"\"\"Method to guess bed format\n Returns\n -------\n bed_format: string\n BED format\n\n Example:\n >>> bed_df = Bedfile('file.bed')\n >>> print(bed_df.guess_bed_format())\n\n \"\"\"\n self.bed_columns = self.bed_df.columns\n count = len(self.bed_columns)\n try:\n bed_format = __BED_TYPES__[count]\n except KeyError:\n raise MocaException('Bed file had {} columns. Supported column lengths are {}')\n return bed_format\n\n def slop_bed(self, flank_length=5):\n \"\"\"Add flanking sequences to bed file\n Parameters\n ----------\n flank_length: int\n the bed region is expanded in both direction by flank_length number of bases\n Returns\n -------\n slop_bed: dataframe\n Slopped bed data object\n \"\"\"\n self.bed.slop(g=self.genome_table,\n b=flank_length\n )\n\n def convert_to_scorefile(self):\n\n \"\"\"\n filename, file_extension = os.path.splitext(self.filepath)\n filename += '.sorted'\n self.bed_df.to_csv(filename+file_extension,\n sep='\\t',\n columns=['chrom', 'peak_positions', 'score'],\n index=False,\n header=False)\n \"\"\"\n if filetype=='narrowPeak':\n filter_df1 = df[df.peak.astype(int)==-1]\n filter_df2 = df[df.peak.astype(int)!=-1]\n filter_df1['peak_positions'] = (filter_df1['chromStart'].astype(int)+filter_df1['chromEnd'].astype(int))\n filter_df1['peak_positions'] = [int(x/2) for x in filter_df1['peak_positions'].astype(int)]\n filter_df2['peak_positions'] = filter_df2['chromStart'].astype(int)+filter_df2['peak'].astype(int)\n df = pandas.concat([filter_df1, filter_df2])\n else:\n df['peak_positions'] = (df['chromStart']+df['chromEnd'])\n df['peak_positions'] = [int(x/2) for x in df['peak_positions'].astype(int)]\n\n\n\n def extract_fasta(self, fasta_file):\n \"\"\"Extract fasta of bed regions\n Parameters\n ----------\n fasta_file: string\n Absolute path to location of fasta file\n Returns\n -------\n fasta: string\n Fasta sequence combined\n \"\"\"\n self.bed.sequence(fi=fasta_file)\n\n\n def sort_by(self, columns=None, ascending=False):\n \"\"\"Method to sort columns of bedfiles\n Parameters\n ----------\n columns: list\n list of column names to sort by\n ascending: bool\n Sort order(Default: true)\n\n Returns\n -------\n 
sorted_bed_df: dataframe\n dataframe with sorted columns\n \"\"\"\n assert type(columns) is list\n return self.bed_df.sort(columns, ascending)\n"
] | [
[
"pandas.read_table",
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Spaceenter/OpenFermion | [
"c1bf76582ec94373333d95fc27d1b92248ba3efd"
] | [
"src/openfermion/transforms/_projection.py"
] | [
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions to reduce the number of qubits involved in modeling a given system.\n\"\"\"\n\nimport numpy\n\nfrom openfermion.ops import QubitOperator\n\n\ndef project_onto_sector(operator, qubits, sectors):\n '''\n Takes a QubitOperator, and projects out a list\n of qubits, into either the +1 or -1 sector.\n Note - this requires knowledge of which sector\n we wish to project into.\n\n Args:\n operator: the QubitOperator to work on\n qubits: a list of indices of qubits in\n operator to remove\n sectors: for each qubit, whether to project\n into the 0 subspace (<Z>=1) or the\n 1 subspace (<Z>=-1).\n\n Returns:\n projected_operator: the resultant operator\n '''\n if type(operator) is not QubitOperator:\n raise ValueError('''Input operator must be a QubitOperator''')\n\n projected_operator = QubitOperator()\n for term, factor in operator.terms.items():\n\n # Any term containing X or Y on the removed\n # qubits has an expectation value of zero\n if [t for t in term if t[0] in qubits\n and t[1] in ['X', 'Y']]:\n continue\n\n new_term = tuple((t[0]-len([q for q in qubits if q < t[0]]), t[1])\n for t in term if t[0] not in qubits)\n new_factor =\\\n factor * (-1)**(sum([sectors[qubits.index(t[0])]\n for t in term if t[0] in qubits]))\n projected_operator += QubitOperator(new_term, new_factor)\n\n return projected_operator\n\n\ndef projection_error(operator, qubits, sectors):\n '''\n Calculates the error from the project_onto_sector function.\n\n Args:\n operator: the QubitOperator to work on\n qubits: a list of indices of qubits in\n operator to remove\n sectors: for each qubit, whether to project\n into the 0 subspace (<Z>=1) or the\n 1 subspace (<Z>=-1).\n\n Returns:\n error: the trace norm of the removed term.\n '''\n if type(operator) is not QubitOperator:\n raise ValueError('''Input operator must be a QubitOperator''')\n\n error = 0\n for term, factor in operator.terms.items():\n\n # Any term containing X or Y on the removed\n # qubits contributes to the error\n if [t for t in term if t[0] in qubits\n and t[1] in ['X', 'Y']]:\n error += abs(factor)**2\n\n return numpy.sqrt(error)\n"
] | [
[
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Samsomyajit/s-atmech | [
"65e937294a498c2e7c46a825d98e8edd015dd7d5"
] | [
"s-atmech/bahdanau.py"
] | [
"import tensorflow as tf\nimport os\nfrom tensorflow.python.keras.layers import Layer\nfrom tensorflow.python.keras import backend as K\n\n\nclass AttentionLayer(Layer):\n \"\"\"\n There are three sets of weights introduced W_a, U_a, and V_a\n \"\"\"\n\n def __init__(self, **kwargs):\n super(AttentionLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n assert isinstance(input_shape, list)\n # Create a trainable weight variable for this layer.\n\n self.W_a = self.add_weight(name='W_a',\n shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),\n initializer='uniform',\n trainable=True)\n self.U_a = self.add_weight(name='U_a',\n shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),\n initializer='uniform',\n trainable=True)\n self.V_a = self.add_weight(name='V_a',\n shape=tf.TensorShape((input_shape[0][2], 1)),\n initializer='uniform',\n trainable=True)\n\n super(AttentionLayer, self).build(input_shape) # Be sure to call this at the end\n\n def call(self, inputs, verbose=False):\n \"\"\"\n inputs: [encoder_output_sequence, decoder_output_sequence]\n \"\"\"\n assert type(inputs) == list\n encoder_out_seq, decoder_out_seq = inputs\n if verbose:\n print('encoder_out_seq>', encoder_out_seq.shape)\n print('decoder_out_seq>', decoder_out_seq.shape)\n\n def energy_step(inputs, states):\n \"\"\" Step function for computing energy for a single decoder state \"\"\"\n\n assert_msg = \"States must be a list. However states {} is of type {}\".format(states, type(states))\n assert isinstance(states, list) or isinstance(states, tuple), assert_msg\n\n \"\"\" Some parameters required for shaping tensors\"\"\"\n en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]\n de_hidden = inputs.shape[-1]\n\n \"\"\" Computing S.Wa where S=[s0, s1, ..., si]\"\"\"\n # <= batch_size*en_seq_len, latent_dim\n reshaped_enc_outputs = K.reshape(encoder_out_seq, (-1, en_hidden))\n # <= batch_size*en_seq_len, latent_dim\n W_a_dot_s = K.reshape(K.dot(reshaped_enc_outputs, self.W_a), (-1, en_seq_len, en_hidden))\n if verbose:\n print('wa.s>',W_a_dot_s.shape)\n\n \"\"\" Computing hj.Ua \"\"\"\n U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1) # <= batch_size, 1, latent_dim\n if verbose:\n print('Ua.h>',U_a_dot_h.shape)\n\n \"\"\" tanh(S.Wa + hj.Ua) \"\"\"\n # <= batch_size*en_seq_len, latent_dim\n reshaped_Ws_plus_Uh = K.tanh(K.reshape(W_a_dot_s + U_a_dot_h, (-1, en_hidden)))\n if verbose:\n print('Ws+Uh>', reshaped_Ws_plus_Uh.shape)\n\n \"\"\" softmax(va.tanh(S.Wa + hj.Ua)) \"\"\"\n # <= batch_size, en_seq_len\n e_i = K.reshape(K.dot(reshaped_Ws_plus_Uh, self.V_a), (-1, en_seq_len))\n # <= batch_size, en_seq_len\n e_i = K.softmax(e_i)\n\n if verbose:\n print('ei>', e_i.shape)\n\n return e_i, [e_i]\n\n def context_step(inputs, states):\n \"\"\" Step function for computing ci using ei \"\"\"\n # <= batch_size, hidden_size\n c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)\n if verbose:\n print('ci>', c_i.shape)\n return c_i, [c_i]\n\n def create_inital_state(inputs, hidden_size):\n \n fake_state = K.zeros_like(inputs) # <= (batch_size, enc_seq_len, latent_dim\n fake_state = K.sum(fake_state, axis=[1, 2]) # <= (batch_size)\n fake_state = K.expand_dims(fake_state) # <= (batch_size, 1)\n fake_state = K.tile(fake_state, [1, hidden_size]) # <= (batch_size, latent_dim\n return fake_state\n\n fake_state_c = create_inital_state(encoder_out_seq, encoder_out_seq.shape[-1])\n fake_state_e = create_inital_state(encoder_out_seq, encoder_out_seq.shape[1]) # <= (batch_size, 
enc_seq_len, latent_dim\n\n \"\"\" Computing energy outputs \"\"\"\n # e_outputs => (batch_size, de_seq_len, en_seq_len)\n last_out, e_outputs, _ = K.rnn(\n energy_step, decoder_out_seq, [fake_state_e],\n )\n\n \"\"\" Computing context vectors \"\"\"\n last_out, c_outputs, _ = K.rnn(\n context_step, e_outputs, [fake_state_c],\n )\n\n return c_outputs, e_outputs\n\n def compute_output_shape(self, input_shape):\n \"\"\" Outputs produced by the layer \"\"\"\n return [\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))\n ]\n\n \"\"\"testing\"\"\"\nif __name__ == \"__main__\":\n lyr = AttentionLayer(lyr)\n print (lyr.compute_output_shape(10))\n"
] | [
[
"tensorflow.python.keras.backend.rnn",
"tensorflow.python.keras.backend.softmax",
"tensorflow.TensorShape",
"tensorflow.python.keras.backend.tile",
"tensorflow.python.keras.backend.sum",
"tensorflow.python.keras.backend.reshape",
"tensorflow.python.keras.backend.zeros_like",
"tensorflow.python.keras.backend.dot",
"tensorflow.python.keras.backend.expand_dims"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
firaschaabani/fairseq | [
"425c36eafff535fe7337f8bdd5ace22ebacc78cb"
] | [
"fairseq/data/audio/raw_audio_dataset.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport logging\nimport os\nimport sys\nimport io\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom .. import FairseqDataset, BaseWrapperDataset\nfrom ..data_utils import compute_mask_indices, get_buckets, get_bucketed_sizes\nfrom fairseq.data.audio.audio_utils import (\n parse_path, read_from_stored_zip, is_sf_audio_data\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass RawAudioDataset(FairseqDataset):\n def __init__(\n self,\n sample_rate,\n max_sample_size=None,\n min_sample_size=0,\n shuffle=True,\n pad=False,\n normalize=False,\n compute_mask_indices=False,\n **mask_compute_kwargs,\n ):\n super().__init__()\n\n self.sample_rate = sample_rate\n self.sizes = []\n self.max_sample_size = (\n max_sample_size if max_sample_size is not None else sys.maxsize\n )\n self.min_sample_size = min_sample_size\n self.pad = pad\n self.shuffle = shuffle\n self.normalize = normalize\n self.compute_mask_indices = compute_mask_indices\n if self.compute_mask_indices:\n self.mask_compute_kwargs = mask_compute_kwargs\n self._features_size_map = {}\n self._C = mask_compute_kwargs[\"encoder_embed_dim\"]\n self._conv_feature_layers = eval(mask_compute_kwargs[\"conv_feature_layers\"])\n\n def __getitem__(self, index):\n raise NotImplementedError()\n\n def __len__(self):\n return len(self.sizes)\n\n def postprocess(self, feats, curr_sample_rate):\n if feats.dim() == 2:\n feats = feats.mean(-1)\n\n if curr_sample_rate != self.sample_rate:\n raise Exception(f\"sample rate: {curr_sample_rate}, need {self.sample_rate}\")\n\n assert feats.dim() == 1, feats.dim()\n\n if self.normalize:\n with torch.no_grad():\n feats = F.layer_norm(feats, feats.shape)\n return feats\n\n def crop_to_max_size(self, wav, target_size):\n size = len(wav)\n diff = size - target_size\n if diff <= 0:\n return wav\n\n start = np.random.randint(0, diff + 1)\n end = size - diff + start\n return wav[start:end]\n\n def _compute_mask_indices(self, dims, padding_mask):\n B, T, C = dims\n mask_indices, mask_channel_indices = None, None\n if self.mask_compute_kwargs[\"mask_prob\"] > 0:\n mask_indices = compute_mask_indices(\n (B, T),\n padding_mask,\n self.mask_compute_kwargs[\"mask_prob\"],\n self.mask_compute_kwargs[\"mask_length\"],\n self.mask_compute_kwargs[\"mask_selection\"],\n self.mask_compute_kwargs[\"mask_other\"],\n min_masks=2,\n no_overlap=self.mask_compute_kwargs[\"no_mask_overlap\"],\n min_space=self.mask_compute_kwargs[\"mask_min_space\"],\n )\n mask_indices = torch.from_numpy(mask_indices)\n if self.mask_compute_kwargs[\"mask_channel_prob\"] > 0:\n mask_channel_indices = compute_mask_indices(\n (B, C),\n None,\n self.mask_compute_kwargs[\"mask_channel_prob\"],\n self.mask_compute_kwargs[\"mask_channel_length\"],\n self.mask_compute_kwargs[\"mask_channel_selection\"],\n self.mask_compute_kwargs[\"mask_channel_other\"],\n no_overlap=self.mask_compute_kwargs[\"no_mask_channel_overlap\"],\n min_space=self.mask_compute_kwargs[\"mask_channel_min_space\"],\n )\n mask_channel_indices = (\n torch.from_numpy(mask_channel_indices).unsqueeze(1).expand(-1, T, -1)\n )\n\n return mask_indices, mask_channel_indices\n\n @staticmethod\n def _bucket_tensor(tensor, num_pad, value):\n return F.pad(tensor, (0, num_pad), value=value)\n\n def collater(self, samples):\n samples = [s for s in samples if s[\"source\"] is not None]\n if 
len(samples) == 0:\n return {}\n\n sources = [s[\"source\"] for s in samples]\n sizes = [len(s) for s in sources]\n\n if self.pad:\n target_size = min(max(sizes), self.max_sample_size)\n else:\n target_size = min(min(sizes), self.max_sample_size)\n\n collated_sources = sources[0].new_zeros(len(sources), target_size)\n padding_mask = (\n torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None\n )\n for i, (source, size) in enumerate(zip(sources, sizes)):\n diff = size - target_size\n if diff == 0:\n collated_sources[i] = source\n elif diff < 0:\n assert self.pad\n collated_sources[i] = torch.cat(\n [source, source.new_full((-diff,), 0.0)]\n )\n padding_mask[i, diff:] = True\n else:\n collated_sources[i] = self.crop_to_max_size(source, target_size)\n\n input = {\"source\": collated_sources}\n out = {\"id\": torch.LongTensor([s[\"id\"] for s in samples])}\n if self.pad:\n input[\"padding_mask\"] = padding_mask\n\n if hasattr(self, \"num_buckets\") and self.num_buckets > 0:\n assert self.pad, \"Cannot bucket without padding first.\"\n bucket = max(self._bucketed_sizes[s[\"id\"]] for s in samples)\n num_pad = bucket - collated_sources.size(-1)\n if num_pad:\n input[\"source\"] = self._bucket_tensor(collated_sources, num_pad, 0)\n input[\"padding_mask\"] = self._bucket_tensor(padding_mask, num_pad, True)\n\n if self.compute_mask_indices:\n B = input[\"source\"].size(0)\n T = self._get_mask_indices_dims(input[\"source\"].size(-1))\n padding_mask_reshaped = input[\"padding_mask\"].clone()\n extra = padding_mask_reshaped.size(1) % T\n if extra > 0:\n padding_mask_reshaped = padding_mask_reshaped[:, :-extra]\n padding_mask_reshaped = padding_mask_reshaped.view(\n padding_mask_reshaped.size(0), T, -1\n )\n padding_mask_reshaped = padding_mask_reshaped.all(-1)\n input[\"padding_count\"] = padding_mask_reshaped.sum(-1).max().item()\n mask_indices, mask_channel_indices = self._compute_mask_indices(\n (B, T, self._C),\n padding_mask_reshaped,\n )\n input[\"mask_indices\"] = mask_indices\n input[\"mask_channel_indices\"] = mask_channel_indices\n out[\"sample_size\"] = mask_indices.sum().item()\n\n out[\"net_input\"] = input\n return out\n\n def _get_mask_indices_dims(self, size, padding=0, dilation=1):\n if size not in self._features_size_map:\n L_in = size\n for (_, kernel_size, stride) in self._conv_feature_layers:\n L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1\n L_out = 1 + L_out // stride\n L_in = L_out\n self._features_size_map[size] = L_out\n return self._features_size_map[size]\n\n def num_tokens(self, index):\n return self.size(index)\n\n def size(self, index):\n \"\"\"Return an example's size as a float or tuple. This value is used when\n filtering a dataset with ``--max-positions``.\"\"\"\n if self.pad:\n return self.sizes[index]\n return min(self.sizes[index], self.max_sample_size)\n\n def ordered_indices(self):\n \"\"\"Return an ordered list of indices. 
Batches will be constructed based\n on this order.\"\"\"\n\n if self.shuffle:\n order = [np.random.permutation(len(self))]\n else:\n order = [np.arange(len(self))]\n\n order.append(self.sizes)\n return np.lexsort(order)[::-1]\n\n def set_bucket_info(self, num_buckets):\n self.num_buckets = num_buckets\n if self.num_buckets > 0:\n self._collated_sizes = np.minimum(\n np.array(self.sizes),\n self.max_sample_size,\n )\n self.buckets = get_buckets(\n self._collated_sizes,\n self.num_buckets,\n )\n self._bucketed_sizes = get_bucketed_sizes(\n self._collated_sizes, self.buckets\n )\n logger.info(\n f\"{len(self.buckets)} bucket(s) for the audio dataset: \"\n f\"{self.buckets}\"\n )\n\n\nclass FileAudioDataset(RawAudioDataset):\n def __init__(\n self,\n manifest_path,\n sample_rate,\n max_sample_size=None,\n min_sample_size=0,\n shuffle=True,\n pad=False,\n normalize=False,\n num_buckets=0,\n compute_mask_indices=False,\n **mask_compute_kwargs,\n ):\n super().__init__(\n sample_rate=sample_rate,\n max_sample_size=max_sample_size,\n min_sample_size=min_sample_size,\n shuffle=shuffle,\n pad=pad,\n normalize=normalize,\n compute_mask_indices=compute_mask_indices,\n **mask_compute_kwargs,\n )\n\n skipped = 0\n self.fnames = []\n sizes = []\n self.skipped_indices = set()\n\n with open(manifest_path, \"r\") as f:\n self.root_dir = f.readline().strip()\n for i, line in enumerate(f):\n items = line.strip().split(\"\\t\")\n assert len(items) == 2, line\n sz = int(items[1])\n if min_sample_size is not None and sz < min_sample_size:\n skipped += 1\n self.skipped_indices.add(i)\n continue\n self.fnames.append(items[0])\n sizes.append(sz)\n logger.info(f\"loaded {len(self.fnames)}, skipped {skipped} samples\")\n\n self.sizes = np.array(sizes, dtype=np.int64)\n\n try:\n import pyarrow\n\n self.fnames = pyarrow.array(self.fnames)\n except:\n logger.debug(\n \"Could not create a pyarrow array. 
Please install pyarrow for better performance\"\n )\n pass\n\n self.set_bucket_info(num_buckets)\n\n def __getitem__(self, index):\n import soundfile as sf\n\n path_or_fp = os.path.join(self.root_dir, self.fnames[index])\n _path, slice_ptr = parse_path(path_or_fp)\n if len(slice_ptr) == 2:\n byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])\n assert is_sf_audio_data(byte_data)\n path_or_fp = io.BytesIO(byte_data)\n\n wav, curr_sample_rate = sf.read(path_or_fp, dtype=\"float32\")\n\n feats = torch.from_numpy(wav).float()\n feats = self.postprocess(feats, curr_sample_rate)\n return {\"id\": index, \"source\": feats}\n\n\nclass BinarizedAudioDataset(RawAudioDataset):\n def __init__(\n self,\n data_dir,\n split,\n sample_rate,\n max_sample_size=None,\n min_sample_size=0,\n shuffle=True,\n pad=False,\n normalize=False,\n num_buckets=0,\n compute_mask_indices=False,\n **mask_compute_kwargs,\n ):\n super().__init__(\n sample_rate=sample_rate,\n max_sample_size=max_sample_size,\n min_sample_size=min_sample_size,\n shuffle=shuffle,\n pad=pad,\n normalize=normalize,\n compute_mask_indices=compute_mask_indices,\n **mask_compute_kwargs,\n )\n\n from fairseq.data import data_utils, Dictionary\n\n self.fnames_dict = Dictionary.load(os.path.join(data_dir, \"dict.txt\"))\n\n root_path = os.path.join(data_dir, f\"{split}.root\")\n if os.path.exists(root_path):\n with open(root_path, \"r\") as f:\n self.root_dir = next(f).strip()\n else:\n self.root_dir = None\n\n fnames_path = os.path.join(data_dir, split)\n self.fnames = data_utils.load_indexed_dataset(fnames_path, self.fnames_dict)\n lengths_path = os.path.join(data_dir, f\"{split}.lengths\")\n\n with open(lengths_path, \"r\") as f:\n for line in f:\n sz = int(line.rstrip())\n assert (\n sz >= min_sample_size\n ), f\"Min sample size is not supported for binarized dataset, but found a sample with size {sz}\"\n self.sizes.append(sz)\n\n self.sizes = np.array(self.sizes, dtype=np.int64)\n\n self.set_bucket_info(num_buckets)\n logger.info(f\"loaded {len(self.fnames)} samples\")\n\n def __getitem__(self, index):\n import soundfile as sf\n\n fname = self.fnames_dict.string(self.fnames[index], separator=\"\")\n if self.root_dir:\n fname = os.path.join(self.root_dir, fname)\n\n wav, curr_sample_rate = sf.read(fname)\n feats = torch.from_numpy(wav).float()\n feats = self.postprocess(feats, curr_sample_rate)\n return {\"id\": index, \"source\": feats}\n"
] | [
[
"torch.nn.functional.layer_norm",
"torch.BoolTensor",
"torch.LongTensor",
"torch.from_numpy",
"numpy.lexsort",
"torch.no_grad",
"numpy.array",
"torch.nn.functional.pad",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bridgeland/minnetonka | [
"0e114a613d931e1a2bedc501d508e7fb00abb306"
] | [
"test/test_minnetonka.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"test_minnetonka.py: test the minnetonka language for value modeling\"\"\"\n\n__author__ = \"Dave Bridgeland\"\n__copyright__ = \"Copyright 2017-2020, Hanging Steel Productions LLC\"\n__credits__ = [\"Dave Bridgeland\"]\n\n__version__ = \"0.0.1\"\n__maintainer__ = \"Dave Bridgeland\"\n__email__ = \"[email protected]\"\n__status__ = \"Prototype\"\n\nimport warnings\nimport unittest \nimport unittest.mock\nimport io\nimport sys\nimport random\nimport collections\nimport numpy as np \n\nimport minnetonka as mn\n\nclass ModelCreationTest(unittest.TestCase):\n \"\"\"Create a model, in a couple of ways. Access the treatments\"\"\"\n def test_create_with_one_treatment(self):\n \"\"\"Create a model with a single treatment\"\"\"\n m = mn.model(treatments=['As is'])\n ts = list(m.treatments())\n self.assertEqual(len(ts), 1)\n self.assertEqual(ts[0].name, 'As is')\n\n def test_create_with_two_treatments(self):\n \"\"\"Create a model with a single treatment\"\"\"\n m = mn.model(treatments=['As is', 'To be'])\n ts = list(m.treatments())\n self.assertEqual(len(ts), 2)\n self.assertEqual(ts[0].name, 'As is')\n self.assertEqual(ts[1].name, 'To be')\n\n def test_create_model_with_no_explicit_treatments(self):\n \"\"\"Create a model with no explicit treatments\"\"\"\n m = mn.model()\n ts = list(m.treatments())\n self.assertEqual(len(ts), 1)\n self.assertEqual(ts[0].name, '')\n\n def test_create_model_with_descriptions(self):\n \"\"\"Create a model with treatment descriptions\"\"\"\n m = mn.model(treatments=[('As is', 'The current situation'),\n ('To be', 'The future')])\n ts = list(m.treatments())\n self.assertEqual(len(ts), 2)\n self.assertEqual(ts[0].name, 'As is')\n self.assertEqual(ts[0].description, 'The current situation')\n self.assertEqual(ts[1].name, 'To be')\n self.assertEqual(ts[1].description, 'The future')\n\n def test_four_mixed_treatments(self):\n \"\"\"Create a model with four treatments, some of which are described\"\"\"\n m = mn.model(treatments=[('As is', 'The current situation'), \n 'To be', \n 'Alternative 1',\n ('Alternative 2', 'Another possibility')])\n ts = list(m.treatments())\n self.assertEqual(len(ts), 4)\n self.assertEqual(ts[0].name, 'As is')\n self.assertEqual(ts[0].description, 'The current situation')\n self.assertEqual(ts[1].name, 'To be')\n self.assertIsNone(ts[1].description)\n self.assertEqual(ts[2].name, 'Alternative 1')\n self.assertIsNone(ts[2].description)\n self.assertEqual(ts[3].name, 'Alternative 2')\n self.assertEqual(ts[3].description, 'Another possibility')\n\n\nclass ModelTreatmentAccess(unittest.TestCase):\n \"\"\"Access the treatments from a model\"\"\"\n def test_access_treatments(self):\n \"\"\"Access the treatments from a model\"\"\"\n m = mn.model(treatments=[('As is', 'The current situation'), \n ('To be', 'The future')])\n self.assertEqual(m.treatment('As is').name, 'As is')\n self.assertEqual(\n m.treatment('As is').description, 'The current situation')\n self.assertEqual(m.treatment('To be').description, 'The future')\n\n\nclass ModelVariableAccess(unittest.TestCase):\n \"\"\"Access the variable (classes) of a model\"\"\"\n def test_variable_access(self):\n \"\"\"Access a variable with .mn.variable()\"\"\"\n DischargeBegins = mn.variable('DischargeBegins', 12)\n DischargeEnds = mn.variable('DischargeEnds', 18)\n m = mn.model([DischargeBegins, DischargeEnds])\n self.assertEqual(m.variable('DischargeBegins'), DischargeBegins)\n self.assertEqual(m.variable('DischargeEnds'), DischargeEnds)\n\n def 
test_unknown_variable_access(self):\n \"\"\"Access a variable that does not exist\"\"\"\n DischargeBegins = mn.variable('DischargeBegins', 12)\n DischargeEnds = mn.variable('DischargeEnds', 18)\n m = mn.model([DischargeBegins, DischargeEnds])\n with self.assertRaises(mn.MinnetonkaError) as me:\n m.variable('DischargeAbides')\n self.assertEqual(\n me.exception.message, 'Unknown variable DischargeAbides')\n\n def test_subscripts(self):\n DischargeBegins = mn.variable('DischargeBegins', 12)\n DischargeEnds = mn.variable('DischargeEnds', 18)\n m = mn.model([DischargeBegins, DischargeEnds])\n self.assertEqual(m['DischargeBegins'], DischargeBegins)\n self.assertEqual(m['DischargeEnds'], DischargeEnds)\n with self.assertRaises(mn.MinnetonkaError) as me:\n m['DischargeAbides']\n self.assertEqual(\n me.exception.message, 'Unknown variable DischargeAbides')\n\n def test_redefined_variable(self):\n DischargeBegins = mn.variable('DischargeBegins', 12)\n DB2 = mn.variable('DischargeBegins', 13)\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n m = mn.model([DischargeBegins, DB2])\n self.assertEqual(len(w), 1)\n self.assertEqual(w[-1].category, mn.MinnetonkaWarning)\n self.assertEqual(\n str(w[-1].message), 'Variable DischargeBegins redefined')\n self.assertEqual(m['DischargeBegins'][''], 13)\n\n\nclass TreatmentTest(unittest.TestCase):\n \"\"\"Basic test of treatments\"\"\"\n def test_repr(self): \n \"\"\"Are the treatments repr'd correctly?\"\"\"\n m = mn.model(treatments=[\n 'As is', ('Value at risk', 'Total value that could be achieved')])\n nullTreatment, valueAtRisk = m.treatments()\n self.assertEqual(repr(nullTreatment), \"Treatment('As is')\")\n self.assertEqual(\n repr(valueAtRisk),\n \"Treatment('Value at risk', 'Total value that could be achieved')\")\n\n def test_by_name(self):\n \"\"\"Is the class Treatment keeping track of all the treatments?\"\"\"\n m = mn.model(treatments=[\n 'As is', ('Value at risk', 'Total value that could be achieved')])\n nullTreatment, valueAtRisk = m.treatments()\n self.assertEqual(m.treatment('As is'), nullTreatment)\n self.assertEqual(m.treatment('Value at risk'), valueAtRisk)\n\n def test_by_name_not_found(self):\n \"\"\"Does Treatment raise an error if the treatment is not found?\"\"\"\n m = mn.model(treatments=[\n 'As is', ('Value at risk', 'Total value that could be achieved')])\n with self.assertRaises(mn.MinnetonkaError) as me:\n foo = m.treatment('Could be')\n self.assertEqual(\n me.exception.message, 'Model has no treatment Could be')\n\n\nclass SimpleQuantityTest(unittest.TestCase):\n \"\"\"Tests for simple quantities\"\"\"\n def test_single_simple_quantity(self):\n \"\"\"Does the simple quantity know its value?\"\"\"\n DischargeBegins = mn.variable('DischargeBegins', 12)\n m = mn.model([DischargeBegins])\n self.assertEqual(DischargeBegins.by_treatment('').amount(), 12)\n\n def test_single_simple_quantity_via_subscript(self):\n \"\"\"Does the simple quantity know its value, accessed via subscript?\"\"\"\n DischargeBegins = mn.variable('DischargeBegins', 12)\n m = mn.model([DischargeBegins])\n self.assertEqual(DischargeBegins[''], 12)\n\n def test_simple_equality_two_treatments(self):\n \"\"\"Does a simple quantity know its values in 2 different treatments?\"\"\"\n DischargeBegins = mn.variable('DischargeBegins', \n mn.PerTreatment({'As is': 12, 'To be': 2}))\n m = mn.model([DischargeBegins], ['As is', 'To be'])\n self.assertEqual(m['DischargeBegins']['As is'], 12)\n self.assertEqual(m['DischargeBegins']['To be'], 2)\n\n def 
test_constant_with_default_across_treatments(self):\n DischargeEnds = mn.variable('DischargeEnds', 15)\n DischargeBegins = mn.variable('DischargeBegins', \n mn.PerTreatment({'As is': 12, 'To be': 2}))\n m = mn.model([DischargeBegins, DischargeEnds], ['As is', 'To be'])\n self.assertEqual(DischargeEnds['As is'], 15)\n self.assertEqual(DischargeEnds['To be'], 15)\n self.assertEqual(m['DischargeBegins']['As is'], 12)\n self.assertEqual(m['DischargeBegins']['To be'], 2)\n\n def test_incomplete_varying_quantity(self):\n \"\"\"Does an incomplete varying quantity know it's incomplete?\"\"\"\n DischargeBegins = mn.variable('DischargeBegins', \n mn.PerTreatment({'As is': 12, '2B': 2}))\n with self.assertRaises(mn.MinnetonkaError) as cm:\n m = mn.model([DischargeBegins], ['As is', 'To be'])\n self.assertEqual(cm.exception.message,\n \"Treatment 'To be' not defined\")\n\n def test_quantity_knows_treatment(self): \n \"\"\"Does the simple quantity know its treatment?\"\"\"\n DischargeBegins = mn.variable('DischargeBegins', \n mn.PerTreatment({'As is': 12, 'To be': 2}))\n m = mn.model([DischargeBegins], ['As is', 'To be'])\n self.assertEqual(m['DischargeBegins'].by_treatment('As is').treatment(),\n m.treatment('As is'))\n self.assertEqual(m['DischargeBegins'].by_treatment('To be').treatment(),\n m.treatment('To be'))\n\n def test_treatment_knows_quantity(self):\n \"\"\"Does the treatment know its simple quantity?\"\"\"\n DischargeBegins = mn.variable('DischargeBegins', \n mn.PerTreatment({'As is': 12, 'To be': 2}))\n m = mn.model([DischargeBegins], ['As is', 'To be'])\n self.assertEqual(m['DischargeBegins'].by_treatment('As is'),\n m.treatment('As is')['DischargeBegins'])\n self.assertEqual(m['DischargeBegins'].by_treatment('To be'),\n m.treatment('To be')['DischargeBegins'])\n\n def test_reset(self):\n \"\"\"Can a simple quantity reset correctly?\"\"\"\n DischargeBegins = mn.variable('DischargeBegins', \n mn.PerTreatment({'As is': 12, 'To be': 2}))\n m = mn.model([DischargeBegins], ['As is', 'To be'])\n DischargeBegins['As is'] = 11\n m.reset()\n self.assertEqual(DischargeBegins['As is'], 12)\n\n def test_docstring(self):\n \"\"\"Can a simple quantity have a docstring?\"\"\"\n DischargeEnds = mn.variable('DischargeEnds', \n \"\"\"The quarter when discharging ends\"\"\",\n 15)\n self.assertEqual(\n DischargeEnds.__doc__, 'The quarter when discharging ends')\n\n\nclass ContextManagerTest(unittest.TestCase):\n \"\"\"Create a model and variables using a contex manager\"\"\"\n def test_variable_access_within_context_manager(self):\n \"\"\"Does a model defined as a context mgr know about the variables?\"\"\"\n with mn.model() as m:\n DischargeBegins = mn.variable('DischargeBegins', 12)\n self.assertEqual(m.variable('DischargeBegins'), DischargeBegins)\n\n def test_model_initialization_via_context_manager(self):\n \"\"\"Does a model defined as a context manager initialize?\"\"\"\n with mn.model():\n DischargeBegins = mn.variable('DischargeBegins', 12)\n DischargeEnds = mn.variable('DischargeEnds', 15)\n self.assertEqual(DischargeBegins[''], 12)\n self.assertEqual(DischargeEnds[''], 15)\n\n def test_variables_without_python_vars_within_context_manager(self):\n \"\"\"Does a model context manager need vars to have python vars?\"\"\"\n with mn.model() as m:\n mn.variable('DischargeBegins', 12)\n mn.variable('DischargeEnds', 15)\n self.assertEqual(m['DischargeBegins'][''], 12)\n self.assertEqual(m['DischargeEnds'][''], 15)\n\n def test_reopen_context_manager(self):\n \"\"\"Can I reopen a previously defined context 
manager and add a var?\"\"\"\n with mn.model() as m:\n mn.variable('DischargeBegins', 12)\n with m:\n mn.variable('DischargeEnds', 15)\n self.assertEqual(m['DischargeEnds'][''], 15)\n\n def test_reopen_context_manager_after_step(self):\n \"\"\"Can I reopen a previously defined context manager and add a var?\"\"\"\n with mn.model() as m:\n mn.variable('DischargeBegins', 12)\n m.step()\n with m:\n mn.variable('DischargeEnds', 15)\n self.assertEqual(m['DischargeEnds'][''], 15)\n self.assertEqual(m['DischargeBegins'][''], 12)\n\n def test_redefine_variable(self):\n \"\"\"Can I redefine a variable in a subsequent context?\"\"\"\n with mn.model() as m:\n mn.variable('DischargeBegins', 12)\n self.assertEqual(m['DischargeBegins'][''], 12)\n # Yuck. Need to encapsulate all this code to check for warnings\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n with m:\n mn.variable('DischargeBegins', 13)\n self.assertEqual(len(w), 1)\n self.assertEqual(w[-1].category, mn.MinnetonkaWarning)\n self.assertEqual(\n str(w[-1].message), 'Variable DischargeBegins redefined')\n self.assertEqual(m['DischargeBegins'][''], 13)\n\n\nclass NoArgVariableTest(unittest.TestCase):\n \"\"\"Tests for variables without any arguments\"\"\"\n def test_simple_noarg(self):\n \"\"\"Does a simple no arg variable work?\"\"\"\n with mn.model():\n DischargeProgress = mn.variable('DischargeProgress', lambda: 0.5)\n self.assertEqual(DischargeProgress[''], 0.5)\n\n def test_simple_no_arg_with_docstring(self):\n \"\"\"Does a simple no arg variable wiht a docstring work?\"\"\"\n with mn.model():\n DischargeProgress = mn.variable('DischargeProgress', \n \"\"\"Between the beginning and the end, how much progress?\"\"\",\n lambda: 0.5)\n self.assertEqual(\n DischargeProgress.__doc__, \n 'Between the beginning and the end, how much progress?')\n self.assertEqual(DischargeProgress[''], 0.5)\n\n def test_changing_no_arg(self):\n \"\"\"Can the no arg variable change its behavior?\"\"\"\n with mn.model(treatments=['As is']) as m:\n progress = 0.5\n DischargeProgress = mn.variable('DischargeProgress', lambda: progress)\n self.assertEqual(DischargeProgress['As is'], 0.5)\n progress = 0.7\n m.step()\n self.assertEqual(DischargeProgress['As is'], 0.7)\n\n def test_reset_no_arg(self):\n \"\"\"Can a no arg variable reset?\"\"\"\n with mn.model() as m:\n DischargeProgress = mn.variable('DischargeProgress', lambda: 0.5)\n self.assertEqual(DischargeProgress[''], 0.5)\n m.reset()\n self.assertEqual(DischargeProgress[''], 0.5)\n\n def test_embedded_fn(self):\n \"\"\"Can a function be defined within a model context?\"\"\"\n with mn.model() as m:\n def _fn(x):\n return x + 1\n\n Foo = mn.variable('Foo', _fn, 'Bar')\n mn.variable('Bar', 9)\n\n self.assertEqual(Foo[''], 10)\n\n def test_different_treatments_different_callables(self):\n \"\"\"Can different treatments be given different callables\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n Foo = mn.variable('Foo', mn.PerTreatment(\n {'As is': lambda: 12, 'To be': lambda: 13}))\n\n self.assertEqual(Foo['As is'], 12)\n self.assertEqual(Foo['To be'], 13)\n\n def test_callable_and_constant(self):\n \"\"\"Can one treatment have a callable and another a constant\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n Foo = mn.variable('Foo', mn.PerTreatment(\n {'As is': lambda: 12, 'To be': 13}))\n\n self.assertEqual(Foo['As is'], 12)\n self.assertEqual(Foo['To be'], 13)\n\n\nclass OneArgVariableTest(unittest.TestCase):\n \"\"\"Tests for variables that have a 
single argument\"\"\"\n def test_sunny_day(self):\n with mn.model(treatments=['As is', 'To be']):\n DischargeBegins = mn.variable('DischargeBegins', \n mn.PerTreatment({'As is': 4, 'To be': 3}))\n current_step = 7\n DischargeProgress = mn.variable('DischargeProgress', \n \"\"\"Between the beginning and the end, how much progress?\"\"\",\n lambda db: (current_step - db) / 4,\n 'DischargeBegins') \n self.assertEqual(DischargeProgress['As is'], 0.75)\n self.assertEqual(DischargeProgress['To be'], 1.0)\n self.assertEqual(\n DischargeProgress.__doc__, \n 'Between the beginning and the end, how much progress?')\n\n def test_different_treatments_different_callables(self):\n \"\"\"Can different treatments be given different callables\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n DischargeBegins = mn.variable('DischargeBegins', 4)\n current_step = 7\n DischargeProgress = mn.variable('DischargeProgress', \n \"\"\"Between the beginning and the end, how much progress?\"\"\",\n mn.PerTreatment(\n {'As is': lambda db: (current_step - db) / 4,\n 'To be': lambda db: (current_step - db + 1) / 4}),\n 'DischargeBegins')\n\n self.assertEqual(DischargeProgress['As is'], 0.75)\n self.assertEqual(DischargeProgress['To be'], 1)\n\n def test_populate_no_depends(self):\n current_step = 7\n with self.assertRaises(TypeError):\n with mn.model(treatments=['As is', 'To be']):\n mn.variable('Progress', lambda db: (current_step - db) / 4)\n\n def test_simple_circularity(self):\n \"\"\"Test for detecting a variable that depends on itself\"\"\"\n with self.assertRaises(mn.MinnetonkaError) as me:\n with mn.model():\n mn.variable('Reflect', lambda r: r+2, 'Reflect')\n self.assertEqual(me.exception.message,\n 'Circularity among variables: Reflect <- Reflect')\n \n\nclass TwoArgsTest(unittest.TestCase):\n \"\"\"Tests for variables that have two arguments\"\"\"\n def test_two_args_sunny_day(self):\n with mn.model():\n mn.variable('DischargeBegins', 5)\n mn.variable('DischargeEnds', 9)\n current_step = 7\n DischargeProgress = mn.variable('DischargeProgress',\n \"\"\"Between the beginning and the end, how much progress?\"\"\",\n lambda db, de: (current_step - db) / (de - db),\n 'DischargeBegins', 'DischargeEnds') \n self.assertEqual(DischargeProgress[''], 0.5)\n self.assertEqual(\n DischargeProgress.__doc__, \n 'Between the beginning and the end, how much progress?')\n\n def test_two_args_2_treatments_sunny_day(self):\n with mn.model(treatments=['As is', 'Just might work']):\n mn.variable('DischargeBegins', 5)\n mn.variable('DischargeEnds', \n mn.PerTreatment({'As is': 13, 'Just might work': 11}))\n current_step = 7\n DischargeProgress = mn.variable('DischargeProgress',\n lambda db, de: (current_step - db) / (de - db),\n 'DischargeBegins', 'DischargeEnds') \n self.assertEqual(DischargeProgress['As is'], 0.25)\n self.assertEqual(DischargeProgress['Just might work'], 1/3)\n\n def test_two_arg_circularity(self):\n \"\"\"Can variable detect dependency on var that depends on the first?\"\"\"\n with self.assertRaises(mn.MinnetonkaError) as me:\n with mn.model():\n mn.variable('Foo', lambda b: b + 2, 'Bar')\n mn.variable('Bar', lambda f: f - 2, 'Foo')\n self.assertEqual(me.exception.message,\n 'Circularity among variables: Foo <- Bar <- Foo')\n\n def test_three_arg_circularity(self):\n \"\"\"Can variable detect dependency on var that depends on the first?\"\"\"\n with self.assertRaises(mn.MinnetonkaError) as me:\n with mn.model():\n mn.variable('Foo', lambda b: b + 2, 'Bar')\n mn.variable('Bar', lambda b: b - 2, 'Baz')\n 
mn.variable('Baz', lambda f: f + 10, 'Foo')\n self.assertEqual(me.exception.message,\n 'Circularity among variables: Foo <- Bar <- Baz <- Foo')\n\n\nclass ExpressionCacheTest(unittest.TestCase):\n \"\"\"Test the behavior of the cache\"\"\"\n def test_cache_retention(self):\n \"\"\"Test cache retention\"\"\"\n with mn.model():\n hidden = 12\n mn.variable('Cached', lambda: hidden)\n UsesCached = mn.variable('UsesCached', lambda x: x, 'Cached')\n self.assertEqual(UsesCached[''], 12)\n hidden = 14\n self.assertEqual(UsesCached[''], 12)\n\n\nclass TimeTest(unittest.TestCase):\n \"\"\"Test step(), on all kinds of simple variables\"\"\"\n def test_step_constant(self):\n \"\"\"Test step on a constant variable\"\"\"\n with mn.model() as m:\n HoursPerDay = mn.variable('HoursPerDay', 24)\n self.assertEqual(HoursPerDay[''], 24)\n m.step()\n self.assertEqual(HoursPerDay[''], 24)\n\n def test_TIME(self):\n \"\"\"Test usage of the TIME value in a lambda\"\"\"\n with mn.model() as m:\n Tm = mn.variable('Tm', lambda md: md.TIME, '__model__')\n self.assertEqual(Tm[''], 0)\n m.step()\n self.assertEqual(Tm[''], 1)\n m.reset()\n self.assertEqual(Tm[''], 0)\n\n def test_STEP(self):\n \"\"\"Test usage of the STEP value in a lambda\"\"\"\n with mn.model() as m:\n St = mn.variable('St', lambda md: md.STEP, '__model__')\n self.assertEqual(St[''], 0)\n m.step()\n self.assertEqual(St[''], 1)\n m.reset()\n self.assertEqual(St[''], 0)\n\n def test_TIME_smaller_timestep(self):\n \"\"\"Test usage of the TIME value when timestep is not 1\"\"\"\n with mn.model(timestep=0.5) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n self.assertEqual(Time[''], 0)\n self.assertEqual(Step[''], 0)\n m.step()\n self.assertEqual(Time[''], 0.5)\n self.assertEqual(Step[''], 1)\n m.step()\n self.assertEqual(Time[''], 1)\n self.assertEqual(Step[''], 2)\n m.reset()\n self.assertEqual(Time[''], 0)\n self.assertEqual(Step[''], 0)\n\n def test_TIME_n(self):\n \"\"\"Test usage of the step(n)\"\"\"\n with mn.model() as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n self.assertEqual(Time[''], 0)\n self.assertEqual(Step[''], 0)\n m.step(5)\n self.assertEqual(Time[''], 5)\n self.assertEqual(Step[''], 5)\n m.step(3)\n self.assertEqual(Time[''], 8)\n self.assertEqual(Step[''], 8)\n m.reset()\n self.assertEqual(Time[''], 0)\n self.assertEqual(Step[''], 0)\n m.step(4)\n self.assertEqual(Time[''], 4)\n self.assertEqual(Step[''], 4)\n\n def test_TIME_n_smaller(self):\n \"\"\"Test usage of the step(n) with a non-unitary timestep\"\"\"\n with mn.model(timestep=0.25) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n self.assertEqual(Time[''], 0)\n m.step(5)\n self.assertEqual(Time[''], 1.25)\n m.step(3)\n self.assertEqual(Time[''], 2)\n m.reset()\n self.assertEqual(Time[''], 0)\n m.step(4)\n self.assertEqual(Time[''], 1)\n\n def test_step_usage(self):\n \"\"\"Test step usage in a more complex situation.\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n mn.variable('DischargeBegins', 5)\n mn.variable('DischargeEnds', \n mn.PerTreatment({'As is': 13, 'To be': 11}))\n DischargeProgress = mn.variable(\n 'DischargeProgress', \n lambda db, de, md: max(0, min(1, (md.TIME - db) / (de - db))),\n 'DischargeBegins', 'DischargeEnds', '__model__') \n \n self.assertEqual(DischargeProgress['As is'], 0)\n self.assertEqual(DischargeProgress['To be'], 0)\n m.step(6)\n 
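# After stepping 6 times, TIME is 6, so progress is (6-5)/(13-5) = 0.125 under 'As is' and (6-5)/(11-5) = 1/6 under 'To be'.\n 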
self.assertEqual(DischargeProgress['As is'], 0.125)\n self.assertEqual(DischargeProgress['To be'], 1/6)\n m.step()\n self.assertEqual(DischargeProgress['As is'], 0.25)\n self.assertEqual(DischargeProgress['To be'], 1/3)\n m.step(4)\n self.assertEqual(DischargeProgress['As is'], 0.75)\n self.assertEqual(DischargeProgress['To be'], 1)\n m.step(2)\n self.assertEqual(DischargeProgress['As is'], 1)\n self.assertEqual(DischargeProgress['To be'], 1)\n\n def test_depends_on_step(self):\n \"\"\"Test various kinds of variables depending on step.\"\"\"\n with mn.model() as m:\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n StockStep = mn.stock('StockStep', \n lambda s: s, ('Step',), \n lambda s: s, ('Step',))\n AccumStep = mn.accum('AccumStep', lambda s: s, ('Step',), 0)\n PreviousStep = mn.previous('PreviousStep', 'Step', 0)\n\n self.assertEqual(StockStep[''], 0)\n self.assertEqual(AccumStep[''], 0)\n self.assertEqual(PreviousStep[''], 0)\n m.step()\n self.assertEqual(StockStep[''], 0)\n self.assertEqual(AccumStep[''], 1)\n self.assertEqual(PreviousStep[''], 0)\n m.step()\n self.assertEqual(StockStep[''], 1)\n self.assertEqual(AccumStep[''], 3)\n self.assertEqual(PreviousStep[''], 1)\n m.step()\n self.assertEqual(StockStep[''], 3)\n self.assertEqual(AccumStep[''], 6)\n self.assertEqual(PreviousStep[''], 2)\n\n\nclass StartAndEndTest(unittest.TestCase):\n \"\"\"Test step(), on all kinds of simple variables\"\"\"\n\n def test_start_time_simple(self):\n \"\"\"Test step usage with non-zero start.\"\"\"\n with mn.model(start_time=2019) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n\n self.assertEqual(Time[''], 2019)\n self.assertEqual(Step[''], 0)\n m.step()\n self.assertEqual(Time[''], 2020)\n self.assertEqual(Step[''], 1)\n m.step()\n self.assertEqual(Time[''], 2021)\n self.assertEqual(Step[''], 2)\n m.reset()\n self.assertEqual(Time[''], 2019)\n self.assertEqual(Step[''], 0)\n\n def test_start_time_with_timestep(self):\n \"\"\"Test step usage with non-zero start and timestep.\"\"\"\n with mn.model(start_time=2019, timestep=0.25) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n\n self.assertEqual(Time[''], 2019)\n self.assertEqual(Step[''], 0)\n m.step()\n self.assertEqual(Time[''], 2019.25)\n self.assertEqual(Step[''], 1)\n m.step()\n self.assertEqual(Time[''], 2019.5)\n self.assertEqual(Step[''], 2)\n m.reset()\n self.assertEqual(Time[''], 2019)\n self.assertEqual(Step[''], 0)\n\n def test_end_time(self):\n \"\"\"Test step usage with end time.\"\"\"\n with mn.model(end_time=5) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n Foo = mn.stock('Foo', 1, 0)\n\n self.assertEqual(Time[''], 0)\n self.assertEqual(Step[''], 0)\n self.assertEqual(Foo[''], 0)\n m.step(5) \n self.assertEqual(Time[''], 5)\n self.assertEqual(Step[''], 5)\n self.assertEqual(Foo[''], 5)\n with self.assertRaises(mn.MinnetonkaError) as err:\n m.step()\n self.assertEqual(err.exception.message,\n \"Attempted to simulation beyond end_time: 5\")\n self.assertEqual(Time[''], 5)\n self.assertEqual(Step[''], 5)\n self.assertEqual(Foo[''], 5)\n\n def test_step_to_end(self):\n \"\"\"Test simple case of stepping to end.\"\"\"\n with mn.model(end_time=5) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n 
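# Foo starts at 0 and grows by 1 per step, so it reaches 5 when the model is stepped to end_time=5.\n 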
Foo = mn.stock('Foo', 1, 0)\n\n m.step(to_end=True)\n self.assertEqual(Time[''], 5)\n self.assertEqual(Step[''], 5)\n self.assertEqual(Foo[''], 5)\n m.reset()\n m.step()\n self.assertEqual(Time[''], 1)\n self.assertEqual(Step[''], 1)\n self.assertEqual(Foo[''], 1)\n m.step(to_end=True)\n self.assertEqual(Time[''], 5)\n self.assertEqual(Step[''], 5)\n self.assertEqual(Foo[''], 5)\n\n def test_step_to_end_twice(self):\n \"\"\"Test step to end redundantly.\"\"\"\n with mn.model(end_time=5) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n Foo = mn.stock('Foo', 1, 0)\n\n m.step(to_end=True)\n m.step(to_end=True)\n self.assertEqual(Time[''], 5)\n self.assertEqual(Step[''], 5)\n self.assertEqual(Foo[''], 5)\n m.reset()\n m.step()\n self.assertEqual(Time[''], 1)\n self.assertEqual(Step[''], 1)\n self.assertEqual(Foo[''], 1)\n m.step(to_end=True)\n m.step(to_end=True)\n self.assertEqual(Time[''], 5)\n self.assertEqual(Step[''], 5)\n self.assertEqual(Foo[''], 5)\n\n def test_step_to_end_with_timestep(self):\n \"\"\"Test step to end with a non-one timestep.\"\"\"\n with mn.model(end_time=5, timestep=0.25) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n Foo = mn.stock('Foo', 1, 0)\n\n m.step(to_end=True)\n self.assertEqual(Time[''], 5)\n self.assertEqual(Step[''], 20)\n self.assertEqual(Foo[''], 5)\n m.reset()\n m.step()\n self.assertEqual(Time[''], 0.25)\n self.assertEqual(Step[''], 1)\n self.assertEqual(Foo[''], 0.25)\n m.step(to_end=True)\n self.assertEqual(Time[''], 5)\n self.assertEqual(Step[''], 20)\n self.assertEqual(Foo[''], 5)\n\n def test_step_to_end_with_incompatible_timestep(self):\n \"\"\"Test step to end with incompatible timestep.\"\"\"\n with mn.model(end_time=4.6, timestep=0.5) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n Foo = mn.stock('Foo', 1, 0)\n\n m.step(to_end=True)\n self.assertEqual(Time[''], 4.5)\n self.assertEqual(Step[''], 9)\n self.assertEqual(Foo[''], 4.5)\n m.reset()\n m.step()\n self.assertEqual(Time[''], 0.5)\n self.assertEqual(Step[''], 1)\n self.assertEqual(Foo[''], 0.5)\n m.step(to_end=True)\n self.assertEqual(Time[''], 4.5)\n self.assertEqual(Step[''], 9)\n self.assertEqual(Foo[''], 4.5)\n\n def test_start_and_end(self):\n \"\"\"Test a model that has both a start time and an end time.\"\"\"\n with mn.model(start_time=2018, end_time=2022) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n Foo = mn.stock('Foo', 1, 0)\n\n self.assertEqual(Time[''], 2018)\n self.assertEqual(Foo[''], 0)\n m.step()\n self.assertEqual(Time[''], 2019)\n self.assertEqual(Foo[''], 1)\n m.step(to_end=True)\n self.assertEqual(Time[''], 2022)\n self.assertEqual(Foo[''], 4)\n m.reset()\n self.assertEqual(Time[''], 2018)\n self.assertEqual(Foo[''], 0)\n\n def test_incompatible_start_and_end(self):\n \"\"\"Test a model that has an incompatible start_time and end_time.\"\"\"\n with self.assertRaises(mn.MinnetonkaError) as err:\n with mn.model(start_time=2018, end_time=2017) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n Foo = mn.stock('Foo', 1, 0)\n self.assertEqual(err.exception.message,\n 'End time 2017 is before start time 2018')\n\n def test_STARTTIME_and_ENDTIME(self):\n 
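# md.STARTTIME and md.ENDTIME expose the model's start_time and end_time to callables, as the assertions below exercise.\n 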
\"\"\"Test access of start and end variables.\"\"\"\n with mn.model(start_time=2019, end_time=2022) as m:\n Start = mn.variable('Start', lambda md: md.STARTTIME, '__model__')\n End = mn.variable('End', lambda md: md.ENDTIME, '__model__')\n\n self.assertEqual(Start[''], 2019)\n self.assertEqual(End[''], 2022)\n m.step()\n self.assertEqual(Start[''], 2019)\n self.assertEqual(End[''], 2022)\n m.reset()\n self.assertEqual(Start[''], 2019)\n self.assertEqual(End[''], 2022)\n\n\nclass ConstantTest(unittest.TestCase):\n \"\"\"Test constants, that are initiallized and then don't change\"\"\"\n def test_simple_constant(self):\n \"\"\"Does a simple constant have the right value?\"\"\"\n with mn.model() as m:\n DischargeBegins = mn.constant('DischargeBegins', 12)\n self.assertEqual(m['DischargeBegins'][''], 12)\n m.step()\n self.assertEqual(m['DischargeBegins'][''], 12)\n m.step(4)\n self.assertEqual(m['DischargeBegins'][''], 12)\n m.reset()\n self.assertEqual(m['DischargeBegins'][''], 12)\n\n def test_lambda_constant(self):\n \"\"\"Is a constant only evaluated once?\"\"\"\n how_many = 0\n def eval_once():\n nonlocal how_many\n assert(how_many == 0)\n how_many += 1\n return 12\n\n with mn.model() as m:\n DischargeBegins = mn.constant('DischargeBegins', eval_once)\n\n self.assertEqual(m['DischargeBegins'][''], 12)\n m.step()\n self.assertEqual(m['DischargeBegins'][''], 12)\n m.step(4)\n self.assertEqual(m['DischargeBegins'][''], 12) \n how_many = 0\n m.reset()\n self.assertEqual(m['DischargeBegins'][''], 12)\n m.step()\n self.assertEqual(m['DischargeBegins'][''], 12)\n\n\n def test_lambda_constant_multiple_args(self):\n \"\"\"Is a constant with multiple arguments only evaluated once?\"\"\"\n how_many = 0\n def eval_once(a, b):\n nonlocal how_many\n assert(how_many == 0)\n how_many += 1\n return a + b\n\n with mn.model() as m:\n mn.variable('Foo', 9)\n mn.variable('Bar', 3)\n DischargeBegins = mn.constant(\n 'DischargeBegins', eval_once, 'Foo', 'Bar')\n\n self.assertEqual(m['DischargeBegins'][''], 12)\n m.step()\n self.assertEqual(m['DischargeBegins'][''], 12)\n m.step(4)\n self.assertEqual(m['DischargeBegins'][''], 12) \n how_many = 0\n m.reset()\n self.assertEqual(m['DischargeBegins'][''], 12)\n m.step()\n self.assertEqual(m['DischargeBegins'][''], 12)\n\n\n def test_constant_and_treatments(self):\n \"\"\"Can a constant take different values in different treatments?\"\"\"\n with mn.model(treatments=['Bar', 'Baz']) as m:\n DischargeBegins = mn.constant('DischargeBegins', \n mn.PerTreatment({'Bar': 9, 'Baz':10}))\n\n self.assertEqual(m['DischargeBegins']['Bar'], 9)\n self.assertEqual(m['DischargeBegins']['Baz'], 10)\n m.step()\n self.assertEqual(m['DischargeBegins']['Bar'], 9)\n self.assertEqual(m['DischargeBegins']['Baz'], 10)\n \n \nclass BasicStockTest(unittest.TestCase):\n \"\"\"Test stocks\"\"\"\n\n def test_simple_stock_zero_initial(self):\n \"\"\"Stock with no callables and no initial\"\"\"\n with mn.model() as m:\n S = mn.stock('S', 5)\n self.assertEqual(S[''], 0)\n m.step()\n self.assertEqual(S[''], 5)\n m.step()\n self.assertEqual(S[''], 10)\n m.reset()\n self.assertEqual(S[''], 0)\n m.step(3)\n self.assertEqual(S[''], 15)\n self.assertEqual(S.__doc__, '')\n\n def test_simple_stock_zero_initial_half_step(self):\n \"\"\"Stock with no callables, no initial, and timestep = 0.5\"\"\"\n with mn.model(timestep=0.5) as m:\n S = mn.stock('S', 5)\n self.assertEqual(S[''], 0)\n m.step()\n self.assertEqual(S[''], 2.5)\n m.step()\n self.assertEqual(S[''], 5)\n m.reset()\n self.assertEqual(S[''], 0)\n 
m.step(3)\n self.assertEqual(S[''], 7.5)\n\n def test_simple_stock_zero_initial_and_docstring(self):\n \"\"\"Stock with no callables and no initial, but with docstring\"\"\"\n with mn.model() as m:\n S = mn.stock('S', \"\"\"Increase by 5 every step\"\"\", 5)\n self.assertEqual(S[''], 0)\n m.step()\n self.assertEqual(S[''], 5)\n self.assertEqual(S.__doc__, 'Increase by 5 every step')\n\n def test_simple_stock_with_initial(self):\n \"\"\"Stock with no callables but with an initial\"\"\"\n with mn.model() as m:\n S = mn.stock('S', 1, 22)\n self.assertEqual(S[''], 22)\n m.step()\n self.assertEqual(S[''], 23)\n m.step()\n self.assertEqual(S[''], 24)\n m.reset()\n self.assertEqual(S[''], 22)\n m.step(3)\n self.assertEqual(S[''], 25)\n\n def test_simple_stock_with_initial_and_docstring(self):\n \"\"\"Stock with no callables but with an initial\"\"\"\n with mn.model() as m:\n S = mn.stock('S', \"\"\"Start at 22 and increase by 1\"\"\", 1, 22)\n self.assertEqual(S[''], 22)\n m.step()\n self.assertEqual(S[''], 23)\n self.assertEqual(S.__doc__, 'Start at 22 and increase by 1')\n\n def test_simple_stock_with_varying_initial(self):\n \"\"\"Stock with no callables but with a treatment-varying initial\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n S = mn.stock('S', 1, mn.PerTreatment({'As is': 22, 'To be': 23}))\n self.assertEqual(S['As is'], 22)\n self.assertEqual(S['To be'], 23)\n m.step()\n self.assertEqual(S['As is'], 23)\n self.assertEqual(S['To be'], 24)\n m.step()\n self.assertEqual(S['As is'], 24)\n self.assertEqual(S['To be'], 25)\n m.reset()\n self.assertEqual(S['As is'], 22)\n self.assertEqual(S['To be'], 23)\n m.step(3)\n self.assertEqual(S['As is'], 25)\n self.assertEqual(S['To be'], 26)\n\n def test_stock_with_callable_flow(self):\n \"\"\"Stock with callable flow, but depends on nothing\"\"\"\n with mn.model() as m:\n S = mn.stock('S', lambda: 1, (), 22)\n self.assertEqual(S[''], 22)\n m.step()\n self.assertEqual(S[''], 23)\n m.step()\n self.assertEqual(S[''], 24)\n m.reset()\n self.assertEqual(S[''], 22)\n m.step(3)\n self.assertEqual(S[''], 25)\n\n def test_stock_with_callable_flow_and_init(self):\n \"\"\"Stock with callable flow and callable init, depends on nothing\"\"\"\n with mn.model() as m:\n S = mn.stock('S', \n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda: 1, (), lambda: 22, ())\n self.assertEqual(S[''], 22)\n m.step()\n self.assertEqual(S[''], 23)\n m.step()\n self.assertEqual(S[''], 24)\n m.reset()\n self.assertEqual(S[''], 22)\n m.step(3)\n self.assertEqual(S[''], 25)\n self.assertEqual(S.__doc__, 'Start at 22 and increase by 1')\n\n def test_stock_with_simple_increment_variable(self):\n \"\"\"Stock with very simple variable dependency\"\"\"\n with mn.model() as m:\n mn.variable('X', 1)\n S = mn.stock('S', lambda x: x, ('X',), 22)\n self.assertEqual(S[''], 22)\n m.step()\n self.assertEqual(S[''], 23)\n m.step()\n self.assertEqual(S[''], 24)\n m.reset()\n self.assertEqual(S[''], 22)\n m.step(3)\n self.assertEqual(S[''], 25)\n\n def test_stock_with_nontuple_dependency(self):\n \"\"\"Test stock with a nontuple dependency, translated to tuple.\"\"\" \n with mn.model() as m:\n mn.variable('XY', 1)\n S = mn.stock('S', lambda x: x, 'XY', 22)\n\n with mn.model() as m:\n mn.variable('XY', 1)\n S = mn.stock('S', lambda: 1, (), lambda x: x, 'XY')\n\n\n def test_stock_with_two_callables_with_depends(self):\n \"\"\"Stock with depends vars for both flow and initial\"\"\"\n with mn.model() as m:\n mn.variable('X', 1)\n mn.variable('Y', 22)\n S = mn.stock('S',\n 
\"\"\"Start at 22 and increase by 1\"\"\",\n lambda x: x, ('X',), lambda x: x, ('Y',))\n self.assertEqual(S[''], 22)\n m.step()\n self.assertEqual(S[''], 23)\n m.step()\n self.assertEqual(S[''], 24)\n m.reset()\n self.assertEqual(S[''], 22)\n m.step(3)\n self.assertEqual(S[''], 25)\n self.assertEqual(S.__doc__, 'Start at 22 and increase by 1')\n\n def test_stock_with_variable_increase(self):\n with mn.model() as m:\n mn.variable('Time', lambda md: md.TIME, '__model__')\n S = mn.stock('S', lambda s: s, ('Time',), 0)\n m.step()\n self.assertEqual(S[''], 0)\n m.step()\n self.assertEqual(S[''], 1)\n m.step()\n self.assertEqual(S[''], 3)\n m.step()\n self.assertEqual(S[''], 6)\n\n def test_stock_with_positive_feedback(self):\n \"\"\"Classic interest stock\"\"\"\n with mn.model() as m:\n Savings = mn.stock(\n 'Savings', lambda interest: interest, ('Interest',), 1000)\n mn.variable('Rate', 0.05)\n mn.variable(\n 'Interest', lambda savings, rate: savings * rate, \n 'Savings', 'Rate')\n self.assertEqual(Savings[''], 1000)\n m.step()\n self.assertEqual(Savings[''], 1050)\n m.step()\n self.assertEqual(Savings[''], 1102.5)\n m.step()\n self.assertEqual(Savings[''], 1157.625)\n m.reset()\n self.assertEqual(Savings[''], 1000)\n m.step()\n self.assertEqual(Savings[''], 1050)\n\n def test_stock_with_positive_feedback_small_timestep(self):\n \"\"\"Classic interest stock with a smaller timestep\"\"\"\n with mn.model(timestep=0.25) as m:\n Savings = mn.stock('Savings', \n lambda interest: interest, ('Interest',), 1000)\n mn.variable('Rate', 0.05)\n mn.variable('Interest', \n lambda savings, rate: savings * rate,\n 'Savings', 'Rate')\n self.assertEqual(Savings[''], 1000)\n m.step()\n self.assertEqual(Savings[''], 1012.5)\n m.step()\n self.assertAlmostEqual(Savings[''], 1025.156, places=3)\n m.step()\n self.assertAlmostEqual(Savings[''], 1037.971, places=3)\n m.step()\n self.assertAlmostEqual(Savings[''], 1050.945, places=3)\n m.step()\n self.assertAlmostEqual(Savings[''], 1064.082, places=3)\n\n def test_stock_with_positive_feedback_and_treatments(self):\n \"\"\"Classic interest stock\"\"\"\n with mn.model(treatments=['Good', 'Better', 'Best']) as m:\n Savings = mn.stock('Savings', \n lambda interest: interest, ('Interest',), 1000)\n mn.variable('Rate', \n mn.PerTreatment({'Good': 0.04, 'Better': 0.05, 'Best': 0.06}))\n mn.variable('Interest', \n lambda savings, rate: savings * rate,\n 'Savings', 'Rate')\n self.assertEqual(Savings['Good'], 1000)\n self.assertEqual(Savings['Better'], 1000)\n self.assertEqual(Savings['Best'], 1000)\n m.step()\n self.assertEqual(Savings['Good'], 1040)\n self.assertEqual(Savings['Better'], 1050)\n self.assertEqual(Savings['Best'], 1060)\n m.step()\n self.assertEqual(Savings['Good'], 1081.6)\n self.assertEqual(Savings['Better'], 1102.5)\n self.assertEqual(Savings['Best'], 1123.6)\n m.reset()\n self.assertEqual(Savings['Good'], 1000)\n self.assertEqual(Savings['Better'], 1000)\n self.assertEqual(Savings['Best'], 1000)\n m.step()\n self.assertEqual(Savings['Good'], 1040)\n self.assertEqual(Savings['Better'], 1050)\n self.assertEqual(Savings['Best'], 1060)\n\n def test_stock_with_many_depends(self):\n \"\"\"Test stock that depends on a lot of callables\"\"\"\n with mn.model() as m:\n ABCDEFGH = mn.stock(\n 'ABCDEFGH', \n lambda a, b, c, d, e, f, g, h: a + b + c + d + e + f + g + h,\n ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'),\n 0)\n mn.variable('A', lambda md: md.TIME, '__model__')\n mn.variable('B', lambda md: md.TIME, '__model__')\n mn.variable('C', lambda md: md.TIME, 
'__model__')\n mn.variable('D', lambda md: md.TIME, '__model__')\n mn.variable('E', lambda md: md.TIME, '__model__')\n mn.variable('F', lambda md: md.TIME, '__model__')\n mn.variable('G', lambda md: md.TIME, '__model__')\n mn.variable('H', lambda md: md.TIME, '__model__')\n self.assertEqual(ABCDEFGH[''], 0)\n m.step()\n self.assertEqual(ABCDEFGH[''], 0)\n m.step()\n self.assertEqual(ABCDEFGH[''], 8)\n m.step()\n self.assertEqual(ABCDEFGH[''], 24)\n m.step()\n self.assertEqual(ABCDEFGH[''], 48)\n\n def test_stock_order(self):\n \"\"\"Test a stock that uses variables defined before and after\"\"\"\n with mn.model() as m:\n mn.variable('Before', lambda md: md.TIME, '__model__')\n mn.stock('UsingTimes', \n lambda before, after: before + after, ('Before', 'After'), \n 0)\n mn.variable('After', lambda md: md.TIME, '__model__')\n\n self.assertEqual(m['UsingTimes'][''], 0)\n m.step()\n self.assertEqual(m['UsingTimes'][''], 0)\n m.step()\n self.assertEqual(m['UsingTimes'][''], 2)\n m.step()\n self.assertEqual(m['UsingTimes'][''], 6)\n m.step()\n self.assertEqual(m['UsingTimes'][''], 12)\n m.step(2)\n self.assertEqual(m['UsingTimes'][''], 30)\n\n def test_eval_count(self):\n \"\"\"Test a stock that uses two variables that count # of calls\"\"\"\n before_count = 0\n after_count = 0\n\n def before():\n nonlocal before_count\n before_count += 1\n return before_count\n\n def after():\n nonlocal after_count\n after_count += 1\n return after_count\n\n with mn.model() as m:\n mn.variable('Before', before)\n mn.stock('UsingBeforeAndAfter',\n lambda b, a: b + a, ('Before', 'After'),\n 0)\n mn.variable('After', after)\n\n self.assertEqual(m['UsingBeforeAndAfter'][''], 0)\n m.step()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 2)\n m.step()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 6)\n m.step()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 12)\n m.step()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 20)\n m.step(2)\n self.assertEqual(m['UsingBeforeAndAfter'][''], 42)\n m.reset()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 0)\n m.step()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 16)\n\n def test_variable_using_stock(self):\n \"\"\"Test whether a variable can use an stock value\"\"\"\n with mn.model() as m:\n mn.stock('Revenue', 5, 0)\n mn.variable('Cost', 10)\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n\n self.assertEqual(m['Earnings'][''], -10)\n m.step()\n self.assertEqual(m['Earnings'][''], -5)\n m.step()\n self.assertEqual(m['Earnings'][''], 0)\n m.step()\n self.assertEqual(m['Earnings'][''], 5)\n m.reset()\n self.assertEqual(m['Earnings'][''], -10)\n\n def test_stock_using_stock(self):\n \"\"\"Test a stock that uses another stock\"\"\"\n with mn.model() as m:\n mn.stock('First', 1)\n mn.stock('Second', lambda f: f, ('First',), 0)\n mn.stock('Third', lambda f, s: f + s, ('First', 'Second'), 0)\n\n m.step()\n self.assertEqual(m['First'][''], 1)\n self.assertEqual(m['Second'][''], 0)\n self.assertEqual(m['Third'][''], 0)\n m.step()\n self.assertEqual(m['First'][''], 2)\n self.assertEqual(m['Second'][''], 1)\n self.assertEqual(m['Third'][''], 1)\n m.step()\n self.assertEqual(m['First'][''], 3)\n self.assertEqual(m['Second'][''], 3)\n self.assertEqual(m['Third'][''], 4)\n m.step()\n self.assertEqual(m['First'][''], 4)\n self.assertEqual(m['Second'][''], 6)\n self.assertEqual(m['Third'][''], 10)\n m.step()\n self.assertEqual(m['First'][''], 5)\n self.assertEqual(m['Second'][''], 10)\n self.assertEqual(m['Third'][''], 20)\n\n def 
test_stock_using_stock_alt_ordering(self):\n \"\"\"Test a stock using another stock, with user defined first\"\"\"\n with mn.model() as m:\n mn.stock('Third', lambda f, s: f + s, ('First', 'Second'), 0)\n mn.stock('Second', lambda f: f, ('First',), 0)\n mn.stock('First', 1)\n\n m.step()\n self.assertEqual(m['First'][''], 1)\n self.assertEqual(m['Second'][''], 0)\n self.assertEqual(m['Third'][''], 0)\n m.step()\n self.assertEqual(m['First'][''], 2)\n self.assertEqual(m['Second'][''], 1)\n self.assertEqual(m['Third'][''], 1)\n m.step()\n self.assertEqual(m['First'][''], 3)\n self.assertEqual(m['Second'][''], 3)\n self.assertEqual(m['Third'][''], 4)\n m.step()\n self.assertEqual(m['First'][''], 4)\n self.assertEqual(m['Second'][''], 6)\n self.assertEqual(m['Third'][''], 10)\n m.step()\n self.assertEqual(m['First'][''], 5)\n self.assertEqual(m['Second'][''], 10)\n self.assertEqual(m['Third'][''], 20)\n\n def test_stock_init_circularity(self):\n \"\"\"Test a variable circularity involving stocks\"\"\"\n with self.assertRaises(mn.MinnetonkaError) as me:\n with mn.model() as m:\n mn.stock('Foo', lambda b: b, ('Bar',), lambda b: b, ('Bar',))\n mn.variable('Bar', lambda f: f, 'Foo')\n self.assertEqual(me.exception.message,\n 'Circularity among variables: Foo <- Bar <- Foo')\n\n def test_stock_one_treatment_only(self):\n \"\"\"Variable that uses a stock for 1 treatment and constant 4 another\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n ValueAtRisk = mn.variable('ValueAtRisk',\n mn.PerTreatment({'As is': lambda x: x, 'To be': 0}),\n 'ValueAtRiskAsIs')\n\n mn.stock('ValueAtRiskAsIs', 1, 1)\n\n self.assertEqual(ValueAtRisk['To be'], 0)\n self.assertEqual(ValueAtRisk['As is'], 1)\n m.step()\n self.assertEqual(ValueAtRisk['To be'], 0)\n self.assertEqual(ValueAtRisk['As is'], 2)\n m.step(2)\n self.assertEqual(ValueAtRisk['To be'], 0)\n self.assertEqual(ValueAtRisk['As is'], 4)\n m.reset()\n self.assertEqual(ValueAtRisk['To be'], 0)\n self.assertEqual(ValueAtRisk['As is'], 1)\n\n def test_one_treatment_stock_both_sides(self):\n \"\"\"A stock that has both init and incr defined with treatments\"\"\",\n with mn.model(treatments=['As is', 'To be']) as m:\n Foo = mn.stock('Foo',\n mn.PerTreatment({'As is': lambda x: x, 'To be': 1}),\n ('Bar',),\n mn.PerTreatment({'As is': 0, 'To be': lambda x: x + 1}),\n ('Baz',))\n mn.variable('Bar', 2)\n mn.variable('Baz', 1)\n\n self.assertEqual(Foo['To be'], 2)\n self.assertEqual(Foo['As is'], 0)\n m.step()\n self.assertEqual(Foo['To be'], 3)\n self.assertEqual(Foo['As is'], 2)\n m.step()\n self.assertEqual(Foo['To be'], 4)\n self.assertEqual(Foo['As is'], 4)\n\n\nclass BasicAccumTest(unittest.TestCase):\n \"\"\"Test accums\"\"\"\n\n def test_simple_accum_zero_initial(self):\n \"\"\"Accum with no callables and no initial\"\"\"\n with mn.model() as m:\n A = mn.accum('A', 5)\n\n self.assertEqual(A[''], 0)\n m.step()\n self.assertEqual(A[''], 5) \n m.step()\n self.assertEqual(A[''], 10)\n m.reset()\n self.assertEqual(A[''], 0)\n m.step(3)\n self.assertEqual(A[''], 15)\n self.assertEqual(A.__doc__, '')\n\n def test_simple_accum_zero_initial_and_docstring(self):\n \"\"\"Accum with no callables and no initial, but with a docstring\"\"\"\n with mn.model() as m:\n A = mn.accum('A', \"\"\"Increase by 5 every step\"\"\", 5)\n\n self.assertEqual(A[''], 0)\n m.step()\n self.assertEqual(A[''], 5)\n self.assertEqual(A.__doc__, 'Increase by 5 every step')\n\n def test_simple_accum_with_initial(self):\n \"\"\"Accum with no callables but with an initial\"\"\"\n with 
mn.model() as m:\n A = mn.accum('A', 1, 22)\n \n self.assertEqual(A[''], 22)\n m.step()\n self.assertEqual(A[''], 23)\n m.step()\n self.assertEqual(A[''], 24)\n m.reset()\n self.assertEqual(A[''], 22)\n m.step(3)\n self.assertEqual(A[''], 25)\n\n def test_simple_accum_with_initial_and_docstring(self):\n \"\"\"Accum with no callables but with an initial\"\"\"\n with mn.model() as m:\n A = mn.accum('A', \"\"\"Start at 22 and increase by 1\"\"\", 1, 22)\n\n self.assertEqual(A[''], 22)\n m.step()\n self.assertEqual(A[''], 23)\n self.assertEqual(A.__doc__, 'Start at 22 and increase by 1')\n\n def test_simple_accum_with_varying_initial(self):\n \"\"\"Accum with no callables but with a treatment-varying initial\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n A = mn.accum('A', 1, mn.PerTreatment({'As is': 22, 'To be': 23}))\n\n self.assertEqual(A['As is'], 22)\n self.assertEqual(A['To be'], 23)\n m.step() \n self.assertEqual(A['As is'], 23)\n self.assertEqual(A['To be'], 24)\n m.reset() \n m.step(3)\n self.assertEqual(A['As is'], 25)\n self.assertEqual(A['To be'], 26)\n\n def test_simple_accum_zero_initial_small_timestep(self):\n \"\"\"Accum with no callables and no initial\"\"\"\n with mn.model(timestep=0.25) as m:\n A = mn.accum('A', 5)\n\n self.assertEqual(A[''], 0)\n m.step()\n self.assertEqual(A[''], 5) \n m.step()\n self.assertEqual(A[''], 10)\n m.reset()\n self.assertEqual(A[''], 0)\n m.step(3)\n self.assertEqual(A[''], 15)\n\n def test_accum_with_callable_flow(self):\n \"\"\"accum with callable flow, but depends on nothing\"\"\"\n with mn.model() as m:\n A = mn.accum('A', lambda: 1, (), 22)\n\n self.assertEqual(A[''], 22)\n m.step()\n self.assertEqual(A[''], 23)\n m.step()\n self.assertEqual(A[''], 24)\n m.reset()\n self.assertEqual(A[''], 22)\n m.step(3)\n self.assertEqual(A[''], 25)\n\n def test_accum_with_callable_flow_and_init(self):\n \"\"\"accum with callable flow and callable init, but depends on nothing\n \"\"\"\n with mn.model() as m:\n A = mn.accum('A', \n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda: 1, (), lambda: 22, ())\n\n self.assertEqual(A[''], 22)\n m.step()\n self.assertEqual(A[''], 23)\n m.step()\n self.assertEqual(A[''], 24)\n m.reset()\n self.assertEqual(A[''], 22)\n m.step(3)\n self.assertEqual(A[''], 25)\n self.assertEqual(A.__doc__, 'Start at 22 and increase by 1')\n\n def test_accum_with_simple_increment_variable(self):\n \"\"\"accum with very simple variable dependency\"\"\"\n with mn.model() as m:\n mn.variable('X', 1)\n A = mn.accum('A', lambda x: x, ('X',), 22)\n\n self.assertEqual(A[''], 22)\n m.step()\n self.assertEqual(A[''], 23)\n m.step()\n self.assertEqual(A[''], 24)\n m.reset()\n self.assertEqual(A[''], 22)\n m.step(3)\n self.assertEqual(A[''], 25)\n\n def test_accum_with_two_callables_with_depends(self):\n \"\"\"accum with depends vars for both flow and initial\"\"\"\n with mn.model() as m:\n mn.variable('X', 1)\n mn.variable('Y', 22)\n A = mn.accum('A',\n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda x: x, ('X',), lambda x: x, ('Y',))\n\n self.assertEqual(A[''], 22)\n m.step()\n self.assertEqual(A[''], 23)\n m.step()\n self.assertEqual(A[''], 24)\n m.reset()\n self.assertEqual(A[''], 22)\n m.step(3)\n self.assertEqual(A[''], 25)\n self.assertEqual(A.__doc__, 'Start at 22 and increase by 1')\n\n def test_accum_with_nontuple_dependency(self):\n \"\"\"Test accum with a nontuple dependency, translated to tuple.\"\"\" \n with mn.model() as m:\n mn.variable('X1', 1)\n mn.variable('Y2', 22)\n A = mn.accum('A',\n \"\"\"Start at 22 
and increase by 1\"\"\",\n lambda x: x, 'X1', lambda x: x, 'Y2') \n\n def test_accum_with_variable_increase(self):\n with mn.model() as m:\n mn.variable('Time', lambda md: md.TIME, '__model__')\n A = mn.accum('A', lambda s: s, ('Time',), 0)\n\n self.assertEqual(A[''], 0)\n m.step()\n self.assertEqual(A[''], 1)\n m.step()\n self.assertEqual(A[''], 3)\n m.step()\n self.assertEqual(A[''], 6)\n\n def test_accum_with_many_depends(self):\n \"\"\"Test accum that depends on a lot of callables\"\"\"\n with mn.model() as m:\n ABCDEFGH = mn.accum(\n 'ABCDEFGH', \n lambda a, b, c, d, e, f, g, h: a + b + c + d + e + f + g + h,\n ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'),\n 0)\n mn.variable('A', lambda md: md.TIME, '__model__')\n mn.variable('B', lambda md: md.TIME, '__model__')\n mn.variable('C', lambda md: md.TIME, '__model__')\n mn.variable('D', lambda md: md.TIME, '__model__')\n mn.variable('E', lambda md: md.TIME, '__model__')\n mn.variable('F', lambda md: md.TIME, '__model__')\n mn.variable('G', lambda md: md.TIME, '__model__')\n mn.variable('H', lambda md: md.TIME, '__model__')\n\n self.assertEqual(ABCDEFGH[''], 0)\n m.step()\n self.assertEqual(ABCDEFGH[''], 8)\n m.step()\n self.assertEqual(ABCDEFGH[''], 24)\n m.step()\n self.assertEqual(ABCDEFGH[''], 48)\n\n def test_accum_order(self):\n \"\"\"Test a accum that uses variables defined before and after\"\"\"\n with mn.model() as m:\n mn.variable('Before', lambda md: md.TIME, '__model__')\n mn.accum('UsingTimes', \n lambda before, after: before + after, ('Before', 'After'), \n 0)\n mn.variable('After', lambda md: md.TIME, '__model__')\n\n self.assertEqual(m['UsingTimes'][''], 0)\n m.step()\n self.assertEqual(m['UsingTimes'][''], 2)\n m.step()\n self.assertEqual(m['UsingTimes'][''], 6)\n m.step()\n self.assertEqual(m['UsingTimes'][''], 12)\n m.step(2)\n self.assertEqual(m['UsingTimes'][''], 30)\n\n def test_eval_count(self):\n \"\"\"Test a accum that uses two variables that count # of calls\"\"\"\n before_count = 0\n after_count = 0\n\n def before():\n nonlocal before_count\n before_count += 1\n return before_count\n\n def after():\n nonlocal after_count\n after_count += 1\n return after_count\n\n with mn.model() as m:\n mn.variable('Before', before)\n mn.accum('UsingBeforeAndAfter',\n lambda b, a: b + a, ('Before', 'After'),\n 0)\n mn.variable('After', after)\n\n self.assertEqual(m['UsingBeforeAndAfter'][''], 0)\n m.step()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 4)\n m.step()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 10)\n m.step()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 18)\n m.step(2)\n self.assertEqual(m['UsingBeforeAndAfter'][''], 40)\n m.reset()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 0)\n m.step()\n self.assertEqual(m['UsingBeforeAndAfter'][''], 16)\n\n def test_variable_using_accum(self):\n \"\"\"Test whether a variable can use an accum value\"\"\"\n with mn.model() as m:\n mn.accum('Revenue', 5, 0)\n mn.variable('Cost', 10)\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n\n self.assertEqual(m['Earnings'][''], -10)\n self.assertEqual(m['Revenue'][''], 0)\n m.step()\n self.assertEqual(m['Earnings'][''], -5)\n self.assertEqual(m['Revenue'][''], 5)\n m.step()\n self.assertEqual(m['Earnings'][''], 0)\n m.step()\n self.assertEqual(m['Earnings'][''], 5)\n m.step()\n self.assertEqual(m['Earnings'][''], 10)\n m.reset()\n self.assertEqual(m['Earnings'][''], -10)\n\n def test_accum_using_accum(self):\n \"\"\"Test an accume that uses a variable that is another accum\"\"\"\n with mn.model() as m:\n 
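# First grows linearly (1, 2, 3, ...), Second accumulates First (the triangular numbers), and Third accumulates First + Second.\n 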
mn.accum('First', 1)\n mn.accum('Second', lambda f: f, ('First',), 0)\n mn.accum('Third', lambda f, s: f + s, ('First', 'Second'), 0)\n\n m.step()\n self.assertEqual(m['First'][''], 1)\n self.assertEqual(m['Second'][''], 1)\n self.assertEqual(m['Third'][''], 2)\n m.step()\n self.assertEqual(m['First'][''], 2)\n self.assertEqual(m['Second'][''], 3)\n self.assertEqual(m['Third'][''], 7)\n m.step()\n self.assertEqual(m['First'][''], 3)\n self.assertEqual(m['Second'][''], 6)\n self.assertEqual(m['Third'][''], 16)\n m.step()\n self.assertEqual(m['First'][''], 4)\n self.assertEqual(m['Second'][''], 10)\n self.assertEqual(m['Third'][''], 30)\n m.step()\n self.assertEqual(m['First'][''], 5)\n self.assertEqual(m['Second'][''], 15)\n self.assertEqual(m['Third'][''], 50)\n m.step()\n self.assertEqual(m['First'][''], 6)\n self.assertEqual(m['Second'][''], 21)\n self.assertEqual(m['Third'][''], 77)\n\n def test_accum_using_accum_alt_ordering(self):\n \"\"\"Test an accum using another accum, with the using accum defined first\"\"\"\n with mn.model() as m:\n mn.accum('Third', lambda f, s: f + s, ('First', 'Second'), 0)\n mn.accum('Second', lambda f: f, ('First',), 0)\n mn.accum('First', 1)\n\n m.step()\n self.assertEqual(m['First'][''], 1)\n self.assertEqual(m['Second'][''], 1)\n self.assertEqual(m['Third'][''], 2)\n m.step()\n self.assertEqual(m['First'][''], 2)\n self.assertEqual(m['Second'][''], 3)\n self.assertEqual(m['Third'][''], 7)\n m.step()\n self.assertEqual(m['First'][''], 3)\n self.assertEqual(m['Second'][''], 6)\n self.assertEqual(m['Third'][''], 16)\n m.step()\n self.assertEqual(m['First'][''], 4)\n self.assertEqual(m['Second'][''], 10)\n self.assertEqual(m['Third'][''], 30)\n m.step()\n self.assertEqual(m['First'][''], 5)\n self.assertEqual(m['Second'][''], 15)\n self.assertEqual(m['Third'][''], 50)\n m.step()\n self.assertEqual(m['First'][''], 6)\n self.assertEqual(m['Second'][''], 21)\n self.assertEqual(m['Third'][''], 77)\n\n def test_accum_with_circularity(self):\n \"\"\"Accum does not support the kind of circularity that stock does\"\"\"\n with self.assertRaises(mn.MinnetonkaError) as cm:\n with mn.model() as m:\n mn.accum('Savings', \n lambda interest: interest, ('Interest',), 1000)\n mn.variable('Rate', 0.05)\n mn.variable('Interest', lambda savings, rate: savings * rate,\n 'Savings', 'Rate')\n\n self.assertEqual(cm.exception.message,\n 'Circularity among variables: Savings <- Interest <- Savings')\n \n def test_accum_one_treatment_both_sides(self):\n \"\"\"An accum that has both init and incr defined with treatments\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n Foo = mn.accum('Foo',\n mn.PerTreatment({'As is': lambda x: x, 'To be': 1}),\n ('Bar',),\n mn.PerTreatment({'As is': 0, 'To be': lambda x: x + 1}),\n ('Baz',))\n mn.variable('Bar', 2)\n mn.variable('Baz', 1)\n\n self.assertEqual(Foo['To be'], 2)\n self.assertEqual(Foo['As is'], 0)\n m.step()\n self.assertEqual(Foo['To be'], 3)\n self.assertEqual(Foo['As is'], 2)\n m.step()\n self.assertEqual(Foo['To be'], 4)\n self.assertEqual(Foo['As is'], 4)\n\nclass StandardSystemDynamicsTest(unittest.TestCase):\n \"\"\"Test a few basic SD models\"\"\"\n\n def test_population(self):\n \"\"\"Test basic population growth model\"\"\"\n with mn.model() as m:\n Population = mn.stock('Population', \n lambda births: births, ('Births',),\n 10000)\n Births = mn.variable('Births', \n lambda pop, rate: pop * rate, \n 'Population', 'BirthRate')\n mn.variable('BirthRate', 0.1)\n\n self.assertEqual(Population[''], 10000)\n self.assertEqual(Births[''], 
1000)\n m.step()\n self.assertEqual(Births[''], 1100)\n self.assertEqual(Population[''], 11000)\n m.step(2)\n self.assertEqual(Births[''], 1331)\n self.assertEqual(Population[''], 13310)\n m.reset()\n self.assertEqual(Population[''], 10000)\n self.assertEqual(Births[''], 1000)\n m.step()\n self.assertEqual(Births[''], 1100)\n self.assertEqual(Population[''], 11000)\n\n def test_mice(self):\n \"\"\"Test standard birth and death model\"\"\"\n with mn.model() as m:\n MicePopulation = mn.stock('MicePopulation',\n lambda births, deaths: births - deaths, \n ('MiceBirths', 'MiceDeaths'),\n 10000)\n MiceBirths = mn.variable('MiceBirths', \n lambda pop, rate: pop * rate, 'MicePopulation', 'MiceBirthRate')\n mn.variable('MiceBirthRate', 0.1)\n MiceDeaths = mn.variable('MiceDeaths', \n lambda pop, rate: pop * rate, 'MicePopulation', 'MiceDeathRate')\n mn.variable('MiceDeathRate', 0.05)\n \n self.assertEqual(MicePopulation[''], 10000)\n self.assertEqual(MiceBirths[''], 1000)\n self.assertEqual(MiceDeaths[''], 500)\n m.step()\n self.assertEqual(MicePopulation[''], 10500)\n m.step()\n self.assertEqual(MicePopulation[''], 11025)\n\n# a bit less verbose\ndef assert_array_equal(array1, array2):\n np.testing.assert_array_equal(array1, array2)\n\ndef assert_array_almost_equal(array1, array2):\n np.testing.assert_allclose(array1, array2)\n\n\nclass OneDimensionalArrayTest(unittest.TestCase):\n \"\"\"Test one dimensional numpy arrays\"\"\"\n def test_array_access(self):\n \"\"\"Test whether a variable can take an array value\"\"\"\n with mn.model() as m:\n revenue = mn.variable('Revenue', np.array([30.1, 15, 20]))\n\n self.assertEqual(revenue[''][0], 30.1)\n self.assertEqual(revenue[''][1], 15)\n self.assertEqual(revenue[''][2], 20)\n\n def test_expression(self):\n \"\"\"Test whether an array supports simple expressions\"\"\"\n with mn.model() as m:\n mn.variable('Revenue', np.array([30.1, 15, 20]))\n mn.variable('Cost', np.array([10, 10, 10]))\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n\n assert_array_equal(m['Earnings'][''], np.array([20.1, 5, 10]))\n\n def test_mixed_array_and_scalar(self):\n \"\"\"Test whether an array and a scalar can be combined w/o trouble\"\"\"\n with mn.model() as m:\n mn.variable('Revenue', np.array([30.1, 15, 20]))\n mn.variable('Cost', 10)\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n\n assert_array_equal(m['Earnings'][''], np.array([20.1, 5, 10]))\n\n def test_simple_stock(self):\n \"\"\"Test whether an array supports simple stocks\"\"\"\n with mn.model() as m:\n mn.stock('Revenue', np.array([5, 5, 10]), np.array([0, 0, 0]))\n mn.variable('Cost', np.array([10, 10, 10]))\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n\n assert_array_equal(m['Revenue'][''], np.array([0, 0, 0]))\n assert_array_equal(m['Earnings'][''], np.array([-10, -10, -10]))\n m.step()\n assert_array_equal(m['Revenue'][''], np.array([5, 5, 10]))\n assert_array_equal(m['Earnings'][''], np.array([-5, -5, 0]))\n m.step()\n assert_array_equal(m['Revenue'][''], np.array([10, 10, 20]))\n assert_array_equal(m['Earnings'][''], np.array([0, 0, 10]))\n m.reset()\n assert_array_equal(m['Revenue'][''], np.array([0, 0, 0]))\n assert_array_equal(m['Earnings'][''], np.array([-10, -10, -10]))\n\n def test_simple_stock_small_timestep(self):\n \"\"\"Test whether an array supports simple stocks\"\"\"\n with mn.model(timestep=0.25) as m:\n mn.stock('Revenue', np.array([5, 5, 10]), np.array([0, 0, 0]))\n mn.variable('Cost', np.array([10, 10, 10]))\n mn.variable('Earnings', 
lambda r, c: r - c, 'Revenue', 'Cost')\n\n assert_array_equal(m['Revenue'][''], np.array([0, 0, 0]))\n assert_array_equal(m['Earnings'][''], np.array([-10, -10, -10]))\n m.step()\n assert_array_equal(m['Revenue'][''], np.array([1.25, 1.25, 2.5]))\n assert_array_equal(m['Earnings'][''], np.array([-8.75, -8.75, -7.5]))\n m.step()\n assert_array_equal(m['Revenue'][''], np.array([2.5, 2.5, 5]))\n assert_array_equal(m['Earnings'][''], np.array([-7.5, -7.5, -5]))\n m.reset()\n assert_array_equal(m['Revenue'][''], np.array([0, 0, 0]))\n assert_array_equal(m['Earnings'][''], np.array([-10, -10, -10]))\n\n def test_simple_stock_with_treatments(self):\n \"\"\"Test whether an array supports simple stocks, with treatments\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m: \n mn.stock('Revenue', np.array([5, 5, 10]), np.array([0, 0, 0]))\n mn.variable('Cost', \n mn.PerTreatment({'As is': np.array([10, 10, 10]), \n 'To be': np.array([9, 8, 6])}))\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n\n assert_array_equal(m['Revenue']['As is'], np.array([0, 0, 0]))\n assert_array_equal(m['Earnings']['As is'], np.array([-10, -10, -10]))\n assert_array_equal(m['Earnings']['To be'], np.array([-9, -8, -6]))\n m.step()\n assert_array_equal(m['Earnings']['As is'], np.array([-5, -5, 0]))\n assert_array_equal(m['Earnings']['To be'], np.array([-4, -3, 4]))\n m.step()\n assert_array_equal(m['Earnings']['As is'], np.array([0, 0, 10]))\n assert_array_equal(m['Earnings']['To be'], np.array([1, 2, 14]))\n m.reset() \n assert_array_equal(m['Earnings']['As is'], np.array([-10, -10, -10]))\n assert_array_equal(m['Earnings']['To be'], np.array([-9, -8, -6]))\n\n def test_simple_accum(self):\n \"\"\"Test whether an array supports simple accums\"\"\"\n with mn.model() as m:\n mn.accum('Revenue', np.array([5, 5, 10]), np.array([0, 0, 0]))\n mn.variable('Cost', np.array([10, 10, 10]))\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n\n assert_array_equal(m['Revenue'][''], np.array([0, 0, 0]))\n assert_array_equal(m['Earnings'][''], np.array([-10, -10, -10]))\n m.step()\n assert_array_equal(m['Revenue'][''], np.array([5, 5, 10]))\n assert_array_equal(m['Earnings'][''], np.array([-5, -5, 0]))\n m.step()\n assert_array_equal(m['Revenue'][''], np.array([10, 10, 20]))\n assert_array_equal(m['Earnings'][''], np.array([0, 0, 10]))\n m.reset()\n assert_array_equal(m['Revenue'][''], np.array([0, 0, 0]))\n assert_array_equal(m['Earnings'][''], np.array([-10, -10, -10]))\n\n def test_array_sum(self):\n \"\"\"Can I sum over an array?\"\"\"\n with mn.model() as m:\n mn.variable('Cost', np.array([10, 10, 5]))\n mn.variable('TotalCost', np.sum, 'Cost')\n\n self.assertEqual(m['TotalCost'][''], 25)\n\n\nclass TwoDimensionalArrayTest(unittest.TestCase):\n \"\"\"Test 2D numpy arrays\"\"\"\n def test_array_access(self):\n \"\"\"Test whether a variable can take a 2D array value\"\"\"\n with mn.model() as m:\n Revenue = mn.variable('Revenue', np.array([[30.1, 15, 20], [1, 2, 0]]))\n\n assert_array_equal(Revenue[''], np.array([[30.1, 15, 20], [1, 2, 0]]))\n\n def test_mixed_array_and_scalar(self):\n \"\"\"Test whether a lambda variable can take 2D arrays\"\"\"\n with mn.model() as m:\n mn.variable('Revenue', np.array([[30.1, 15, 20], [1, 2, 0]]))\n mn.variable('Cost', 10)\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n\n assert_array_equal(\n m['Earnings'][''], np.array([[20.1, 5, 10], [-9, -8, -10]]))\n\n def test_simple_stock(self):\n \"\"\"Test whether a 2D array supports simple 
stocks\"\"\"\n with mn.model() as m:\n mn.stock('Revenue', np.array([[5, 5], [10, 15]]), np.zeros((2, 2)))\n mn.variable('Cost', np.array([[10, 10], [0, 9]]))\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n mn.stock('AccumulatedEarnings', \n lambda r: r, ('Revenue',), \n np.zeros((2, 2)))\n\n revenue = m['Revenue']\n earnings = m['Earnings']\n\n assert_array_equal(revenue[''], np.array([[0, 0], [0, 0]]))\n assert_array_equal(earnings[''], np.array([[-10, -10], [0, -9]]))\n m.step()\n assert_array_equal(revenue[''], np.array([[5, 5], [10, 15]]))\n assert_array_equal(earnings[''], np.array([[-5, -5], [10, 6]]))\n m.step()\n assert_array_equal(revenue[''], np.array([[10, 10], [20, 30]]))\n assert_array_equal(earnings[''], np.array([[0, 0], [20, 21]]))\n m.reset()\n assert_array_equal(revenue[''], np.array([[0, 0], [0, 0]]))\n assert_array_equal(earnings[''], np.array([[-10, -10], [0, -9]]))\n\n def test_simple_stock_short_timestep(self):\n \"\"\"Test whether a 2D array supports simple stocks\"\"\"\n with mn.model(timestep=0.5) as m:\n mn.stock('Revenue', np.array([[5, 5], [10, 15]]), np.zeros((2, 2)))\n mn.variable('Cost', np.array([[10, 10], [0, 9]]))\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n mn.stock('AccumulatedEarnings', \n lambda r: r, ('Revenue',), \n np.zeros((2, 2)))\n\n revenue = m['Revenue']\n earnings = m['Earnings']\n\n assert_array_equal(revenue[''], np.array([[0, 0], [0, 0]]))\n assert_array_equal(earnings[''], np.array([[-10, -10], [0, -9]]))\n m.step(2)\n assert_array_equal(revenue[''], np.array([[5, 5], [10, 15]]))\n assert_array_equal(earnings[''], np.array([[-5, -5], [10, 6]]))\n m.step(2)\n assert_array_equal(revenue[''], np.array([[10, 10], [20, 30]]))\n assert_array_equal(earnings[''], np.array([[0, 0], [20, 21]]))\n m.reset()\n assert_array_equal(revenue[''], np.array([[0, 0], [0, 0]]))\n assert_array_equal(earnings[''], np.array([[-10, -10], [0, -9]]))\n\n def test_array_sum(self):\n \"\"\"Test sum over 2D array\"\"\"\n with mn.model() as m:\n mn.variable('Revenue', np.array([[30.1, 15, 20], [1, 2, 0]]))\n mn.variable('Cost', 10)\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n mn.variable('TotalEarnings', np.sum, 'Earnings')\n mn.variable('TotalEarningsByCostCenter', \n lambda e: np.sum(e, axis=0), 'Earnings')\n\n self.assertAlmostEqual(m['TotalEarnings'][''], 8.1)\n assert_array_almost_equal(\n m['TotalEarningsByCostCenter'][''], [11.1, -3, 0])\n\n\nclass FunctionTest(unittest.TestCase):\n \"\"\"Test use of free-standing function\"\"\"\n def test_function_with_constant(self):\n \"\"\"Test a variable initialized with a function call\"\"\"\n # Is this even a useful test??\n def is_special(facility, situation, criterion):\n return (facility == 1) and (situation == 0) and (criterion == 2)\n\n def create_attractiveness():\n attr = np.empty((3, 3, 3))\n for index in np.ndindex(*(attr.shape)):\n if is_special(*index):\n attr[index] = index[0] * 100 + index[1] * 10 + index[2]\n else:\n attr[index] = index[0] * 10 + index[1] \n return attr\n\n with mn.model() as m:\n mn.variable('Attractiveness', create_attractiveness())\n\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0)\n self.assertEqual(m['Attractiveness'][''][2, 2, 2], 22)\n self.assertEqual(m['Attractiveness'][''][2, 0, 1], 20)\n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 102)\n\n def test_function_with_variable(self):\n \"\"\"Test a variable updated with a function as a callable\"\"\"\n def is_special(facility, situation, criterion):\n return 
(facility == 1) and (situation == 0) and (criterion == 2)\n\n def attractiveness(md):\n attr = np.empty((3, 3, 3))\n for index in np.ndindex(*(attr.shape)):\n if is_special(*index):\n attr[index] = md.TIME\n else:\n attr[index] = index[0] * 10 + index[1] \n return attr\n\n with mn.model() as m:\n mn.variable('Attractiveness', attractiveness, '__model__')\n\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0)\n self.assertEqual(m['Attractiveness'][''][2, 2, 2], 22)\n self.assertEqual(m['Attractiveness'][''][2, 0, 1], 20)\n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 0)\n m.step()\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0)\n self.assertEqual(m['Attractiveness'][''][2, 2, 2], 22)\n self.assertEqual(m['Attractiveness'][''][2, 0, 1], 20)\n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 1)\n m.step()\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0)\n self.assertEqual(m['Attractiveness'][''][2, 2, 2], 22)\n self.assertEqual(m['Attractiveness'][''][2, 0, 1], 20)\n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 2)\n m.step(10)\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0)\n self.assertEqual(m['Attractiveness'][''][2, 2, 2], 22)\n self.assertEqual(m['Attractiveness'][''][2, 0, 1], 20)\n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 12)\n m.reset()\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0)\n self.assertEqual(m['Attractiveness'][''][2, 2, 2], 22)\n self.assertEqual(m['Attractiveness'][''][2, 0, 1], 20)\n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 0)\n m.step()\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0)\n self.assertEqual(m['Attractiveness'][''][2, 2, 2], 22)\n self.assertEqual(m['Attractiveness'][''][2, 0, 1], 20)\n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 1)\n\n def test_function_with_stock(self):\n \"\"\"Test a stock initialized and updated with functions\"\"\"\n def is_special(facility, situation, criterion):\n return (facility == 1) and (situation == 0) and (criterion == 2)\n\n def create_attractiveness():\n return np.zeros((3, 3, 3))\n\n def update_attractiveness():\n update = np.zeros((3, 3, 3))\n for index in np.ndindex(*(update.shape)):\n if is_special(*index):\n update[index] = 1\n return update \n\n with mn.model() as m:\n mn.stock('Attractiveness', \n update_attractiveness, (), create_attractiveness, ())\n\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0) \n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 0)\n m.step() \n self.assertEqual(m['Attractiveness'][''][2, 2, 2], 0) \n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 1)\n m.step() \n self.assertEqual(m['Attractiveness'][''][2, 0, 1], 0)\n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 2)\n m.step(10)\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0) \n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 12)\n m.reset()\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0) \n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 0)\n m.step()\n self.assertEqual(m['Attractiveness'][''][0, 0, 0], 0) \n self.assertEqual(m['Attractiveness'][''][1, 0, 2], 1)\n\n\nclass NamedTuplesFundasTest(unittest.TestCase):\n \"\"\"Test functionality for named tuples\"\"\"\n def setUp(self):\n self.OneType = mn.mn_namedtuple('OneType', ['Foo', 'Bar', 'Baz'])\n self.AnotherType = mn.mn_namedtuple('AnotherType', ['Foo', 'Bar'])\n\n def test_add(self):\n \"\"\"Test addition for Minnetonka named tuples\"\"\"\n self.assertEqual(self.OneType(1, 2, 3) + self.OneType(4, 5, 0), \n self.OneType(5, 7, 3))\n\n def test_add_scalar(self):\n 
\"\"\"Test addition for MN named tuple and a scalar\"\"\"\n self.assertEqual(self.OneType(1, 2, 3) + 2, self.OneType(3, 4, 5))\n self.assertEqual(2 + self.OneType(1, 2, 0), self.OneType(3, 4, 2))\n\n def test_add_failure(self):\n \"\"\"Test addition failure for Minnetonka named tuples\"\"\"\n with self.assertRaises(TypeError):\n self.OneType(1, 2, 3) + self.AnotherType(3, 4)\n\n def test_subtract(self):\n \"\"\"Test subtraction for Minnetonka named tuples\"\"\"\n self.assertEqual(self.OneType(1, 2, 3) - self.OneType(4, 5, 0), \n self.OneType(-3, -3, 3))\n\n def test_subtract_scalar(self):\n \"\"\"Test subtraction of scalar value from MN named tuple\"\"\"\n self.assertEqual(self.OneType(1, 2, 3) - 2, self.OneType(-1, 0, 1))\n self.assertEqual(5 - self.OneType(1, 2, 0), self.OneType(4, 3, 5))\n\n def test_subtract_failure (self):\n \"\"\"Test subtraction failure for Minnetonka named tuples\"\"\"\n with self.assertRaises(TypeError):\n self.OneType(1, 2, 3) - self.AnotherType(3, 4)\n\n def test_multiply(self):\n \"\"\"Test multiplication of two mn named tuples together\"\"\"\n self.assertEqual(self.OneType(1, 2, 3) * self.OneType(4, 5, 6),\n self.OneType(4, 10, 18))\n\n def test_multiply_scalar(self):\n \"\"\"Test multiplication of named tuple by scalar\"\"\"\n self.assertEqual(self.OneType(1, 2, 3) * 2, self.OneType(2, 4, 6))\n\n def test_multiply_failure(self):\n \"\"\"Test multiplication failure for Minnetonka named tuples\"\"\"\n with self.assertRaises(TypeError):\n self.OneType(1, 2, 3) * self.AnotherType(5, 6)\n\n def test_divide(self):\n \"\"\"Test divide of two mn named tuples together\"\"\"\n self.assertEqual(self.OneType(4, 5, 6) / self.OneType(1, 2, 3),\n self.OneType(4, 2.5, 2))\n\n def test_divide_scalar(self):\n \"\"\"Test divide of named tuple by scalar\"\"\"\n self.assertEqual(self.OneType(1, 2, 3) / 2, self.OneType(0.5, 1, 1.5))\n\n def test_divide_failure(self):\n \"\"\"Test divide failure for Minnetonka named tuples\"\"\"\n with self.assertRaises(TypeError):\n self.OneType(1, 2, 3) / self.AnotherType(5, 6)\n\n def test_round(self):\n \"\"\"Test rounding a mn named tuple.\"\"\"\n self.assertEqual(\n round(self.OneType(3.2, 2, 14.65)), \n self.OneType(3, 2, 15))\n self.assertEqual(\n round(self.OneType(3.2, 2, 14.65), 1), \n self.OneType(3.2, 2, 14.7))\n\n def test_le(self):\n \"\"\"Test <= on two named tuples.\"\"\"\n self.assertTrue(self.OneType(1.4, 2, 14.65) <= self.OneType(1.4, 4, 15))\n self.assertFalse(self.OneType(1.4, 2, 14.65) <= self.OneType(1, 4, 15))\n\n def test_le_scalar(self):\n \"\"\"Test <= on two named tuples.\"\"\"\n self.assertTrue(self.OneType(1.4, 2, 14.65) <= 20)\n self.assertFalse(self.OneType(1.4, 2, 14.65) <= 14.0)\n self.assertTrue(1.4 <= self.OneType(1.4, 2, 14.65))\n self.assertFalse(2.0 <= self.OneType(1.4, 2, 14.65))\n\n def test_le_failure(self):\n \"\"\"Test <= on two incomparable named tuples.\"\"\"\n with self.assertRaises(TypeError):\n self.OneType(1, 2, 3) <= self.AnotherType(5, 6),\n\n def test_equal(self):\n \"\"\"Test whether two equivalent namedtuples are judged equal\"\"\"\n self.assertEqual(self.OneType(0, 10, -10), self.OneType(0, 10, -10))\n\n def test_not_equal(self):\n \"\"\"Test whether two uneqal namedtuples are judged unequal\"\"\"\n self.assertNotEqual(self.OneType(0, 10, -10), self.OneType(0, 10, -9))\n\n def test_create(self):\n \"\"\"Test whether the new method _create works\"\"\"\n self.assertEqual(self.OneType._create(1), self.OneType(1, 1, 1))\n self.assertEqual(self.AnotherType._create(0), self.AnotherType(0, 
0))\n\n\nclass UseOfNamedTupleTest(unittest.TestCase):\n \"\"\"Test Minnetonka functionality for named tuples, instead of scalars\"\"\"\n def setUp(self):\n self.Payer = mn.mn_namedtuple(\n 'Payer', ['Medicare', 'Medicaid', 'Commercial'])\n\n def test_constant(self):\n \"\"\"Test whether a constant can be a named tuple\"\"\"\n with mn.model() as m:\n Revenue = mn.variable('Revenue', self.Payer(30, 15, 20))\n\n self.assertEqual(Revenue[''].Medicare, 30)\n self.assertEqual(Revenue[''].Medicaid, 15)\n self.assertEqual(Revenue[''].Commercial, 20)\n\n def test_expression(self):\n \"\"\"Test whether a variable with a callable can be a named tuple\"\"\"\n with mn.model() as m:\n mn.variable('Revenue', self.Payer(30, 15, 20))\n mn.variable('Cost', self.Payer(10, 10, 10))\n Earnings = mn.variable('Earnings', \n lambda r, c: r - c, 'Revenue', 'Cost')\n\n self.assertEqual(Earnings[''], self.Payer(20, 5, 10))\n\n def test_sum(self):\n \"\"\"Does a sum over a named tuple work?\"\"\"\n with mn.model() as m:\n mn.variable('Revenue', self.Payer(30, 15, 20))\n TotalRevenue = mn.variable('TotalRevenue', sum, 'Revenue')\n self.assertEqual(TotalRevenue[''], 65)\n\n\n def test_simple_stock(self):\n \"\"\"Test whether a simple stock can be a named tuple\"\"\"\n with mn.model() as m:\n Revenue = mn.stock('Revenue', \n self.Payer(5, 5, 10), self.Payer(0, 0, 0))\n mn.variable('Cost', self.Payer(10, 10, 10))\n Earnings = mn.variable('Earnings', \n lambda r, c: r - c, 'Revenue', 'Cost')\n\n self.assertEqual(Revenue[''], self.Payer(0, 0, 0))\n self.assertEqual(Earnings[''], self.Payer(-10, -10, -10))\n m.step()\n self.assertEqual(Revenue[''], self.Payer(5, 5, 10))\n self.assertEqual(Earnings[''], self.Payer(-5, -5, 0))\n m.step()\n self.assertEqual(Revenue[''], self.Payer(10, 10, 20))\n self.assertEqual(Earnings[''], self.Payer(0, 0, 10))\n m.reset()\n self.assertEqual(Revenue[''], self.Payer(0, 0, 0))\n self.assertEqual(Earnings[''], self.Payer(-10, -10, -10))\n\n def test_simple_stock_short_timestep(self):\n \"\"\"Test whether a simple stock can be a named tuple; non-1 timestep\"\"\"\n with mn.model(timestep=0.5) as m:\n Revenue = mn.stock('Revenue', \n self.Payer(5, 5, 10), self.Payer(0, 0, 0))\n mn.variable('Cost', self.Payer(10, 10, 10))\n Earnings = mn.variable('Earnings', \n lambda r, c: r - c, 'Revenue', 'Cost')\n\n self.assertEqual(Revenue[''], self.Payer(0, 0, 0))\n self.assertEqual(Earnings[''], self.Payer(-10, -10, -10))\n m.step(2)\n self.assertEqual(Revenue[''], self.Payer(5, 5, 10))\n self.assertEqual(Earnings[''], self.Payer(-5, -5, 0))\n m.step(2)\n self.assertEqual(Revenue[''], self.Payer(10, 10, 20))\n self.assertEqual(Earnings[''], self.Payer(0, 0, 10))\n m.reset()\n self.assertEqual(Revenue[''], self.Payer(0, 0, 0))\n self.assertEqual(Earnings[''], self.Payer(-10, -10, -10))\n\n def test_stock_with_callables(self):\n \"\"\"Test whether a stock with callables can use named tuples\"\"\"\n with mn.model() as m:\n mn.stock('Revenue', self.Payer(5, 5, 10), self.Payer(0, 0, 0))\n mn.variable('Cost', self.Payer(10, 10, 10))\n mn.variable('Earnings', lambda r, c: r - c, 'Revenue', 'Cost')\n AccumulatedEarnings = mn.stock('AccumulatedEarnings',\n lambda e: e, ('Earnings',),\n self.Payer(0, 0, 0))\n\n self.assertEqual(AccumulatedEarnings[''], self.Payer(0, 0, 0))\n m.step()\n self.assertEqual(AccumulatedEarnings[''], self.Payer(-10, -10, -10))\n m.step()\n self.assertEqual(AccumulatedEarnings[''], self.Payer(-15, -15, -10))\n m.step()\n self.assertEqual(AccumulatedEarnings[''], self.Payer(-15, -15, 0))\n 
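        # A reading of the assertions above (descriptive comment only; no
        # behavior change): AccumulatedEarnings integrates Earnings, so each
        # step adds the Earnings value from the state just before the step,
        # while Revenue itself grows by Payer(5, 5, 10) per step. Earnings
        # moves (-10,-10,-10) -> (-5,-5,0) -> (0,0,10), which accumulates to
        # (-10,-10,-10), then (-15,-15,-10), then (-15,-15,0).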
m.reset()\n self.assertEqual(AccumulatedEarnings[''], self.Payer(0, 0, 0))\n\n def test_namedtuple_per_treatment(self):\n \"\"\"Test whether a treatment can accept a namedtuple\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n BaseRevenue = mn.variable('BaseRevenue',\n mn.PerTreatment({'As is': self.Payer(12, 13, 14),\n 'To be': self.Payer(2, 4, 6)}))\n TotalRevenue = mn.variable('TotalRevenue',\n lambda br: br + 2,\n 'BaseRevenue')\n\n self.assertEqual(BaseRevenue['As is'], self.Payer(12, 13, 14))\n self.assertEqual(BaseRevenue['To be'], self.Payer(2, 4, 6))\n self.assertEqual(TotalRevenue['As is'], self.Payer(14, 15, 16))\n self.assertEqual(TotalRevenue['To be'], self.Payer(4, 6, 8))\n\nclass TwoSimulations(unittest.TestCase):\n \"\"\"Very simple situations of having two simulations\"\"\"\n def test_creating_two_constants_with_same_name(self):\n \"\"\"Test creating two constants within two different simulations\"\"\"\n work_model = mn.model([mn.variable('HoursPerDay', 8)])\n calendar_model = mn.model([mn.variable('HoursPerDay', 24)])\n self.assertEqual(work_model['HoursPerDay'][''], 8)\n self.assertEqual(calendar_model['HoursPerDay'][''], 24)\n\n def test_two_variables_with_same_name(self):\n \"\"\"Test whether two variables with the same name will udpate amouns\"\"\"\n with mn.model() as work_model:\n mn.variable('HoursPerDay', 8)\n mn.variable('HoursPerWeek', lambda hpd: hpd*5, 'HoursPerDay',)\n\n with mn.model() as calendar_model:\n mn.variable('HoursPerDay', 24)\n mn.stock('AccumulatedHours', lambda hpd: hpd, ('HoursPerDay',), 0)\n\n self.assertEqual(work_model['HoursPerWeek'][''], 40)\n self.assertEqual(calendar_model['AccumulatedHours'][''], 0)\n work_model.step(); calendar_model.step()\n self.assertEqual(work_model['HoursPerWeek'][''], 40)\n self.assertEqual(calendar_model['AccumulatedHours'][''], 24)\n work_model.step(); calendar_model.step()\n self.assertEqual(work_model['HoursPerWeek'][''], 40)\n self.assertEqual(calendar_model['AccumulatedHours'][''], 48)\n work_model.reset(); calendar_model.reset()\n self.assertEqual(work_model['HoursPerWeek'][''], 40)\n self.assertEqual(calendar_model['AccumulatedHours'][''], 0)\n\n def test_two_models_different_timing(self):\n \"\"\"Test whether two models work with different timing of steps\"\"\"\n with mn.model() as calendar_model_1:\n mn.variable('HoursPerDay', 24)\n mn.stock('AccumulatedHours', lambda hpd: hpd, ('HoursPerDay',), 0)\n\n with mn.model() as calendar_model_2: \n mn.variable('HoursPerDay', 24)\n mn.stock('AccumulatedHours', lambda hpd: hpd, ('HoursPerDay',), 0)\n\n self.assertEqual(calendar_model_1['AccumulatedHours'][''], 0)\n self.assertEqual(calendar_model_2['AccumulatedHours'][''], 0)\n calendar_model_1.step()\n self.assertEqual(calendar_model_1['AccumulatedHours'][''], 24)\n self.assertEqual(calendar_model_2['AccumulatedHours'][''], 0)\n calendar_model_1.step(); calendar_model_2.step()\n self.assertEqual(calendar_model_1['AccumulatedHours'][''], 48)\n self.assertEqual(calendar_model_2['AccumulatedHours'][''], 24)\n calendar_model_2.step(3)\n self.assertEqual(calendar_model_1['AccumulatedHours'][''], 48)\n self.assertEqual(calendar_model_2['AccumulatedHours'][''], 96)\n calendar_model_1.reset()\n self.assertEqual(calendar_model_1['AccumulatedHours'][''], 0)\n self.assertEqual(calendar_model_2['AccumulatedHours'][''], 96)\n\n\nclass UserSetAmount(unittest.TestCase):\n \"\"\"For testing whether users can set the amount of variables\"\"\"\n\n def test_update_amount(self):\n \"\"\"Can a constant take a new 
value?\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n DischargeBegins = mn.variable('DischargeBegins', \n mn.PerTreatment({'As is': 12, 'To be': 2}))\n\n self.assertEqual(DischargeBegins['As is'], 12)\n DischargeBegins['As is'] = 11\n self.assertEqual(DischargeBegins['As is'], 11)\n self.assertEqual(DischargeBegins['To be'], 2)\n m.reset(reset_external_vars=False)\n self.assertEqual(DischargeBegins['As is'], 11)\n self.assertEqual(DischargeBegins['To be'], 2)\n m.reset()\n self.assertEqual(DischargeBegins['As is'], 12)\n self.assertEqual(DischargeBegins['To be'], 2)\n\n def test_update_amount_no_arg(self):\n \"\"\"Can a no arg variable take a new value?\"\"\"\n with mn.model() as m:\n DischargeProgress = mn.variable('DischargeProgress', lambda: 0.5)\n\n self.assertEqual(DischargeProgress[''], 0.5)\n DischargeProgress[''] = 0.75\n self.assertEqual(DischargeProgress[''], 0.75)\n m.step()\n self.assertEqual(DischargeProgress[''], 0.75)\n m.reset(reset_external_vars=False)\n self.assertEqual(DischargeProgress[''], 0.75)\n m.reset()\n self.assertEqual(DischargeProgress[''], 0.5)\n\n def test_update_amount_depends(self):\n \"\"\"Can a variable take a new value when it has dependencies?\"\"\"\n with mn.model() as m:\n Foo = mn.variable('Foo', 9)\n Bar = mn.variable('Bar', lambda f: f, 'Foo')\n\n self.assertEqual(Bar[''], 9)\n Foo[''] = 2.4\n m.recalculate()\n self.assertEqual(Bar[''], 2.4)\n m.reset(reset_external_vars=False)\n self.assertEqual(Bar[''], 2.4)\n Bar[''] = 8\n m.recalculate()\n self.assertEqual(Bar[''], 8)\n m.reset()\n self.assertEqual(Bar[''], 9)\n\n def test_update_amount_depends_constants(self):\n \"\"\"Can a constant with constant dependencies take a new value?\"\"\"\n with mn.model() as m:\n Foo = mn.constant('Foo', 9)\n Bar = mn.constant('Bar', lambda f: f, 'Foo')\n\n self.assertEqual(Bar[''], 9)\n Foo[''] = 2.4\n m.recalculate()\n self.assertEqual(Bar[''], 2.4)\n m.reset(reset_external_vars=False)\n self.assertEqual(Bar[''], 2.4)\n m.reset()\n self.assertEqual(Bar[''], 9)\n\n def test_update_depends_stock(self):\n \"\"\"Can a stock with constant dependencies take a new value?\"\"\"\n with mn.model() as m:\n Foo = mn.stock('Foo', lambda: 1, (), lambda x: x, ('Bar',))\n Bar = mn.constant('Bar', 99)\n\n self.assertEqual(m['Foo'][''], 99)\n m['Bar'][''] = 90\n m.recalculate()\n self.assertEqual(m['Foo'][''], 90)\n m.step()\n self.assertEqual(m['Foo'][''], 91)\n\n def test_update_depends_stock_chain(self):\n \"\"\"Can a stock with change of constant dependencies take a new value?\"\"\"\n with mn.model() as m:\n Foo = mn.stock('Foo', lambda: 1, (), lambda x: x, ('Bar',))\n Bar = mn.constant('Bar', lambda x: x, 'Baz')\n Baz = mn.constant('Baz', 99)\n\n self.assertEqual(m['Foo'][''], 99)\n m['Baz'][''] = 90\n m.recalculate()\n self.assertEqual(m['Foo'][''], 90)\n m.step()\n self.assertEqual(m['Foo'][''], 91)\n\n def test_stock_with_user_setting_amount(self):\n \"\"\"Test stock with user setting amount\"\"\"\n with mn.model() as m:\n Foo = mn.stock('Foo', 1, 0)\n\n m.step()\n self.assertEqual(Foo[''], 1)\n Foo[''] = 10\n self.assertEqual(Foo[''], 10)\n m.step()\n self.assertEqual(Foo[''], 11)\n m.reset()\n m.step()\n self.assertEqual(Foo[''], 1)\n Foo[''] = 7\n m.reset(reset_external_vars=False)\n self.assertEqual(Foo[''], 0)\n\n def test_user_setting_constant_multiple_treatments(self):\n \"\"\"Can a user set the amount of a constant for multiple treatments?\"\"\"\n with mn.model(treatments={'As is', 'To be'}) as m:\n DischargeBegins = mn.variable('DischargeBegins', \n 
mn.PerTreatment({'As is': 10, 'To be': 8, 'Might be': 6}))\n DischargeEnds = mn.variable('DischargeEnds', 14)\n DischargeDuration = mn.variable('DischargeDuration', \n lambda e, b: e - b, 'DischargeEnds', 'DischargeBegins')\n\n self.assertEqual(DischargeBegins['As is'], 10)\n self.assertEqual(DischargeDuration['To be'], 6)\n DischargeBegins['__all__'] = 9\n m.recalculate()\n self.assertEqual(DischargeBegins['As is'], 9)\n self.assertEqual(DischargeDuration['To be'], 5)\n\nclass ForeachDict(unittest.TestCase):\n \"\"\"For testing the foreach construct with dictionaries\"\"\"\n def test_simple(self):\n \"\"\"Does the simplest possible foreach work?\"\"\"\n with mn.model():\n mn.variable('Baz', {'foo': 12, 'bar': 13})\n Quz = mn.variable('Quz', mn.foreach(lambda f: f + 1), 'Baz')\n self.assertEqual(Quz[''], {'foo': 13, 'bar': 14})\n\n def test_two_arg_foreach(self):\n \"\"\"Does a two arg callable to a foreach work?\"\"\"\n with mn.model():\n mn.variable('Baz', {'foo': 12, 'bar': 13})\n mn.variable('Corge', {'foo': 0, 'bar': 99})\n Quz = mn.variable('Quz', mn.foreach(lambda b, c: b + c), 'Baz', 'Corge')\n self.assertEqual(Quz[''], {'foo': 12, 'bar': 112})\n\n def test_foreach_with_mismatch(self):\n \"\"\"Does a two arg foreach with mismatched dicts error correctly?\"\"\"\n with self.assertRaisesRegex(mn.MinnetonkaError, \n 'Foreach encountered mismatched dicts'):\n with mn.model():\n mn.variable('Baz', {'foo': 12, 'bar': 13})\n mn.variable('Corge', {'foo': 0, 'wtf': 99})\n Quz = mn.variable('Quz', \n mn.foreach(lambda b, c: b + c), 'Baz', 'Corge')\n\n def test_big_dict_foreach(self):\n \"\"\"Does foreach work with a 1000 element dict?\"\"\"\n with mn.model():\n mn.variable('Biggus', {'ind{:03}'.format(n): n for n in range(1000)})\n Dickus = mn.variable('Dickus', mn.foreach(lambda x: x*2), 'Biggus')\n self.assertEqual(Dickus['']['ind002'], 4)\n self.assertEqual(Dickus['']['ind999'], 1998)\n\n def test_foreach_nondict_error(self):\n \"\"\"Does foreach raise error when first variable is not a dict?\"\"\"\n with self.assertRaisesRegex(mn.MinnetonkaError,\n 'First arg of foreach 23 must be dictionary or tuple'):\n with mn.model():\n mn.variable('Baz', 23)\n Quz = mn.variable('Quz', mn.foreach(lambda f: f + 1), 'Baz')\n\n def test_foreach_nondict_sunny_day(self):\n \"\"\"Does foreach do the right thing with a nondict as second element?\"\"\"\n with mn.model():\n mn.variable('Baz', {'foo': 12, 'bar': 13})\n mn.variable('Corge', 12)\n Quz = mn.variable('Quz', mn.foreach(lambda b, c: b + c), 'Baz', 'Corge')\n self.assertEqual(Quz[''], {'foo': 24, 'bar': 25})\n\n def test_foreach_stock(self):\n \"\"\"Does foreach work with stocks and dicts?\"\"\"\n with mn.model() as m:\n mn.variable('Baz', {'foo': 12, 'bar': 13})\n mn.variable('Waldo', {'foo': 1, 'bar': 2})\n Corge = mn.stock('Corge', \n mn.foreach(lambda b: b+2), ('Baz',), \n mn.foreach(lambda w: w), ('Waldo',))\n m.step()\n self.assertEqual(Corge[''], {'foo':15, 'bar': 17} )\n m.step(2)\n self.assertEqual(Corge[''], {'foo':43, 'bar': 47} )\n\n def test_nested_foreach_stock(self):\n \"\"\"Do nested foreaches work with stocks and dicts?\"\"\"\n with mn.model() as m:\n Baz = mn.variable('Baz', \n {'drg001': {'trad': 7, 'rrc': 9},\n 'drg003': {'trad': 18, 'rrc': 4},\n 'drg257': {'trad': 6, 'rrc': 11}})\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n {'drg001': {'trad': 0, 'rrc': 0},\n 'drg003': {'trad': 0, 'rrc': 0},\n 'drg257': {'trad': 0, 'rrc': 0}})\n m.step()\n self.assertEqual(\n Corge[''], \n {'drg001': {'trad': 8, 
'rrc': 10},\n 'drg003': {'trad': 19, 'rrc': 5},\n 'drg257': {'trad': 7, 'rrc': 12}})\n m.step(2)\n self.assertEqual(\n Corge[''], \n {'drg001': {'trad': 24, 'rrc': 30},\n 'drg003': {'trad': 57, 'rrc': 15},\n 'drg257': {'trad': 21, 'rrc': 36}})\n\n def test_foreach_stock_timestep(self):\n \"\"\"Does foreach work with stocks and dicts, and smaller timestep?\"\"\"\n with mn.model(timestep=0.5) as m:\n mn.variable('Baz', {'foo': 12, 'bar': 13})\n Corge = mn.stock('Corge', \n mn.foreach(lambda b: b+2), ('Baz',), \n {'foo': 0, 'bar': 0})\n m.step()\n self.assertEqual(Corge[''], {'foo':7, 'bar': 7.5} )\n m.step(2)\n self.assertEqual(Corge[''], {'foo':21, 'bar': 22.5} )\n\n def test_foreach_stock_multivariable(self):\n \"\"\"Does foreach work with stocks that have multiple variables?\"\"\"\n with mn.model() as m:\n mn.variable('Baz', {'foo': 12, 'bar': 13})\n mn.variable('Quz', {'foo': 1, 'bar': 2})\n Corge = mn.stock('Corge', \n mn.foreach(lambda b, q: b+q), ('Baz', 'Quz'), \n {'foo': 0, 'bar': 0})\n m.step()\n self.assertEqual(Corge[''], {'foo':13, 'bar': 15} )\n m.step(2)\n self.assertEqual(Corge[''], {'foo':39, 'bar': 45} )\n\n def test_foreach_accum(self):\n \"\"\"Does foreach work with accums and dicts?\"\"\"\n with mn.model() as m:\n mn.variable('Baz', {'foo': 12, 'bar': 13})\n mn.variable('Waldo', {'foo': 1, 'bar': 2})\n Corge = mn.accum('Corge', \n mn.foreach(lambda b: b+2), ('Baz',), \n mn.foreach(lambda w: w), ('Waldo',))\n m.step()\n self.assertEqual(Corge[''], {'foo':15, 'bar': 17} )\n m.step(2)\n self.assertEqual(Corge[''], {'foo':43, 'bar': 47} )\n\n def test_nested_foreach_accum(self):\n \"\"\"Do nested foreaches work with accums and dicts?\"\"\"\n with mn.model() as m:\n Baz = mn.variable('Baz', \n {'drg001': {'trad': 7, 'rrc': 9},\n 'drg003': {'trad': 18, 'rrc': 4},\n 'drg257': {'trad': 6, 'rrc': 11}})\n Corge = mn.accum('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n {'drg001': {'trad': 0, 'rrc': 0},\n 'drg003': {'trad': 0, 'rrc': 0},\n 'drg257': {'trad': 0, 'rrc': 0}})\n m.step()\n self.assertEqual(\n Corge[''], \n {'drg001': {'trad': 8, 'rrc': 10},\n 'drg003': {'trad': 19, 'rrc': 5},\n 'drg257': {'trad': 7, 'rrc': 12}})\n m.step(2)\n self.assertEqual(\n Corge[''], \n {'drg001': {'trad': 24, 'rrc': 30},\n 'drg003': {'trad': 57, 'rrc': 15},\n 'drg257': {'trad': 21, 'rrc': 36}})\n\nclass ForeachTuples(unittest.TestCase):\n \"\"\"For testing the foreach construct with tuples\"\"\"\n def test_simple(self):\n \"\"\"Does the simplest possible foreach work with named tuples?\"\"\"\n with mn.model():\n mn.variable('Baz', (12, 13, 15))\n Quz = mn.variable('Quz', mn.foreach(lambda f: f + 1), 'Baz')\n self.assertEqual(Quz[''], (13, 14, 16))\n\n def test_two_arg_foreach(self):\n \"\"\"Does a two arg callable to a foreach work?\"\"\"\n with mn.model():\n mn.variable('Baz', (12, 13, 0))\n mn.variable('Corge', (0, 99, 12))\n Quz = mn.variable('Quz', mn.foreach(lambda b, c: b + c), 'Baz', 'Corge')\n self.assertEqual(Quz[''], (12, 112, 12))\n\n def test_foreach_with_mismatched_tuples(self):\n \"\"\"Does a two arg foreach with mismatched tuples error correctly?\"\"\"\n with mn.model():\n mn.variable('Baz', (12, 13, 0))\n mn.variable('Corge', (0, 99))\n Quz = mn.variable('Quz', mn.foreach(lambda b, c: b + c), 'Baz', 'Corge')\n self.assertEqual(Quz[''], (12, 112))\n\n def test_big_tuple_foreach(self):\n \"\"\"Does foreach work with a 1000 element tuple?\"\"\"\n with mn.model():\n mn.variable('Biggus', tuple(range(1000)))\n Dickus = mn.variable('Dickus', mn.foreach(lambda x: x*2), 
'Biggus')\n self.assertEqual(Dickus[''][3], 6)\n self.assertEqual(Dickus[''][999], 1998)\n\n def test_foreach_nontuple_sunny_day(self):\n \"\"\"Does foreach do the right thing with a nontuple as second element?\"\"\"\n with mn.model():\n mn.variable('Baz', (12, 13))\n mn.variable('Corge', 12)\n Quz = mn.variable('Quz', mn.foreach(lambda b, c: b + c), 'Baz', 'Corge')\n self.assertEqual(Quz[''], (24, 25))\n\n def test_foreach_stock(self):\n \"\"\"Does foreach work with stocks?\"\"\"\n with mn.model() as m:\n mn.variable('Baz', (12, 13))\n mn.variable('Waldo', (1, 2))\n Corge = mn.stock('Corge', \n mn.foreach(lambda b: b+2), ('Baz',), \n lambda w: w, ('Waldo',))\n m.step()\n self.assertEqual(Corge[''], (15, 17))\n m.step(2)\n self.assertEqual(Corge[''], (43, 47))\n\n def test_nested_foreach_stock(self):\n \"\"\"Do nested foreaches work with stocks and tuples?\"\"\"\n with mn.model() as m:\n Baz = mn.variable('Baz', ((7, 9), (18, 4), (6, 11)))\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n ((0, 0), (0, 0), (0, 0)))\n m.step()\n self.assertEqual(Corge[''], ((8, 10), (19, 5), (7, 12)))\n m.step(2)\n self.assertEqual(Corge[''], ((24, 30), (57, 15), (21, 36)))\n\n def test_foreach_stock_timestep(self):\n \"\"\"Does foreach work with stocks?\"\"\"\n with mn.model(timestep=0.5) as m:\n mn.variable('Baz', (12, 13))\n Corge = mn.stock('Corge', \n mn.foreach(lambda b: b+2), ('Baz',), \n (0, 0))\n m.step()\n self.assertEqual(Corge[''], (7, 7.5))\n m.step(2)\n self.assertEqual(Corge[''], (21, 22.5))\n\n def test_foreach_stock_multivariable(self):\n \"\"\"Does foreach work with stocks that have multiple variables?\"\"\"\n with mn.model() as m:\n mn.variable('Baz', (12, 13))\n mn.variable('Quz', (1, 2))\n Corge = mn.stock('Corge', \n mn.foreach(lambda b, q: b+q), ('Baz', 'Quz'), \n (0, 0))\n m.step()\n self.assertEqual(Corge[''], (13, 15))\n m.step(2)\n self.assertEqual(Corge[''], (39, 45))\n\n def test_foreach_accum(self):\n \"\"\"Does foreach work with accums?\"\"\"\n with mn.model() as m:\n mn.variable('Baz', (12, 13))\n mn.variable('Waldo', (1, 2))\n Corge = mn.accum('Corge', \n mn.foreach(lambda b: b+2), ('Baz',), \n lambda w: w, ('Waldo',))\n m.step()\n self.assertEqual(Corge[''], (15, 17))\n m.step(2)\n self.assertEqual(Corge[''], (43, 47))\n\n def test_nested_foreach_accum(self):\n \"\"\"Do nested foreaches work with accums and tuples?\"\"\"\n with mn.model() as m:\n Baz = mn.variable('Baz', ((7, 9), (18, 4), (6, 11)))\n Corge = mn.accum('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n ((0, 0), (0, 0), (0, 0)))\n m.step()\n self.assertEqual(Corge[''], ((8, 10), (19, 5), (7, 12)))\n m.step(2)\n self.assertEqual(Corge[''], ((24, 30), (57, 15), (21, 36)))\n\n\nclass ForeachNamedTuples(unittest.TestCase):\n \"\"\"For testing the foreach construct with named tuples\"\"\"\n def setUp(self):\n self.drg = collections.namedtuple('drg', ['drg001', 'drg003', 'drg257'])\n self.site = collections.namedtuple('site', ['traditional', 'rrc'])\n\n def test_simple(self):\n \"\"\"Does the simplest possible foreach work with named tuples?\"\"\"\n with mn.model():\n mn.variable('Baz', self.drg(12, 13, 15))\n Quz = mn.variable('Quz', mn.foreach(lambda f: f + 1), 'Baz')\n self.assertEqual(Quz[''], self.drg(13, 14, 16))\n\n def test_two_arg_foreach(self):\n \"\"\"Does a two arg callable to a foreach work?\"\"\"\n with mn.model():\n mn.variable('Baz', self.drg(12, 13, 0))\n mn.variable('Corge', self.drg(0, 99, 12))\n Quz = mn.variable('Quz', mn.foreach(lambda b, c: b + c), 
'Baz', 'Corge')\n self.assertEqual(Quz[''], self.drg(12, 112, 12))\n\n def test_foreach_scalar_sunny_day(self):\n \"\"\"Does foreach do the right thing with a scalar as second element?\"\"\"\n with mn.model():\n mn.variable('Baz', self.drg(12, 13, 19))\n mn.variable('Corge', 12)\n Quz = mn.variable('Quz', mn.foreach(lambda b, c: b + c), 'Baz', 'Corge')\n self.assertEqual(Quz[''], self.drg(24, 25, 31))\n\n def test_foreach_scalar_sunny_day_third_elt(self):\n \"\"\"Does foreach do the right thing with a scalar as third element?\"\"\"\n with mn.model():\n mn.variable('Baz', self.drg(12, 13, 19))\n mn.variable('Grault', self.drg(0, 0, 2))\n mn.variable('Corge', 12)\n Quz = mn.variable('Quz', \n mn.foreach(lambda b, g, c: b + g + c), 'Baz', 'Grault', 'Corge')\n self.assertEqual(Quz[''], self.drg(24, 25, 33))\n\n def test_nested_foreach(self):\n \"\"\"Do nested namedtuple foreaches work?\"\"\" \n with mn.model():\n mn.variable('Baz', \n self.drg(self.site(12, 9), self.site(13, 4), self.site(19, 18)))\n mn.variable('Grault', \n self.drg(self.site(1, 2), self.site(3, 4), self.site(5, 6)))\n Qux = mn.variable('Qux',\n mn.foreach(mn.foreach(lambda b, g: b+g)), 'Baz', 'Grault')\n self.assertEqual(\n Qux[''], \n self.drg(self.site(13, 11), self.site(16, 8), self.site(24, 24)))\n\n def test_nested_foreach_one_level_const(self):\n \"\"\"Do nested namedtuple foreaches work, with one level const?\"\"\" \n with mn.model():\n mn.variable('Baz', \n self.drg(self.site(12, 9), self.site(13, 4), self.site(19, 18)))\n mn.variable('Grault', self.drg(1, 2, 3))\n Qux = mn.variable('Qux',\n mn.foreach(mn.foreach(lambda b, g: b+g)), 'Baz', 'Grault')\n self.assertEqual(\n Qux[''], \n self.drg(self.site(13, 10), self.site(15, 6), self.site(22, 21)))\n\n def test_nested_foreach_two_levels_const(self):\n \"\"\"Do nested namedtuple foreaches work, with two levels const?\"\"\" \n with mn.model():\n mn.variable('Baz', \n self.drg(self.site(12, 9), self.site(13, 4), self.site(19, 18)))\n mn.variable('Grault', 9)\n Qux = mn.variable('Qux',\n mn.foreach(mn.foreach(lambda b, g: b+g)), 'Baz', 'Grault')\n self.assertEqual(\n Qux[''], \n self.drg(self.site(21, 18), self.site(22, 13), self.site(28, 27)))\n\n def test_foreach_stock(self):\n \"\"\"Does foreach work with stocks and mn named tuples?\"\"\"\n with mn.model() as m:\n mn.variable('Baz', self.drg(12, 13, 19))\n Corge = mn.stock('Corge', \n mn.foreach(lambda b: b+2), ('Baz',), \n self.drg(0, 0, 0))\n m.step()\n self.assertEqual(Corge[''], self.drg(14, 15, 21))\n m.step(2)\n self.assertEqual(Corge[''], self.drg(42, 45, 63))\n\n def test_nested_foreach_stock(self):\n \"\"\"Do nested foreaches work with stocks and named tuples?\"\"\" \n with mn.model() as m:\n Baz = mn.variable('Baz', \n self.drg(self.site(7, 9), self.site(18, 4), self.site(6, 11)))\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n self.drg(self.site(0, 0), self.site(0, 0), self.site(0, 0)))\n m.step()\n self.assertEqual(\n Corge[''], \n self.drg(self.site(8, 10), self.site(19, 5), self.site(7, 12)))\n m.step(2)\n self.assertEqual(\n Corge[''], \n self.drg(self.site(24, 30), self.site(57, 15), self.site(21, 36)))\n\n def test_foreach_stock_timestep(self):\n \"\"\"Does foreach work with stocks and mn named tuples?\"\"\"\n with mn.model(timestep=0.5) as m:\n mn.variable('Baz', self.drg(12, 13, 19))\n Corge = mn.stock('Corge', \n mn.foreach(lambda b: b+2), ('Baz',), \n self.drg(0, 0, 0))\n m.step()\n self.assertEqual(Corge[''], self.drg(7, 7.5, 10.5))\n m.step(2)\n 
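        # With timestep=0.5, each step adds half of the per-unit-time flow.
        # The flow here is foreach(lambda b: b+2) over Baz = drg(12, 13, 19),
        # i.e. drg(14, 15, 21) per unit time, so one step adds
        # drg(7, 7.5, 10.5) and the three steps taken so far give
        # drg(21, 22.5, 31.5), matching the assertion below.
        # (Descriptive comment only; no behavior change.)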
self.assertEqual(Corge[''], self.drg(21, 22.5, 31.5))\n\n def test_foreach_stock_multivariable(self):\n \"\"\"Does foreach work with stocks that have multiple variables?\"\"\"\n with mn.model() as m:\n mn.variable('Baz', self.drg(12, 13, 19))\n mn.variable('Quz', self.drg(1, 2, 3))\n Corge = mn.stock('Corge', \n mn.foreach(lambda b, q: b+q), ('Baz', 'Quz'), \n self.drg(0, 0, 0))\n m.step()\n self.assertEqual(Corge[''], self.drg(13, 15, 22))\n m.step(2)\n self.assertEqual(Corge[''], self.drg(39, 45, 66))\n\n def test_foreach_accum(self):\n \"\"\"Does foreach work with accums and mn named tuples?\"\"\"\n with mn.model() as m:\n mn.variable('Baz', self.drg(12, 13, 19))\n Corge = mn.accum('Corge', \n mn.foreach(lambda b: b+2), ('Baz',), \n self.drg(0, 0, 0))\n m.step()\n self.assertEqual(Corge[''], self.drg(14, 15, 21))\n m.step(2)\n self.assertEqual(Corge[''], self.drg(42, 45, 63))\n\n def test_nested_foreach_accum(self):\n \"\"\"Do nested foreaches work with accums and named tuples?\"\"\" \n with mn.model() as m:\n Baz = mn.variable('Baz', \n self.drg(self.site(7, 9), self.site(18, 4), self.site(6, 11)))\n Corge = mn.accum('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n self.drg(self.site(0, 0), self.site(0, 0), self.site(0, 0)))\n m.step()\n self.assertEqual(\n Corge[''], \n self.drg(self.site(8, 10), self.site(19, 5), self.site(7, 12)))\n m.step(2)\n self.assertEqual(\n Corge[''], \n self.drg(self.site(24, 30), self.site(57, 15), self.site(21, 36)))\n\n\nclass ForeachMixed(unittest.TestCase):\n \"\"\"For testing the foreach construct on mixed data.\"\"\"\n def setUp(self):\n self.drg = collections.namedtuple('drg', ['drg001', 'drg003', 'drg257'])\n self.site = collections.namedtuple('site', ['traditional', 'rrc'])\n\n def test_dict_tuple(self):\n \"\"\"Do nested foreaches work with tuples inside dicts?\"\"\"\n with mn.model() as m:\n Baz = mn.variable('Baz', \n {'drg001': (7, 9), 'drg003': (18, 4), 'drg257': (6, 11)})\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n {'drg001': (0, 0), 'drg003': (0, 0), 'drg257': (0,0)})\n m.step()\n self.assertEqual(\n Corge[''], \n {'drg001': (8, 10), 'drg003': (19, 5), 'drg257': (7, 12)})\n m.step(2)\n self.assertEqual(\n Corge[''], \n {'drg001': (24, 30), 'drg003': (57, 15), 'drg257': (21, 36)})\n\n def test_dict_namedtuple(self):\n \"\"\"Does nested foreaches work with named tuples inside dicts?\"\"\"\n with mn.model():\n Baz = mn.variable('Baz', mn.foreach(mn.foreach(lambda x: x+1)), 'Grault')\n Grault = mn.constant('Grault',\n {'drg001': self.site(7, 9),\n 'drg003': self.site(18, 4),\n 'drg257': self.site(6, 11)})\n self.assertEqual(\n Baz[''],\n {'drg001': self.site(8, 10),\n 'drg003': self.site(19, 5),\n 'drg257': self.site(7, 12)})\n\n def test_namedtuple_tuple(self):\n \"\"\"Do nested foreaches work with tuples inside named tuples?\"\"\"\n with mn.model() as m:\n Baz = mn.variable('Baz', \n self.drg((7, 9), (18, 4), (6, 11)))\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n self.drg((0, 0), (0, 0), (0, 0)))\n m.step()\n self.assertEqual(\n Corge[''], \n self.drg((8, 10), (19, 5), (7, 12)))\n m.step(2)\n self.assertEqual(\n Corge[''], \n self.drg((24, 30), (57, 15), (21, 36)))\n\n def test_namedtuple_dict(self):\n \"\"\"Do nested foreaches work with dicts inside named tuples?\"\"\"\n with mn.model() as m:\n Baz = mn.variable('Baz', \n self.drg({'trad': 7, 'rrc': 9}, \n {'trad': 18, 'rrc': 4}, \n {'trad': 6, 'rrc': 11}))\n Corge = mn.stock('Corge',\n 
mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n self.drg({'trad': 0, 'rrc': 0}, \n {'trad': 0, 'rrc': 0}, \n {'trad': 0, 'rrc': 0}))\n m.step()\n self.assertEqual(\n Corge[''], \n self.drg({'trad': 8, 'rrc': 10}, \n {'trad': 19, 'rrc': 5}, \n {'trad': 7, 'rrc': 12}))\n m.step(2)\n self.assertEqual(\n Corge[''], \n self.drg({'trad': 24, 'rrc': 30}, \n {'trad': 57, 'rrc': 15}, \n {'trad': 21, 'rrc': 36}))\n\n def test_tuple_namedtuple(self):\n \"\"\"Do nested foreaches work with named tuples inside tuples?\"\"\"\n with mn.model() as m:\n Baz = mn.variable('Baz', \n (self.site(7, 9), self.site(18, 4), self.site(6, 11)))\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n (self.site(0, 0), self.site(0, 0), self.site(0, 0)))\n m.step()\n self.assertEqual(\n Corge[''], \n (self.site(8, 10), self.site(19, 5), self.site(7, 12)))\n m.step(2)\n self.assertEqual(\n Corge[''], \n (self.site(24, 30), self.site(57, 15), self.site(21, 36)))\n\n def test_tuple_dict(self):\n \"\"\"Do nested foreaches work with dicts inside tuples?\"\"\"\n with mn.model() as m:\n Baz = mn.variable('Baz', \n ({'trad': 7, 'rrc': 9}, {'trad': 18, 'rrc': 4}, \n {'trad': 6, 'rrc': 11}))\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n ({'trad': 0, 'rrc': 0}, {'trad': 0, 'rrc': 0},\n {'trad': 0, 'rrc': 0}))\n m.step()\n self.assertEqual(\n Corge[''], \n ({'trad': 8, 'rrc': 10}, {'trad': 19, 'rrc': 5},\n {'trad': 7, 'rrc': 12}))\n m.step(2)\n self.assertEqual(\n Corge[''], \n ({'trad': 24, 'rrc': 30}, {'trad': 57, 'rrc': 15},\n {'trad': 21, 'rrc': 36}))\n\nclass Previous(unittest.TestCase):\n \"\"\"For testing previous\"\"\"\n def test_previous(self):\n \"\"\"Does a simple value of previous work, with a stock?\"\"\"\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n LastFoo = mn.previous('LastFoo', 'Foo')\n\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 1)\n m.step()\n self.assertEqual(LastFoo[''], 2)\n m.reset()\n self.assertEqual(LastFoo[''], 0)\n\n def test_previous_reversed_order(self):\n \"\"\"Does a simple value of previous work, with a stock?\"\"\"\n with mn.model() as m:\n LastFoo = mn.previous('LastFoo', 'Baz')\n mn.variable('Baz', lambda x: x, 'Foo')\n mn.stock('Foo', 1, 0)\n\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 1)\n m.step()\n self.assertEqual(LastFoo[''], 2)\n m.reset()\n self.assertEqual(LastFoo[''], 0)\n\n def test_previous_with_docstring(self):\n \"\"\"Does a simple value of previous work, with a stock?\"\"\"\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n LastFoo = mn.previous('LastFoo', 'Simple previous', 'Foo')\n\n self.assertEqual(LastFoo.__doc__, 'Simple previous')\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 1)\n m.step()\n self.assertEqual(LastFoo[''], 2)\n m.reset()\n self.assertEqual(LastFoo[''], 0)\n\n def test_previous_small_timestep(self):\n \"\"\"Does a simple value of previous work, with non-1 timestep?\"\"\"\n with mn.model(timestep=0.5) as m:\n mn.stock('Foo', 1, 0)\n LastFoo = mn.previous('LastFoo', 'Foo')\n\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 0.5)\n m.step()\n self.assertEqual(LastFoo[''], 1)\n m.reset()\n self.assertEqual(LastFoo[''], 0)\n\n def test_previous_with_treatments(self):\n \"\"\"Does a simple value 
of previous work, with treatments?\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n mn.stock('Foo', mn.PerTreatment({'As is': 1, 'To be': 2}), 0)\n LastFoo = mn.previous('LastFoo', 'Foo')\n\n self.assertEqual(LastFoo['As is'], 0)\n self.assertEqual(LastFoo['To be'], 0)\n m.step()\n self.assertEqual(LastFoo['As is'], 0)\n self.assertEqual(LastFoo['To be'], 0) \n m.step()\n self.assertEqual(LastFoo['As is'], 1)\n self.assertEqual(LastFoo['To be'], 2)\n m.step()\n self.assertEqual(LastFoo['As is'], 2)\n self.assertEqual(LastFoo['To be'], 4)\n m.reset()\n self.assertEqual(LastFoo['As is'], 0)\n self.assertEqual(LastFoo['To be'], 0)\n\n def test_previous_with_namedtuple(self):\n \"\"\"Does a simple value of previous work, with a mn_namedtuple?\"\"\"\n Payer = mn.mn_namedtuple(\n 'Payer', ['Medicare', 'Medicaid', 'Commercial'])\n with mn.model() as m:\n mn.stock('Foo', Payer(1, 2, 3), Payer(0, 0, 0))\n LastFoo = mn.previous('LastFoo', 'Foo')\n\n self.assertEqual(LastFoo[''], Payer(0, 0, 0))\n m.step()\n self.assertEqual(LastFoo[''], Payer(0, 0, 0))\n m.step()\n self.assertEqual(LastFoo[''], Payer(1, 2, 3))\n m.step()\n self.assertEqual(LastFoo[''], Payer(2, 4, 6))\n m.reset()\n self.assertEqual(LastFoo[''], Payer(0, 0, 0))\n\n def test_previous_with_initial_value(self):\n \"\"\"Does a simple value of previous work, with an initial value?\"\"\"\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n LastFoo = mn.previous('LastFoo', 'Foo', 0.3)\n\n self.assertEqual(LastFoo[''], 0.3)\n m.step()\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 1)\n m.step()\n self.assertEqual(LastFoo[''], 2)\n m.reset()\n self.assertEqual(LastFoo[''], 0.3)\n\n def test_previous_with_initial_value_reversed_order(self):\n \"\"\"Does a simple value of previous work, with an initial value?\"\"\"\n with mn.model() as m:\n LastFoo = mn.previous('LastFoo', 'Foo', 0.3)\n mn.stock('Foo', 1, 0)\n\n self.assertEqual(LastFoo[''], 0.3)\n m.step()\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 1)\n m.step()\n self.assertEqual(LastFoo[''], 2)\n m.reset()\n self.assertEqual(LastFoo[''], 0.3)\n\n def test_previous_with_initial_value_and_docstring(self):\n \"\"\"Does a simple value of previous work, with an initial value?\"\"\"\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n LastFoo = mn.previous('LastFoo', 'docstring', 'Foo', 0.3)\n\n self.assertEqual(LastFoo.__doc__, 'docstring')\n self.assertEqual(LastFoo[''], 0.3)\n m.step()\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 1)\n m.step()\n self.assertEqual(LastFoo[''], 2)\n m.reset()\n self.assertEqual(LastFoo[''], 0.3)\n\n def test_previous_with_constant(self):\n \"\"\"Does a previous of a constant work?\"\"\"\n with mn.model() as m:\n mn.constant('Foo', 12)\n LastFoo = mn.previous('LastFoo', 'Foo', 0)\n\n self.assertEqual(LastFoo[''], 12)\n m.step()\n self.assertEqual(LastFoo[''], 12)\n m.step()\n self.assertEqual(LastFoo[''], 12)\n m.reset()\n self.assertEqual(LastFoo[''], 12)\n m.step()\n self.assertEqual(LastFoo[''], 12)\n\n def test_previous_with_circularity(self):\n \"\"\"Does a previous work when it defines an apparent circularity?\"\"\"\n with mn.model() as m:\n mn.previous('LastFoo', 'Foo', 0)\n Foo = mn.variable('Foo', lambda x: x + 2, 'LastFoo')\n\n self.assertEqual(Foo[''], 2)\n m.step()\n self.assertEqual(Foo[''], 4)\n m.step()\n self.assertEqual(Foo[''], 6)\n m.reset()\n self.assertEqual(Foo[''], 2)\n m.step()\n self.assertEqual(Foo[''], 4)\n\n def 
test_self_previous(self):\n \"\"\"Does a previous work when it refers to itself?\"\"\"\n with mn.model() as m:\n Foo = mn.previous('Foo', 'Foo', 0)\n\n self.assertEqual(Foo[''], 0)\n m.step()\n self.assertEqual(Foo[''], 0)\n m.step()\n self.assertEqual(Foo[''], 0)\n m.reset()\n self.assertEqual(Foo[''], 0)\n m.step()\n self.assertEqual(Foo[''], 0)\n\n def test_set_previous(self):\n \"\"\"Does setting the amount of a previous raise an error?\"\"\"\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n LastFoo = mn.previous('LastFoo', 'docstring', 'Foo', 0.3)\n with self.assertRaises(mn.MinnetonkaError) as me:\n LastFoo[''] = 12\n self.assertEqual(\n me.exception.message, \n 'Amount of <Previous LastFoo> cannot be changed outside model logic'\n )\n\n\nclass OldValues(unittest.TestCase):\n \"\"\"For checking that values are stored every step\"\"\"\n def test_stock_old_values(self):\n \"\"\"Does a stock keep all the old values around?\"\"\"\n with mn.model(treatments=['Bar', 'Baz']) as m:\n Foo = mn.stock('Foo', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0)\n\n m.step(6)\n self.assertEqual(Foo.history('Bar', 0), 0)\n self.assertEqual(Foo.history('Baz', 0), 0)\n self.assertEqual(Foo.history('Bar', 1), 1)\n self.assertEqual(Foo.history('Baz', 1), 2)\n self.assertEqual(Foo.history('Bar', 2), 2)\n self.assertEqual(Foo.history('Baz', 2), 4)\n self.assertEqual(Foo.history('Bar', 3), 3)\n self.assertEqual(Foo.history('Baz', 3), 6)\n self.assertEqual(Foo.history('Bar', 5), 5)\n self.assertEqual(Foo.history('Baz', 5), 10)\n m.reset()\n m.step(2)\n self.assertEqual(Foo.history('Bar', 0), 0)\n self.assertEqual(Foo.history('Baz', 0), 0)\n self.assertEqual(Foo.history('Bar', 1), 1)\n self.assertEqual(Foo.history('Baz', 1), 2)\n with self.assertRaises(mn.MinnetonkaError) as me:\n Foo.history('Bar', 3)\n self.assertEqual(\n me.exception.message, \"Foo['Bar'] has no value for step 3\")\n\n def test_variable_old_values(self):\n \"\"\"Does a variable keep all the old values around?\"\"\"\n with mn.model(treatments=['Bar', 'Baz']) as m:\n mn.stock('Foo', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0)\n Quz = mn.variable('Quz', lambda x: x, 'Foo')\n\n m.step(6)\n self.assertEqual(Quz.history('Bar', 0), 0)\n self.assertEqual(Quz.history('Baz', 0), 0)\n self.assertEqual(Quz.history('Bar', 1), 1)\n self.assertEqual(Quz.history('Baz', 1), 2)\n self.assertEqual(Quz.history('Bar', 2), 2)\n self.assertEqual(Quz.history('Baz', 2), 4)\n self.assertEqual(Quz.history('Bar', 3), 3)\n self.assertEqual(Quz.history('Baz', 3), 6)\n self.assertEqual(Quz.history('Bar', 5), 5)\n self.assertEqual(Quz.history('Baz', 5), 10)\n m.reset()\n m.step(2)\n self.assertEqual(Quz.history('Bar', 0), 0)\n self.assertEqual(Quz.history('Baz', 0), 0)\n self.assertEqual(Quz.history('Bar', 1), 1)\n self.assertEqual(Quz.history('Baz', 1), 2)\n with self.assertRaises(mn.MinnetonkaError) as me:\n Quz.history('Bar', 3)\n self.assertEqual(\n me.exception.message, \"Quz['Bar'] has no value for step 3\")\n\n def test_accum_old_values(self):\n \"\"\"Does an accum keep all the old values around?\"\"\"\n with mn.model(treatments=['Bar', 'Baz']) as m:\n Foo = mn.accum('Foo', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0)\n\n m.step(6)\n self.assertEqual(Foo.history('Bar', 0), 0)\n self.assertEqual(Foo.history('Baz', 0), 0)\n self.assertEqual(Foo.history('Bar', 1), 1)\n self.assertEqual(Foo.history('Baz', 1), 2)\n self.assertEqual(Foo.history('Bar', 2), 2)\n self.assertEqual(Foo.history('Baz', 2), 4)\n self.assertEqual(Foo.history('Bar', 3), 3)\n 
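        # history(treatment, step) reads the value recorded at that step:
        # Foo grows by 1 per step in 'Bar' and by 2 per step in 'Baz', so
        # history('Bar', n) == n and history('Baz', n) == 2 * n for any step
        # that has actually run; asking for a step that was never reached
        # (after the reset below) raises MinnetonkaError.
        # (Descriptive comment only; no behavior change.)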
self.assertEqual(Foo.history('Baz', 3), 6)\n self.assertEqual(Foo.history('Bar', 5), 5)\n self.assertEqual(Foo.history('Baz', 5), 10)\n m.reset()\n m.step(2)\n self.assertEqual(Foo.history('Bar', 0), 0)\n self.assertEqual(Foo.history('Baz', 0), 0)\n self.assertEqual(Foo.history('Bar', 1), 1)\n self.assertEqual(Foo.history('Baz', 1), 2)\n with self.assertRaises(mn.MinnetonkaError) as me:\n Foo.history('Bar', 3)\n self.assertEqual(\n me.exception.message, \"Foo['Bar'] has no value for step 3\")\n\n def test_constant_old_values(self):\n \"\"\"Does a constant do the right thing for history() calls?\"\"\"\n with mn.model(treatments=['Bar', 'Baz']) as m:\n Quz = mn.constant('Quz', mn.PerTreatment({'Bar': 9, 'Baz':10}))\n\n m.step(6)\n self.assertEqual(Quz.history('Bar', 0), 9)\n self.assertEqual(Quz.history('Baz', 0), 10) \n self.assertEqual(Quz.history('Bar', 3), 9)\n self.assertEqual(Quz.history('Baz', 3), 10)\n m.reset()\n m.step(2)\n self.assertEqual(Quz.history('Bar', 0), 9)\n self.assertEqual(Quz.history('Baz', 0), 10) \n self.assertEqual(Quz.history('Bar', 99), 9)\n self.assertEqual(Quz.history('Baz', 99), 10)\n\n def test_old_derived_values(self):\n \"\"\"Does history do the right thing if the treatment is derived?\"\"\"\n with mn.model(treatments=['Bar', 'Baz'],\n derived_treatments={'Quz': mn.AmountBetter('Baz', 'Bar')}\n ) as m:\n Foo = mn.stock('Foo', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0\n ).derived()\n\n m.step(6)\n self.assertEqual(Foo.history('Quz', 0), 0) \n self.assertEqual(Foo.history('Quz', 1), 1) \n self.assertEqual(Foo.history('Quz', 2), 2) \n self.assertEqual(Foo.history('Baz', 1), 2)\n \n\nclass ModelHistory(unittest.TestCase):\n \"\"\"Testing history of the whole model.\"\"\"\n def test_history(self):\n \"\"\"Test history of several variables and two treatments.\"\"\"\n with mn.model(treatments=['Bar', 'Baz']) as m:\n Foo = mn.stock('Foo', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0)\n Quz = mn.variable('Quz', lambda x: x, 'Foo')\n Corge = mn.accum('Corge', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0)\n Grault = mn.constant('Grault', mn.PerTreatment({'Bar': 9, 'Baz':10}))\n Thud = mn.variable('Thud', lambda x: x, 'Foo').no_history()\n\n self.assertEqual(\n m.history(),\n {\n 'Foo': {'Bar': [0], 'Baz': [0]},\n 'Quz': {'Bar': [0], 'Baz': [0]},\n 'Corge': {'Bar': [0], 'Baz': [0]} \n })\n\n m.step(10)\n\n self.assertEqual(\n m.history(),\n {\n 'Foo': {\n 'Bar': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 'Baz': [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\n },\n 'Quz': {\n 'Bar': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 'Baz': [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\n },\n 'Corge': {\n 'Bar': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 'Baz': [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\n }\n })\n\n\n def test_derived_history(self):\n \"\"\"Test history of several variables and two treatments.\"\"\"\n with mn.model(treatments=['Bar', 'Baz'], \n derived_treatments={\n 'Plugh': mn.AmountBetter('Baz', 'Bar')}\n ) as m:\n Foo = mn.stock('Foo', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0\n ).derived()\n Quz = mn.variable('Quz', lambda x: x, 'Foo'\n ).derived(scored_as='golf')\n Corge = mn.accum('Corge', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0\n ).derived()\n Grault = mn.constant('Grault', mn.PerTreatment({'Bar': 9, 'Baz':10})\n ).derived()\n Thud = mn.variable('Thud', lambda x: x, 'Foo').no_history(\n ).derived()\n Fred = mn.variable('Fred', \n lambda foo, quz: {'foo': foo, 'quz': quz},\n 'Foo', 'Quz'\n ).derived(scored_as='combo')\n\n self.assertEqual(\n m.history(),\n {\n 'Foo': {'Plugh': [0]},\n 'Quz': 
{'Plugh': [0]},\n 'Corge': {'Plugh': [0]},\n 'Fred': {'Plugh': [{'foo': 0, 'quz': 0}]}\n })\n\n m.step(10)\n\n self.assertEqual(\n m.history(),\n {\n 'Foo': {'Plugh': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},\n 'Quz': {'Plugh': [0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10]},\n 'Corge': {'Plugh': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},\n 'Fred': {'Plugh': [\n {'foo': 0, 'quz': 0}, {'foo': 1, 'quz': -1}, \n {'foo': 2, 'quz': -2}, {'foo': 3, 'quz': -3}, \n {'foo': 4, 'quz': -4}, {'foo': 5, 'quz': -5}, \n {'foo': 6, 'quz': -6}, {'foo': 7, 'quz': -7}, \n {'foo': 8, 'quz': -8}, {'foo': 9, 'quz': -9}, \n {'foo': 10, 'quz': -10}]}\n })\n\n self.assertEqual(\n m.history(base=True)['Foo']['Bar'],\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] )\n self.assertEqual(\n m.history(base=True)['Quz']['Baz'],\n [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20] )\n self.assertEqual(\n m.history(base=True)['Corge']['Bar'],\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] )\n\n\nclass Unitary(unittest.TestCase):\n \"\"\"For testing variables that do not differ by treatment.\"\"\"\n\n def assert_unitary(self, variable):\n \"\"\"Assert that this variable is the same for 'As is' and 'To be'.\"\"\"\n self.assertEqual(\n variable.by_treatment('As is'), variable.by_treatment('To be'))\n\n def assert_not_unitary(self, variable):\n \"\"\"Assert that this variable is not the same for 'As is' and 'To be'.\"\"\"\n self.assertNotEqual(\n variable.by_treatment('As is'), variable.by_treatment('To be'))\n\n def test_simple_variable(self):\n \"\"\"Test whether a simple variable can be unitary.\"\"\"\n with mn.model(treatments=['As is', 'To be']):\n Bar = mn.variable('Bar', 12)\n\n self.assert_unitary(Bar)\n self.assertEqual(Bar['As is'], 12)\n self.assertEqual(Bar['To be'], 12)\n Bar['As is'] = 13\n self.assertEqual(Bar['As is'], 13)\n self.assertEqual(Bar['To be'], 13)\n\n def test_per_treatment(self):\n \"\"\"Test whether a variable defined with PerTreatment is unitary.\"\"\"\n with mn.model(treatments=['As is', 'To be']):\n Foo = mn.variable('Foo', mn.PerTreatment({'As is': 12, 'To be': 13}))\n\n self.assert_not_unitary(Foo)\n self.assertEqual(Foo['As is'], 12)\n self.assertEqual(Foo['To be'], 13)\n Foo['As is'] = 14\n self.assertEqual(Foo['As is'], 14)\n self.assertEqual(Foo['To be'], 13)\n\n def test_variables_that_depend(self):\n \"\"\"Test whether a variable that depends on a unitary is unitary.\"\"\"\n with mn.model(treatments=['As is', 'To be']):\n Unitary = mn.variable('Unitary', 12)\n Fragmented = mn.variable('Fragmented', \n mn.PerTreatment({'As is': 12, 'To be': 13}))\n DependsOnUnitary = mn.variable('DependsOnUnitary', \n lambda x: x, 'Unitary')\n DependsOnFragmented = mn.variable('DependsOnFragmented', \n lambda x: x, 'Fragmented')\n \n self.assert_unitary(DependsOnUnitary)\n self.assert_not_unitary(DependsOnFragmented)\n\n def test_unitary_stock(self):\n \"\"\"Test whether a stock can be unitary.\"\"\"\n with mn.model(treatments=['As is', 'To be']):\n SimpleStock = mn.stock('SimpleStock', 1, 1)\n UnitaryVar = mn.variable('UnitaryVar', 2)\n FragmentedVar = mn.variable('FragmentedVar', \n mn.PerTreatment({'As is': 2, 'To be': 3}))\n UnitaryStock = mn.stock('UnitaryStock', \n lambda x: x, ('UnitaryVar',), 0)\n FragmentedStock1 = mn.stock('FragmentedStock1',\n lambda x: x, ('FragmentedVar',), \n lambda x: x, ('UnitaryVar',))\n FragmentedStock2 = mn.stock('FragmentedStock2',\n lambda x: x, ('UnitaryVar',),\n lambda x: x, ('FragmentedVar',))\n FragmentedStock3 = mn.stock('FragmentedStock3',\n lambda x: x, ('UnitaryVar',),\n lambda x: x, 
('FragmentedVar',))\n\n self.assert_unitary(SimpleStock)\n self.assert_unitary(UnitaryStock)\n self.assert_not_unitary(FragmentedStock1)\n self.assert_not_unitary(FragmentedStock2)\n self.assert_not_unitary(FragmentedStock3)\n\n def test_unitary_accum(self):\n \"\"\"Test whether an accum can be unitary.\"\"\"\n with mn.model(treatments=['As is', 'To be']):\n SimpleAccum = mn.accum('SimpleAccum', 1, 1)\n UnitaryVar = mn.variable('UnitaryVar', 2)\n FragmentedVar = mn.variable('FragmentedVar', \n mn.PerTreatment({'As is': 2, 'To be': 3}))\n UnitaryAccum = mn.accum('UnitaryAccum', \n lambda x: x, ('UnitaryVar',), 0)\n FragmentedAccum1 = mn.accum('FragmentedAccum1',\n lambda x: x, ('FragmentedVar',), \n lambda x: x, ('UnitaryVar',))\n FragmentedAccum2 = mn.accum('FragmentedAccum2',\n lambda x: x, ('UnitaryVar',),\n lambda x: x, ('FragmentedVar',))\n FragmentedAccum3 = mn.accum('FragmentedAccum3',\n lambda x: x, ('UnitaryVar',),\n lambda x: x, ('FragmentedVar',))\n\n self.assert_unitary(SimpleAccum)\n self.assert_unitary(UnitaryAccum)\n self.assert_not_unitary(FragmentedAccum1)\n self.assert_not_unitary(FragmentedAccum2)\n self.assert_not_unitary(FragmentedAccum3)\n\n def test_previous(self):\n \"\"\"Test whether a previous can be unitary.\"\"\"\n with mn.model(treatments=['As is', 'To be']):\n UnitaryVar = mn.variable('UnitaryVar', 2)\n FragmentedVar = mn.variable('FragmentedVar', \n mn.PerTreatment({'As is': 2, 'To be': 3}))\n UnitaryPrevious = mn.previous('UnitaryPrevious', 'UnitaryVar')\n FragmentedPrevious = mn.previous('FragmentedPrevious', 'FragmentedVar')\n\n self.assert_unitary(UnitaryPrevious)\n self.assert_not_unitary(FragmentedPrevious)\n\n def test_causal_loop_unitary(self):\n \"\"\"Test that a simple model with a causal loop is unitary.\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m2:\n InterestRate = mn.constant('InterestRate', 0.04)\n Interest = mn.variable('Interest', \n lambda s, ir: s * ir, 'Savings', 'InterestRate')\n Savings = mn.stock('Savings', lambda i: i, ('Interest',), 1000)\n self.assert_unitary(InterestRate)\n self.assert_unitary(Interest)\n self.assert_unitary(Savings)\n\n def test_causal_loop_not_unitary(self):\n \"\"\"Test that a simple model with a causal loop is not unitary.\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m2:\n InterestRate = mn.constant('InterestRate', \n mn.PerTreatment({'As is': 0.04, 'To be': 0.15}))\n Interest = mn.variable('Interest', \n lambda s, ir: s * ir, 'Savings', 'InterestRate')\n Savings = mn.stock('Savings', lambda i: i, ('Interest',), 1000)\n self.assert_not_unitary(InterestRate)\n self.assert_not_unitary(Interest)\n self.assert_not_unitary(Savings)\n\n def test_unitary_set_warning(self):\n \"\"\"Test that setting a unitary var in one treatment issues warning.\"\"\"\n with mn.model(treatments=['As is', 'To be']):\n InterestRate = mn.constant('InterestRate', 0.03)\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n InterestRate['To be'] = 0.05\n self.assertEqual(len(w), 1)\n self.assertEqual(w[-1].category, mn.MinnetonkaWarning)\n self.assertEqual(str(w[-1].message), \n 'Setting amount of unitary variable InterestRate '+\n 'in only one treatment')\n\n\nclass SafeDiv(unittest.TestCase):\n \"\"\"For testing safe_div\"\"\"\n def test_safe_div(self):\n \"\"\"Testing safe_div\"\"\"\n self.assertEqual(mn.safe_div(5, 4), 1.25)\n self.assertEqual(mn.safe_div(5, 0), 0)\n self.assertEqual(mn.safe_div(5, 0, 1), 1)\n\n\nclass ArrayGraphXYandYX(unittest.TestCase):\n \"\"\"For testing 
array_graph_xy and array_graph_yx\"\"\"\n # should test decreasing Xs and neither increasing nor decreasing errors\n def test_array_graph_xy_tuple(self):\n \"\"\"Testing array_graph_xy() with tuple argument\"\"\"\n XYs = (\n (1, 100), (1.5, 97.24), (2, 92.34), (2.5, 88.41), (3, 85.07), \n (3.5, 80.42), (4, 75.39), (4.5, 66.52), (5, 57.80), (5.5, 47.95), \n (6, 36.47), (6.5, 25.31), (7, 16.71), (7.5, 10.04), (8, 6.19), \n (8.5, 3.35), (9, 2.10), (9.5, 1.01), (10, 0)\n )\n self.assertEqual(mn.array_graph_xy(2, XYs), 92.34)\n self.assertAlmostEqual(mn.array_graph_xy(7.4, XYs), 11.374)\n self.assertEqual(mn.array_graph_xy(11, XYs), 0)\n self.assertEqual(mn.array_graph_xy(1, XYs), 100)\n\n def test_array_graph_xy_array(self):\n \"\"\"Testing array_graph_xy() with numpy array argument\"\"\"\n XYs_tuple = (\n (1, 100), (1.5, 97.24), (2, 92.34), (2.5, 88.41), (3, 85.07), \n (3.5, 80.42), (4, 75.39), (4.5, 66.52), (5, 57.80), (5.5, 47.95), \n (6, 36.47), (6.5, 25.31), (7, 16.71), (7.5, 10.04), (8, 6.19), \n (8.5, 3.35), (9, 2.10), (9.5, 1.01), (10, 0)\n )\n XYs = np.array(XYs_tuple).transpose()\n self.assertEqual(mn.array_graph_xy(2, XYs), 92.34)\n self.assertAlmostEqual(mn.array_graph_xy(7.4, XYs), 11.374)\n self.assertEqual(mn.array_graph_xy(11, XYs), 0)\n self.assertEqual(mn.array_graph_xy(1, XYs), 100)\n\n def test_array_graph_yx_tuple(self):\n \"\"\"Testing array_graph_yx() on a tuple\"\"\"\n XYs = (\n (1, 100), (1.5, 97.24), (2, 92.34), (2.5, 88.41), (3, 85.07), \n (3.5, 80.42), (4, 75.39), (4.5, 66.52), (5, 57.80), (5.5, 47.95), \n (6, 36.47), (6.5, 25.31), (7, 16.71), (7.5, 10.04), (8, 6.19), \n (8.5, 3.35), (9, 2.10), (9.5, 1.01), (10, 0)\n )\n self.assertEqual(mn.array_graph_yx(92.34, XYs), 2)\n self.assertAlmostEqual(mn.array_graph_yx(11.374, XYs), 7.4)\n self.assertEqual(mn.array_graph_yx(0, XYs), 10)\n self.assertEqual(mn.array_graph_yx(100, XYs), 1)\n\n def test_array_graph_yx_array(self):\n \"\"\"Testing array_graph_yx() on numpy array argument\"\"\"\n XYs_tuple = (\n (1, 100), (1.5, 97.24), (2, 92.34), (2.5, 88.41), (3, 85.07), \n (3.5, 80.42), (4, 75.39), (4.5, 66.52), (5, 57.80), (5.5, 47.95), \n (6, 36.47), (6.5, 25.31), (7, 16.71), (7.5, 10.04), (8, 6.19), \n (8.5, 3.35), (9, 2.10), (9.5, 1.01), (10, 0)\n )\n XYs = np.array(XYs_tuple).transpose()\n self.assertEqual(mn.array_graph_yx(92.34, XYs), 2)\n self.assertAlmostEqual(mn.array_graph_yx(11.374, XYs), 7.4)\n self.assertEqual(mn.array_graph_yx(0, XYs), 10)\n self.assertEqual(mn.array_graph_yx(100, XYs), 1)\n\n\nclass AllAmounts(unittest.TestCase):\n def test_all_amounts(self):\n \"\"\"Test all_amounts() for a variety of variables\"\"\"\n\n with mn.model(treatments=['As is', 'To be']) as m:\n Savings = mn.stock(\n 'Savings', lambda interest: interest, ('Interest',), 1000)\n Rate = mn.variable(\n 'Rate', mn.PerTreatment({'As is': 0.05, 'To be': 0.06}))\n Interest = mn.variable(\n 'Interest', lambda savings, rate: savings * rate, \n 'Savings', 'Rate')\n PreviousInterest = mn.previous('PreviousInterest', 'Interest', 0)\n AccumInterest = mn.accum('AccumInterest', \n lambda i: i, ('Interest',), 0)\n\n self.assertEqual(Savings.all(), {'As is': 1000, 'To be': 1000})\n self.assertEqual(Rate.all(), {'As is': 0.05, 'To be': 0.06})\n self.assertEqual(Interest.all(), {'As is': 50.0, 'To be': 60.0})\n self.assertEqual(PreviousInterest.all(), {'As is': 0, 'To be': 0})\n self.assertEqual(AccumInterest.all(), {'As is': 0, 'To be': 0})\n m.step()\n self.assertEqual(Savings.all(), {'As is': 1050, 'To be': 1060})\n self.assertEqual(Rate.all(), {'As is': 0.05, 'To 
be': 0.06})\n self.assertAlmostEqual(Interest.all()['As is'], 52.5)\n self.assertAlmostEqual(Interest.all()['To be'], 63.6)\n self.assertEqual(PreviousInterest.all(), {'As is': 50, 'To be': 60})\n self.assertAlmostEqual(AccumInterest.all()['As is'], 52.5)\n self.assertAlmostEqual(AccumInterest.all()['To be'], 63.6)\n m.step()\n self.assertAlmostEqual(Savings.all()['As is'], 1102.5)\n self.assertAlmostEqual(Savings.all()['To be'], 1123.6)\n self.assertEqual(Rate.all(), {'As is': 0.05, 'To be': 0.06})\n self.assertAlmostEqual(Interest.all()['As is'], 55.125)\n self.assertAlmostEqual(Interest.all()['To be'], 67.416)\n self.assertAlmostEqual(PreviousInterest.all()['As is'], 52.5)\n self.assertAlmostEqual(PreviousInterest.all()['To be'], 63.6)\n self.assertAlmostEqual(AccumInterest.all()['As is'], 107.625)\n self.assertAlmostEqual(AccumInterest.all()['To be'], 131.016)\n m.reset()\n self.assertEqual(Savings.all(), {'As is': 1000, 'To be': 1000})\n self.assertEqual(Rate.all(), {'As is': 0.05, 'To be': 0.06})\n self.assertEqual(Interest.all(), {'As is': 50.0, 'To be': 60.0})\n self.assertEqual(PreviousInterest.all(), {'As is': 0, 'To be': 0})\n self.assertEqual(AccumInterest.all(), {'As is': 0, 'To be': 0})\n m.step(2)\n self.assertAlmostEqual(Savings.all()['As is'], 1102.5)\n self.assertAlmostEqual(Savings.all()['To be'], 1123.6)\n self.assertEqual(Rate.all(), {'As is': 0.05, 'To be': 0.06})\n self.assertAlmostEqual(Interest.all()['As is'], 55.125)\n self.assertAlmostEqual(Interest.all()['To be'], 67.416)\n self.assertAlmostEqual(PreviousInterest.all()['As is'], 52.5)\n self.assertAlmostEqual(PreviousInterest.all()['To be'], 63.6)\n self.assertAlmostEqual(AccumInterest.all()['As is'], 107.625)\n self.assertAlmostEqual(AccumInterest.all()['To be'], 131.016)\n\nclass StrAndRepr(unittest.TestCase):\n def test_str(self):\n \"\"\"Test str() for a variety of variables\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n Savings = mn.stock(\n 'Savings', lambda interest: interest, ('Interest',), 1000)\n Rate = mn.constant(\n 'Rate', mn.PerTreatment({'As is': 0.05, 'To be': 0.06}))\n Interest = mn.variable(\n 'Interest', lambda savings, rate: savings * rate, \n 'Savings', 'Rate')\n PreviousInterest = mn.previous('PreviousInterest', 'Interest', 0)\n AccumInterest = mn.accum('AccumInterest', \n lambda i: i, ('Interest',), 0)\n\n self.assertEqual(str(Savings), \"<Stock Savings>\")\n self.assertEqual(str(Rate), \"<Constant Rate>\")\n self.assertEqual(str(Interest), \"<Variable Interest>\")\n self.assertEqual(str(PreviousInterest), \"<Previous PreviousInterest>\")\n self.assertEqual(str(AccumInterest), \"<Accum AccumInterest>\")\n\n def test_repr(self):\n \"\"\"Test repr() for a variety of variables\"\"\"\n with mn.model(treatments=['As is', 'To be']) as m:\n Savings = mn.stock(\n 'Savings', lambda interest: interest, ('Interest',), 1000)\n Rate = mn.constant(\n 'Rate', mn.PerTreatment({'As is': 0.05, 'To be': 0.06}))\n Interest = mn.variable(\n 'Interest', lambda savings, rate: savings * rate, \n 'Savings', 'Rate')\n PreviousInterest = mn.previous('PreviousInterest', 'Interest', 0)\n AccumInterest = mn.accum('AccumInterest', \n lambda i: i, ('Interest',), 0)\n\n self.assertEqual(repr(Savings), \"stock('Savings')\")\n self.assertEqual(repr(Rate), \"constant('Rate')\")\n self.assertEqual(repr(Interest), \"variable('Interest')\")\n self.assertEqual(repr(PreviousInterest), \"previous('PreviousInterest')\")\n self.assertEqual(repr(AccumInterest), \"accum('AccumInterest')\")\n\ndef bolded(string):\n 
\"\"\"Return the string in bold.\"\"\"\n return '\\033[1m' + string + '\\033[0m'\n\nclass ShowVariable(unittest.TestCase):\n \"\"\"Test .show() on varaibles.\"\"\"\n\n # adapted from https://bit.ly/2vXanhu\n @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)\n def assert_show(self, variable, expected_output, expected_result, \n mock_stdout):\n \"\"\"Test that var.show() matches expectations.\"\"\"\n res = variable.show()\n self.assertEqual(mock_stdout.getvalue(), expected_output)\n self.assertEqual(res, expected_result)\n\n def test_show(self):\n \"\"\"Test that show provides everything important about a variable\"\"\"\n\n with mn.model(treatments=['As is', 'To be']) as m:\n Savings = mn.stock(\n 'Savings', \n lambda interest: interest, \n ('Interest',), 1000)\n Rate = mn.variable(\n 'Rate', mn.PerTreatment({'As is': 0.05, 'To be': 0.06}))\n Interest = mn.variable(\n 'Interest', \n lambda savings, rate: savings * rate, \n 'Savings', 'Rate')\n PreviousInterest = mn.previous('PreviousInterest', 'Interest', 0)\n AccumInterest = mn.accum('AccumInterest', \n lambda i: i,\n ('Interest',), 0)\n NoFutureSavings = mn.variable(\n 'NoFutureSavings', lambda s: s, 'Savings'\n ).undefined_in('To be')\n\n self.assert_show(\n Savings, \n bolded(\"Stock: Savings\") + \"\"\"\n\nAmounts: {'As is': 1000, 'To be': 1000}\n\nInitial definition: 1000\nInitial depends on: []\n\nIncremental definition: lambda interest: interest\nIncremental depends on: ['Interest']\n\"\"\",\n [Interest])\n\n self.assert_show(\n NoFutureSavings, \n bolded(\"Variable: NoFutureSavings\") + \"\"\"\n\nAmounts: {'As is': 1000}\n\nDefinition: 'NoFutureSavings', lambda s: s, 'Savings'\nDepends on: ['Savings']\n\"\"\",\n [Savings])\n\n self.assert_show(\n Rate, \n bolded(\"Variable: Rate\") + \"\"\"\n\nAmounts: {'As is': 0.05, 'To be': 0.06}\n\nDefinition: PerTreatment({\"As is\": 0.05, \"To be\": 0.06})\nDepends on: []\n\"\"\",\n [])\n\n self.assert_show(\n Interest,\n bolded(\"Variable: Interest\") + \"\"\"\n\nAmounts: {'As is': 50.0, 'To be': 60.0}\n\nDefinition: lambda savings, rate: savings * rate\nDepends on: ['Savings', 'Rate']\n\"\"\",\n [Savings, Rate])\n\n self.assert_show(\n AccumInterest,\n bolded(\"Accum: AccumInterest\") + \"\"\"\n\nAmounts: {'As is': 0, 'To be': 0}\n\nInitial definition: 0\nInitial depends on: []\n\nIncremental definition: lambda i: i\nIncremental depends on: ['Interest']\n\"\"\",\n [Interest])\n\n self.assert_show(\n PreviousInterest,\n bolded(\"Previous: PreviousInterest\") + \"\"\"\n\nAmounts: {'As is': 0, 'To be': 0}\n\nPrevious variable: Interest\n\"\"\",\n [Interest])\n\n\nclass ValidateAndSetTest(unittest.TestCase):\n \"\"\"Test Model.validate_and_set().\"\"\"\n def test_no_validator(self):\n \"\"\"Test Model.validate_and_set() when no validator is defined.\"\"\"\n with mn.model() as m:\n InterestRate = mn.constant('InterestRate', 0.04)\n self.assertEqual(\n m.validate_and_set('InterestRate', '', 0.05),\n {\n 'success': True, \n 'variable': 'InterestRate',\n 'treatment': '',\n 'amount': 0.05\n })\n self.assertEqual(InterestRate[''], 0.05)\n\n def test_bad_variable(self):\n \"\"\"Test Model.validate_and_set() with a bad variable.\"\"\"\n with mn.model() as m:\n mn.constant('InterestRate', 0.04)\n self.assertEqual(\n m.validate_and_set('InterestRat', '', 0.05),\n {\n 'success': False, \n 'variable': 'InterestRat',\n 'treatment': '',\n 'amount': 0.05,\n 'error_code': 'UnknownVariable',\n 'error_message': 'Variable InterestRat not known.',\n })\n\n def test_bad_treatment(self):\n \"\"\"Test 
Model.validate_and_set() with a bad treatment.\"\"\"\n with mn.model(treatments=['foo']) as m:\n mn.constant('InterestRate', 0.04)\n self.assertEqual(\n m.validate_and_set('InterestRate', 'bar', 0.05),\n {\n 'success': False, \n 'variable': 'InterestRate',\n 'treatment': 'bar',\n 'amount': 0.05,\n 'error_code': 'UnknownTreatment',\n 'error_message': 'Treatment bar not known.',\n })\n\n def test_one_validator(self):\n \"\"\"Test Model.validate_and_set() with a validator defined.\"\"\"\n with mn.model(treatments=['current', 'possible']) as m:\n mn.constant('InterestRate', 0.04).constraint(\n lambda amt: amt > 0,\n \"TooSmall\",\n lambda amt, nm: f'{nm} is {amt}; must be greater than 0.',\n 0.01)\n self.assertEqual(\n m.validate_and_set('InterestRate', '__all__', 0.05),\n {\n 'success': True,\n 'variable': 'InterestRate',\n 'treatment': '__all__',\n 'amount': 0.05\n })\n self.assertEqual(\n m.validate_and_set('InterestRate', 'possible', 0.0),\n {\n 'success': False, \n 'variable': 'InterestRate',\n 'treatment': 'possible',\n 'amount': 0,\n 'error_code': 'TooSmall',\n 'error_message': 'InterestRate is 0.0; must be greater than 0.',\n 'suggested_amount': 0.01\n })\n\n def test_two_validators(self):\n \"\"\"Test Model.validate_and_set() with two validators defined.\"\"\"\n with mn.model(treatments=['current', 'possible']) as m:\n mn.constant('InterestRate', 0.04).constraint(\n lambda amt: amt > 0,\n \"TooSmall\",\n lambda amt, nm: f'{nm} is {amt}; must be greater than 0.',\n 0.01\n ).constraint(\n lambda amt: amt <= 1.0,\n \"TooLarge\",\n lambda amt, nm: f'{nm} is {amt}; should be less than 100%.'\n )\n self.assertEqual(\n m.validate_and_set('InterestRate', '__all__', 0.05),\n {\n 'success': True,\n 'variable': 'InterestRate',\n 'treatment': '__all__',\n 'amount': 0.05\n })\n self.assertEqual(\n m.validate_and_set('InterestRate', 'possible', 0.0),\n {\n 'success': False, \n 'variable': 'InterestRate',\n 'treatment': 'possible',\n 'amount': 0,\n 'error_code': 'TooSmall',\n 'error_message': 'InterestRate is 0.0; must be greater than 0.',\n 'suggested_amount': 0.01\n })\n self.assertEqual(\n m.validate_and_set('InterestRate', 'current', 2.5),\n {\n 'success': False, \n 'variable': 'InterestRate',\n 'treatment': 'current',\n 'amount': 2.5,\n 'error_code': 'TooLarge',\n 'error_message': 'InterestRate is 2.5; should be less than 100%.'\n })\n\n def test_alternative_validator(self):\n \"\"\"Test something that implements the validate().\"\"\"\n class _FakeValidator:\n @classmethod\n def validate(cls, amount, name):\n if amount > 0:\n return True, None, None, None\n else:\n return False, \"Bad\", \"Really bad\", 1 \n\n with mn.model(treatments=['current', 'possible']) as m:\n mn.constant('InterestRate', 0.04).constraint(_FakeValidator)\n\n self.assertEqual(\n m.validate_and_set('InterestRate', '__all__', 0.5),\n {\n 'success': True,\n 'variable': 'InterestRate',\n 'treatment': '__all__',\n 'amount': 0.5\n })\n\n self.assertEqual(\n m.validate_and_set('InterestRate', '__all__', -99),\n {\n 'success': False,\n 'variable': 'InterestRate',\n 'treatment': '__all__',\n 'amount': -99,\n 'error_code': 'Bad',\n 'error_message': 'Really bad',\n 'suggested_amount': 1\n })\n\n def test_multiple_treatments(self):\n \"\"\"Test Model.validate_and_set() when with multiple treatments.\"\"\"\n with mn.model(treatments=['current', 'imagined']) as m:\n InterestRate = mn.constant('InterestRate', \n mn.PerTreatment({'current': 0.04, 'imagined': 0.04}))\n self.assertEqual(\n m.validate_and_set('InterestRate', 'current', 
0.05),\n {\n 'success': True, \n 'variable': 'InterestRate',\n 'treatment': 'current',\n 'amount': 0.05\n })\n self.assertEqual(InterestRate['current'], 0.05)\n self.assertEqual(InterestRate['imagined'], 0.04)\n\n def test_reset_model(self):\n \"\"\"Test changing a value, then Model.reset().\"\"\"\n with mn.model() as m:\n InterestRate = mn.constant('InterestRate', 0.04)\n\n self.assertEqual(InterestRate[''], 0.04)\n m.validate_and_set('InterestRate', '', 0.05)\n self.assertEqual(InterestRate[''], 0.05) \n m.reset(reset_external_vars=False)\n self.assertEqual(InterestRate[''], 0.05) \n m.reset()\n self.assertEqual(InterestRate[''], 0.04)\n\n \n\nclass ValidateAndSetAttributeTest(unittest.TestCase):\n \"\"\"Test setting and validating attributes of variables.\"\"\"\n def test_attribute_without_validator(self):\n \"\"\"Test setting an attribute without a validator.\"\"\"\n class _Size:\n def __init__(self, length, width, height):\n self.length = length\n self.width = width\n self.height = height\n\n with mn.model() as m:\n Size = mn.constant('Size', _Size(18, 16, 14))\n self.assertEqual(Size[''].length, 18)\n self.assertEqual(\n m.validate_and_set('Size', '', 17, excerpt='.length'),\n {\n 'success': True,\n 'variable': 'Size',\n 'excerpt': '.length',\n 'treatment': '',\n 'amount': 17\n })\n self.assertEqual(Size[''].length, 17)\n\n def test_attribute_without_validator_multiple_treatments(self):\n \"\"\"Test setting an attribute without a validator in multiple treatments.\"\"\"\n class _Size:\n def __init__(self, length, width, height):\n self.length = length\n self.width = width\n self.height = height\n\n with mn.model(treatments=['current', 'imagined']) as m:\n Size = mn.constant('Size', \n mn.PerTreatment(\n {'current': _Size(18, 16, 14),'imagined': _Size(18, 16, 14)}\n ))\n self.assertEqual(Size['current'].length, 18)\n self.assertEqual(Size['imagined'].length, 18)\n self.assertEqual(\n m.validate_and_set('Size', 'current', 17, excerpt='.length'),\n {\n 'success': True,\n 'variable': 'Size',\n 'excerpt': '.length',\n 'treatment': 'current',\n 'amount': 17\n })\n self.assertEqual(Size['current'].length, 17)\n self.assertEqual(Size['imagined'].length, 18)\n with self.assertRaisesRegex(mn.MinnetonkaError,\n 'validate_and_set for Size on multiple treatments'):\n m.validate_and_set('Size', '__all__', 20, excerpt='.length')\n\n with mn.model(treatments=['current', 'imagined']) as m:\n Size = mn.constant('Size', _Size(18, 16, 14))\n\n self.assertEqual(\n m.validate_and_set('Size', '__all__', 17, excerpt='.length'),\n {\n 'success': True,\n 'variable': 'Size',\n 'excerpt': '.length',\n 'treatment': '__all__',\n 'amount': 17\n })\n\n self.assertEqual(Size['current'].length, 17)\n self.assertEqual(Size['imagined'].length, 17)\n\n\n def test_valid_attribute(self):\n \"\"\"Test setting an attribute with a validator.\"\"\"\n class _Size:\n def __init__(self, length, width, height):\n self.length = length\n self.width = width\n self.height = height\n\n def validate(self, attr, amount):\n return True, '', '', None\n\n with mn.model() as m:\n Size = mn.constant('Size', _Size(18, 16, 14))\n self.assertEqual(Size[''].length, 18)\n self.assertEqual(\n m.validate_and_set('Size', '', 17, excerpt='.length'),\n {\n 'success': True,\n 'variable': 'Size',\n 'excerpt': '.length',\n 'treatment': '',\n 'amount': 17\n })\n self.assertEqual(Size[''].length, 17)\n\n def test_invalid_attribute(self):\n \"\"\"Test setting an attribute that does not pass validation.\"\"\"\n class _Size:\n def __init__(self, length, width, 
height):\n self.length = length\n self.width = width\n self.height = height\n\n def validate(self, attr, amount):\n return False, 'Bad', 'Really quite bad', None\n\n with mn.model() as m:\n Size = mn.constant('Size', _Size(18, 16, 14))\n self.assertEqual(Size[''].length, 18)\n self.assertEqual(\n m.validate_and_set('Size', '', 17, excerpt='.length'),\n {\n 'success': False,\n 'variable': 'Size',\n 'excerpt': '.length',\n 'treatment': '',\n 'error_code': 'Bad',\n 'error_message': 'Really quite bad',\n 'suggested_amount': None,\n 'amount': 17\n\n })\n self.assertEqual(Size[''].length, 18)\n\n def test_unsettable_property(self):\n \"\"\"Test setting a property that cannot be set.\"\"\"\n class _Size:\n def __init__(self, length, width, height):\n self._length = length\n self._width = width\n self._height = height\n\n @property\n def length(self):\n return self._length\n\n with mn.model() as m:\n Size = mn.constant('Size', _Size(18, 16, 14))\n self.assertEqual(Size[''].length, 18)\n self.assertEqual(\n m.validate_and_set('Size', '', 17, excerpt='.length'),\n {\n 'success': False,\n 'variable': 'Size',\n 'excerpt': '.length',\n 'treatment': '',\n 'error_code': 'Unsettable',\n 'error_message': \"Error can't set attribute raised when setting amount of _Size to 17\", \n 'amount': 17\n\n })\n self.assertEqual(Size[''].length, 18)\n\n def test_attribute_chain_without_validator(self):\n \"\"\"Test setting a chain of attributes without a validator.\"\"\"\n class _Size:\n def __init__(self, length, width, height):\n self.length = length\n self.width = width\n self.height = height\n\n class _Measure:\n def __init__(self, metric, customary):\n self.metric = metric\n self.customary = customary\n\n class _Interval:\n def __init__(self, begin, end):\n self.begin = begin \n self.end = end \n\n with mn.model() as m:\n Size = mn.constant('Size', \n _Size(_Measure(18, _Interval(1.0, 2.0)), 16, 14))\n self.assertEqual(Size[''].length.customary.begin, 1.0)\n self.assertEqual(\n m.validate_and_set(\n 'Size', '', 1.3, excerpt='.length.customary.begin'),\n {\n 'success': True,\n 'variable': 'Size',\n 'excerpt': '.length.customary.begin',\n 'treatment': '',\n 'amount': 1.3\n })\n self.assertEqual(Size[''].length.customary.begin, 1.3)\n\n def test_reset_model(self):\n \"\"\"Test changing an attribute, then Model.reset().\"\"\"\n class _Size:\n def __init__(self, length, width, height):\n self.length = length\n self.width = width\n self.height = height\n\n with mn.model() as m:\n Size = mn.constant('Size', lambda: _Size(18, 16, 14))\n\n self.assertEqual(Size[''].length, 18)\n m.validate_and_set('Size', '', 19, excerpt='.length')\n self.assertEqual(Size[''].length, 19)\n m.recalculate()\n self.assertEqual(Size[''].length, 19)\n m.reset(reset_external_vars=False)\n self.assertEqual(Size[''].length, 19)\n m.reset()\n self.assertEqual(Size[''].length, 18)\n\nclass ValidateAllTest(unittest.TestCase):\n \"\"\"Test validate_all().\"\"\"\n def test_nothing_to_validate(self):\n \"\"\"Test validate_all() when no constraints are defined.\"\"\"\n with mn.model() as m:\n mn.constant('X7Allowed', False)\n mn.constant('X5Allowed', False)\n mn.constant('X4Allowed', False)\n\n self.assertEqual(m.validate_all(), {'success': True})\n\n def test_simple(self):\n \"\"\"Test validate_all() with a simple constraint.\"\"\"\n with mn.model() as m:\n mn.constant('X7Allowed', False)\n mn.constant('X5Allowed', False)\n X4 = mn.constant('X4Allowed', False)\n\n mn.constraint(\n ['X7Allowed', 'X5Allowed', 'X4Allowed'],\n lambda *machines: any(machines),\n 
\"AtLeastOneTruthy\",\n lambda names, amounts, trt: \n f'All machines are disabled: {\", \".join(names)}')\n\n self.assertEqual(\n m.validate_all(),\n {\n 'success': False,\n 'errors': [\n {\n 'error_code': 'AtLeastOneTruthy',\n 'inconsistent_variables': [\n 'X7Allowed', 'X5Allowed', 'X4Allowed'],\n 'error_message': 'All machines are disabled: X7Allowed, X5Allowed, X4Allowed',\n 'treatment': ''\n }\n\n ]\n })\n\n X4[''] = True\n self.assertEqual(m.validate_all(), {'success': True})\n\n def test_one_treatment(self):\n \"\"\"Test validate_all() that fails in one treatment only.\"\"\"\n with mn.model(treatments=['current', 'future']) as m:\n mn.constant('X7Allowed', False)\n mn.constant('X5Allowed', False)\n X4 = mn.constant(\n 'X4Allowed', mn.PerTreatment({'current': True, 'future': True}))\n\n mn.constraint(\n ['X7Allowed', 'X5Allowed', 'X4Allowed'],\n lambda *machines: any(machines),\n \"AtLeastOneTruthy\",\n lambda names, amounts, trt: \n f'All machines are disabled: {\", \".join(names)}')\n\n\n self.assertEqual(m.validate_all(), {'success': True})\n X4['future'] = False\n self.assertEqual(\n m.validate_all(),\n {\n 'success': False,\n 'errors': [\n {\n 'error_code': 'AtLeastOneTruthy',\n 'inconsistent_variables': [\n 'X7Allowed', 'X5Allowed', 'X4Allowed'],\n 'error_message': 'All machines are disabled: X7Allowed, X5Allowed, X4Allowed',\n 'treatment': 'future'\n }\n\n ]\n })\n\n\n def test_two_constraints(self):\n \"\"\"Test validate_all() with two different constraints.\"\"\"\n with mn.model() as m:\n mn.constant('X7Allowed', False)\n X5 = mn.constant('X5Allowed', False)\n mn.constant('X4Allowed', False)\n\n mn.constraint(\n ['X7Allowed', 'X5Allowed', 'X4Allowed'],\n lambda *machines: any(machines),\n \"AtLeastOneTruthy\",\n lambda names, amounts, trt: \n f'All machines are disabled: {\", \".join(names)}')\n\n mn.constant('Small', 0.4)\n mn.constant('Medium', 0.5)\n Large = mn.constant('Large', 0.05)\n\n mn.constraint(\n ['Small', 'Medium', 'Large'],\n lambda *sizes: sum(sizes) == 1.0,\n 'InvalidDistribution',\n lambda names, amounts, treatment: \n 'Distribution of {} sums to {}, not 1.0, in {}'.format(\n \", \".join(names), round(sum(amounts), 3), treatment))\n\n vresult = m.validate_all()\n self.assertEqual(vresult['success'], False)\n self.assertEqual(len(vresult['errors']), 2)\n self.assertIn(\n {\n 'error_code': 'AtLeastOneTruthy',\n 'inconsistent_variables': [\n 'X7Allowed', 'X5Allowed', 'X4Allowed'],\n 'error_message': 'All machines are disabled: X7Allowed, X5Allowed, X4Allowed',\n 'treatment': ''\n },\n vresult['errors'])\n self.assertIn(\n {\n 'error_code': 'InvalidDistribution',\n 'inconsistent_variables': ['Small', 'Medium', 'Large'],\n 'error_message': 'Distribution of Small, Medium, Large sums to 0.95, not 1.0, in ',\n 'treatment': ''\n },\n vresult['errors'])\n\n X5[''] = True\n Large[''] = 0.1\n self.assertEqual(m.validate_all(), {'success': True})\n\n def test_constraint_raising_error(self):\n \"\"\"Test validate_all() with a broken constraint that raises error.\"\"\"\n with mn.model() as m:\n mn.constant('X7Allowed', False)\n mn.constraint(\n ['X7Allowed'],\n lambda machine: 1 / 0,\n \"Whatever\",\n lambda names, amounts, trt: 'whatever')\n\n vresult = m.validate_all()\n self.assertEqual(\n m.validate_all(),\n {\n 'success': False,\n 'errors': [\n {\n 'error_code': 'Whatever',\n 'inconsistent_variables': ['X7Allowed'],\n 'error_message': \n 'Constraint raised exception division by zero',\n 'treatment': ''\n }\n\n ]\n })\n\n with mn.model() as m:\n 
mn.constant('X7Allowed', False)\n mn.constraint(\n ['X7Allowed'],\n lambda x: False,\n \"Whatever\",\n lambda names, amounts, trt: 1 / 0)\n\n self.assertEqual(\n m.validate_all(),\n {\n 'success': False,\n 'errors': [\n {\n 'error_code': 'Whatever',\n 'inconsistent_variables': ['X7Allowed'],\n 'error_message': \n 'Constraint raised exception division by zero',\n 'treatment': ''\n }\n\n ]\n })\n \n\nclass DerivedTreatmentTest(unittest.TestCase):\n \"\"\"Test derived treatments.\"\"\"\n def test_simple(self):\n \"\"\"Test simple application of derived treatment.\"\"\"\n with mn.model(treatments=['current', 'possible'], \n derived_treatments={\n 'at-risk': mn.AmountBetter('possible', 'current')}\n ) as m:\n Revenue = mn.constant('Revenue', \n mn.PerTreatment({'current': 20, 'possible': 25})\n ).derived()\n Cost = mn.constant('Cost',\n mn.PerTreatment({'current': 19, 'possible': 18})\n ).derived(scored_as='golf')\n Earnings = mn.variable('Earnings',\n lambda r, c: r-c,\n 'Revenue', 'Cost'\n ).derived()\n\n self.assertEqual(Revenue['at-risk'], 5)\n self.assertEqual(Cost['at-risk'], 1)\n self.assertEqual(Earnings['at-risk'], 6)\n self.assertEqual(Earnings['current'], 1)\n\n def test_derived_treatment_name_dupes_treatment(self):\n \"\"\"Test derived treatment with name that is already treatemnt.\"\"\"\n with self.assertRaisesRegex(mn.MinnetonkaError, \n 'Derived treatment possible is also a treatment'):\n mn.model(treatments=['current', 'possible'], \n derived_treatments={\n 'possible': mn.AmountBetter('possible', 'current')})\n\n def test_setting_derived_treatment(self):\n with mn.model(treatments=['current', 'possible'], \n derived_treatments={\n 'at-risk': mn.AmountBetter('possible', 'current')}\n ) as m:\n Revenue = mn.constant('Revenue', \n mn.PerTreatment({'current': 20, 'possible': 25}))\n\n with self.assertRaisesRegex(mn.MinnetonkaError, \n 'Cannot set Revenue in derived treatment at-risk.'):\n Revenue['at-risk'] = 2.7\n\n def test_mix(self):\n \"\"\"Test a variable that is scored as a mix.\"\"\"\n with mn.model(treatments=['current', 'possible'], \n derived_treatments={\n 'at-risk': mn.AmountBetter('possible', 'current')}\n ) as m:\n Revenue = mn.constant('Revenue', \n mn.PerTreatment({'current': 20, 'possible': 25})\n ).derived()\n Cost = mn.constant('Cost',\n mn.PerTreatment({'current': 19, 'possible': 18})\n ).derived(scored_as='golf')\n Summary = mn.variable('Summary',\n lambda r, c: {'revenue':r, 'cost':c},\n 'Revenue', 'Cost'\n ).derived(scored_as='combo')\n\n self.assertEqual(Revenue['at-risk'], 5)\n self.assertEqual(Cost['at-risk'], 1)\n self.assertEqual(Summary['at-risk'], {'revenue': 5, 'cost': 1})\n\n def test_attempt_to_access_derived_of_nonderived(self):\n \"\"\"Attempt to access derived treatment of non-derived variable.\"\"\"\n with mn.model(treatments=['current', 'possible'], \n derived_treatments={\n 'at-risk': mn.AmountBetter('possible', 'current')}\n ) as m:\n Revenue = mn.constant('Revenue', \n mn.PerTreatment({'current': 20, 'possible': 25})\n ) \n Cost = mn.constant('Cost',\n mn.PerTreatment({'current': 19, 'possible': 18})\n ).derived(scored_as='golf')\n Earnings = mn.variable('Earnings',\n lambda r, c: r-c,\n 'Revenue', 'Cost'\n ).derived()\n\n with self.assertRaisesRegex(mn.MinnetonkaError, \n 'Unknown treatment at-risk for variable Revenue'):\n Revenue['at-risk']\n\n def test_is_derived(self):\n \"\"\"Test the function is_derived.\"\"\" \n with mn.model(treatments=['current', 'possible'], \n derived_treatments={\n 'at-risk': mn.AmountBetter('possible', 
'current')}\n ) as m:\n Revenue = mn.constant('Revenue', \n mn.PerTreatment({'current': 20, 'possible': 25})\n ) \n Cost = mn.constant('Cost',\n mn.PerTreatment({'current': 19, 'possible': 18})\n ).derived(scored_as='golf')\n Earnings = mn.variable('Earnings',\n lambda r, c: r-c,\n 'Revenue', 'Cost'\n ).derived()\n\n self.assertTrue(Cost.is_derived())\n self.assertFalse(Revenue.is_derived())\n\n\nclass ReplayTest(unittest.TestCase):\n \"\"\"Test replaying a model.\"\"\"\n def test_simple_replay(self):\n \"\"\"Test Model.recording() and Model.replay() of simple variables.\"\"\"\n def create_model():\n with mn.model(treatments=['then', 'now']) as m:\n mn.constant('Foo', mn.PerTreatment({'then': 9, 'now': 10}))\n mn.constant('Bar', 2)\n mn.variable('Baz', lambda a, b: a+b, 'Foo', 'Bar')\n mn.constant('Quz', 99)\n return m\n\n m = create_model() \n m.validate_and_set('Foo', 'then', 99)\n m.validate_and_set('Bar', '__all__', 3)\n m.validate_and_set('Baz', 'now', 15) \n m.validate_and_set('Foo', 'then', 11)\n m.recalculate()\n self.assertEqual(m['Foo']['then'], 11)\n self.assertEqual(m['Foo']['now'], 10)\n self.assertEqual(m['Bar']['then'], 3)\n self.assertEqual(m['Bar']['now'], 3)\n self.assertEqual(m['Baz']['then'], 14)\n self.assertEqual(m['Baz']['now'], 15) \n\n recording = m.recording()\n m2 = create_model()\n m2.replay(recording) \n self.assertEqual(m2['Foo']['then'], 11)\n self.assertEqual(m2['Foo']['now'], 10)\n self.assertEqual(m2['Bar']['then'], 3)\n self.assertEqual(m2['Bar']['now'], 3)\n self.assertEqual(m2['Baz']['then'], 14)\n self.assertEqual(m2['Baz']['now'], 15) \n\n def test_replay_excerpts(self):\n \"\"\"Test Model.recording() and Model.replay, including excerpts.\"\"\"\n class _Size:\n def __init__(self, length, width, height):\n self.length = length\n self.width = width\n self.height = height\n\n class _Measure:\n def __init__(self, metric, customary):\n self.metric = metric\n self.customary = customary\n\n class _Interval:\n def __init__(self, begin, end):\n self.begin = begin \n self.end = end \n\n def create_model():\n with mn.model() as m:\n mn.constant('Size', \n _Size(_Measure(18, _Interval(1.0, 2.0)), 16, 14))\n return m\n\n m = create_model()\n m.validate_and_set('Size', '', 99, excerpt='.width')\n m.validate_and_set('Size', '', 19, excerpt='.length.metric')\n m.validate_and_set('Size', '', 15, excerpt='.length.customary.begin')\n m.validate_and_set('Size', '', 17, excerpt='.length.metric')\n self.assertEqual(m['Size'][''].width, 99)\n self.assertEqual(m['Size'][''].length.metric, 17)\n self.assertEqual(m['Size'][''].length.customary.begin, 15)\n self.assertEqual(m['Size'][''].height, 14)\n \n m2 = create_model()\n m2.replay(m.recording())\n self.assertEqual(m['Size'][''].width, 99)\n self.assertEqual(m['Size'][''].length.metric, 17)\n self.assertEqual(m['Size'][''].length.customary.begin, 15)\n self.assertEqual(m['Size'][''].height, 14)\n\n def test_complex_amount(self):\n \"\"\"Test Model.recording() on amount that cannot be recorded.\"\"\"\n class _Size:\n def __init__(self, length, width, height):\n self.length = length\n self.width = width\n self.height = height\n\n class _Measure:\n def __init__(self, metric, customary):\n self.metric = metric\n self.customary = customary\n\n class _Interval:\n def __init__(self, begin, end):\n self.begin = begin \n self.end = end \n\n def create_model():\n with mn.model() as m:\n mn.constant('Size', \n _Size(_Measure(18, _Interval(1.0, 2.0)), 16, 14))\n return m\n\n m = create_model()\n with 
self.assertRaisesRegex(mn.MinnetonkaError,\n 'Cannot save amount for later playback'):\n m.validate_and_set('Size', '', _Measure(10, 12), excerpt='.width')\n \n m.validate_and_set(\n 'Size', '', _Measure(10, 12), excerpt='.width', record=False)\n m.validate_and_set('Size', '', 19, excerpt='.length.metric')\n\n m2 = create_model()\n m2.replay(m.recording())\n\n self.assertEqual(m2['Size'][''].length.metric, 19)\n self.assertEqual(m2['Size'][''].width, 16)\n\n def test_included_step(self):\n \"\"\"Test Model.recording with a step or two involved.\"\"\"\n def create_model():\n with mn.model(end_time=10) as m:\n mn.variable('X', 1)\n mn.variable('Y', 22)\n S = mn.stock('S',\n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda x: x, ('X',), lambda x: x, ('Y',))\n return m \n\n m = create_model()\n self.assertEqual(m['S'][''], 22)\n m.step()\n self.assertEqual(m['S'][''], 23) \n recording2 = m.recording()\n\n m.validate_and_set('X', '', 2)\n m.recalculate()\n m.step(2)\n recording3 = m.recording()\n\n m.step(to_end=True)\n recording4 = m.recording()\n\n m2 = create_model()\n m2.replay(recording2)\n self.assertEqual(m2['S'][''], 23)\n\n m3 = create_model()\n m3.replay(recording3)\n self.assertEqual(m3['S'][''], 27)\n\n m4 = create_model()\n m4.replay(recording4)\n self.assertEqual(m4['S'][''], 41)\n\n def test_reset(self):\n \"\"\"Test model recording with a reset or two.\"\"\"\n def create_model():\n with mn.model() as m:\n mn.variable('X', 1) \n return m \n\n m = create_model() \n m.validate_and_set('X', '', 2)\n m.reset()\n recording2 = m.recording()\n\n m.validate_and_set('X', '', 2)\n m.reset(reset_external_vars=False)\n recording3 = m.recording()\n\n m2 = create_model()\n m2.replay(recording2)\n self.assertEqual(m2['X'][''], 1)\n\n m3 = create_model()\n m3.replay(recording3)\n self.assertEqual(m3['X'][''], 2)\n\n def test_multiple_resets(self):\n \"\"\"Test that multiple resets and multiple steps are only done once.\"\"\"\n def create_model():\n with mn.model(end_time=10) as m:\n mn.variable('X', 1)\n mn.variable('Y', 22)\n S = mn.stock('S',\n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda x: x, ('X',), lambda x: x, ('Y',))\n return m \n\n\n m = create_model() \n m.validate_and_set('X', '', 2)\n m.reset(reset_external_vars=False)\n m.recalculate()\n m.reset(reset_external_vars=False)\n m.step(3)\n m.reset(reset_external_vars=False)\n m.reset(reset_external_vars=False)\n recording = m.recording()\n\n m2 = create_model()\n m2.replay(recording)\n self.assertEqual(m2['X'][''], 2)\n self.assertEqual(\n m2._user_actions.thaw_recording(recording),\n # just two actions\n [\n {\n 'type': 'validate_and_set',\n 'treatment_name': '',\n 'excerpt': '',\n 'amount': 2,\n 'variable_name': 'X'\n },\n {'type': 'reset', 'reset_external_vars': False}\n ])\n\n def test_ignore_step(self):\n \"\"\"Test replay while ignoring steps.\"\"\"\n def create_model():\n with mn.model(end_time=10) as m:\n mn.variable('X', 1)\n mn.variable('Y', 22)\n S = mn.stock('S',\n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda x: x, ('X',), lambda x: x, ('Y',))\n return m \n\n m = create_model()\n self.assertEqual(m['S'][''], 22)\n m.step()\n self.assertEqual(m['S'][''], 23) \n recording2 = m.recording()\n\n m.validate_and_set('X', '', 2)\n m.recalculate()\n m.step(2)\n recording3 = m.recording()\n\n m.step(to_end=True)\n recording4 = m.recording()\n\n m2 = create_model()\n m2.replay(recording2, ignore_step=True)\n self.assertEqual(m2['S'][''], 22)\n\n m3 = create_model()\n m3.replay(recording3, ignore_step=True)\n 
self.assertEqual(m3['S'][''], 22)\n\n m4 = create_model()\n m4.replay(recording4, ignore_step=True)\n self.assertEqual(m4['S'][''], 22)\n\n\nclass CrossTreatmentTest(unittest.TestCase):\n \"\"\"Test a cross.\"\"\"\n def test_cross_constant(self):\n with mn.model(treatments=['As is', 'To be']) as m: \n Bar = mn.constant('Bar', mn.PerTreatment({'As is': 1, 'To be': 2}))\n Foo = mn.cross('Foo', 'Bar', 'As is')\n\n self.assertEqual(Foo['As is'], 1)\n self.assertEqual(Foo['To be'], 1)\n\n def test_cross_variable(self):\n with mn.model(treatments=['As is', 'To be']) as m: \n Bar = mn.constant('Bar', mn.PerTreatment({'As is': 1, 'To be': 2}))\n Baz = mn.variable('Baz', lambda x: x+2, 'Bar')\n Foo = mn.cross('Foo', 'Baz', 'As is')\n\n self.assertEqual(Foo['As is'], 3)\n self.assertEqual(Foo['To be'], 3)\n\n def test_cross_model_variable(self):\n with mn.model(treatments=['As is', 'To be']) as m: \n S = mn.stock('S', mn.PerTreatment({'As is': 1, 'To be': 2}))\n Foo = mn.cross('Foo', 'S', 'As is')\n\n m.step()\n self.assertEqual(Foo['As is'], 1)\n self.assertEqual(Foo['To be'], 1)\n m.step()\n self.assertEqual(Foo['As is'], 2)\n self.assertEqual(Foo['To be'], 2)\n\nclass UndefinedInTest(unittest.TestCase):\n \"\"\"Test defining variable undefined in some treatments.\"\"\"\n def test_undefined_value(self):\n \"\"\"Test the value of something undefined.\"\"\"\n with mn.model(treatments=['conjecture', 'current', 'possible', 'design']):\n Foo = mn.constant('Foo', 12 ).undefined_in('conjecture')\n Bar = mn.variable('Bar',\n lambda x: x + 1,\n 'Foo').undefined_in('conjecture', 'current')\n self.assertEqual(Foo['current'], 12)\n self.assertEqual(Foo['conjecture'], None)\n self.assertEqual(Bar['current'], None)\n self.assertEqual(Bar['conjecture'], None)\n self.assertEqual(Bar['possible'], 13)\n\n def test_undefined_stock(self):\n \"\"\"Test a stock that is undefined for some treatments.\"\"\" \n with mn.model(treatments=['conjecture', 'current', 'possible', 'design']\n ) as m:\n mn.variable('X', 1)\n mn.variable('Y', \n mn.PerTreatment({'conjecture': 22, 'current': 22, 'possible': 22})\n ).undefined_in('design')\n S = mn.stock('S',\n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda x: x, ('X',), lambda x: x, ('Y',)\n ).undefined_in('design')\n P = mn.variable('P', lambda x: x, 'S'\n ).undefined_in('possible', 'design')\n\n self.assertEqual(S['current'], 22)\n self.assertEqual(S['design'], None)\n self.assertEqual(P['conjecture'], 22)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.step() \n self.assertEqual(S['current'], 23)\n self.assertEqual(S['design'], None)\n self.assertEqual(P['conjecture'], 23)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.step()\n self.assertEqual(S['current'], 24)\n self.assertEqual(S['design'], None)\n self.assertEqual(P['conjecture'], 24)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.reset()\n self.assertEqual(S['current'], 22)\n self.assertEqual(S['design'], None)\n self.assertEqual(P['conjecture'], 22)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.step() \n self.assertEqual(S['current'], 23)\n self.assertEqual(S['design'], None)\n self.assertEqual(P['conjecture'], 23)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n\n def test_undefined_accum(self):\n \"\"\"Test an accum that is undefined for some treatments.\"\"\" \n with mn.model(treatments=['conjecture', 'current', 'possible', 'design']\n ) as m:\n 
mn.variable('X', 1)\n mn.variable('Y', 22)\n A = mn.stock('A',\n \"\"\"Start at 23 and increase by 1\"\"\",\n lambda x: x, ('X',), lambda x: x, ('Y',)\n ).undefined_in('design')\n P = mn.variable('P', lambda x: x, 'A'\n ).undefined_in('possible', 'design')\n\n self.assertEqual(A['current'], 22)\n self.assertEqual(A['design'], None)\n self.assertEqual(P['conjecture'], 22)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.step() \n self.assertEqual(A['current'], 23)\n self.assertEqual(A['design'], None)\n self.assertEqual(P['conjecture'], 23)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.step()\n self.assertEqual(A['current'], 24)\n self.assertEqual(A['design'], None)\n self.assertEqual(P['conjecture'], 24)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.reset()\n self.assertEqual(A['current'], 22)\n self.assertEqual(A['design'], None)\n self.assertEqual(P['conjecture'], 22)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.step() \n self.assertEqual(A['current'], 23)\n self.assertEqual(A['design'], None)\n self.assertEqual(P['conjecture'], 23)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n\n def test_partial_per_treatment(self):\n \"\"\"Test PerTreatment that is not defined for undefined treatments.\"\"\"\n with mn.model(treatments=['conjecture', 'current', 'possible', 'design']):\n Foo = mn.constant('Foo', 12 ).undefined_in('conjecture')\n Bar = mn.variable('Bar',\n mn.PerTreatment({\n 'current': lambda x: x+1,\n 'possible': lambda x: x+2\n }),\n 'Foo'\n ).undefined_in('conjecture', 'design') \n self.assertEqual(Bar['current'], 13)\n self.assertEqual(Bar['conjecture'], None)\n self.assertEqual(Bar['possible'], 14)\n\n def test_recalculate(self):\n \"\"\"Test recalculating with undefined.\"\"\"\n with mn.model(treatments=['conjecture', 'current', 'possible', 'design']\n ) as m:\n Foo = mn.constant('Foo', 12 ).undefined_in('conjecture')\n Bar = mn.variable('Bar',\n lambda x: x + 1,\n 'Foo').undefined_in('conjecture', 'current')\n self.assertEqual(Foo['current'], 12)\n self.assertEqual(Foo['conjecture'], None)\n self.assertEqual(Bar['current'], None)\n self.assertEqual(Bar['conjecture'], None)\n self.assertEqual(Bar['possible'], 13)\n m.step()\n Foo['__all__'] = 19\n m.recalculate()\n self.assertEqual(Foo['current'], 19)\n self.assertEqual(Foo['conjecture'], None)\n self.assertEqual(Bar['current'], None)\n self.assertEqual(Bar['conjecture'], None)\n self.assertEqual(Bar['possible'], 20)\n\n def test_previous(self):\n \"\"\"Test a previous that is undefined for some treatments.\"\"\" \n with mn.model(treatments=['conjecture', 'current', 'possible', 'design']\n ) as m:\n mn.variable('X', 1)\n mn.variable('Y', 22).undefined_in('design')\n S = mn.stock('S',\n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda x: x, ('X',), lambda x: x, ('Y',)\n ).undefined_in('design')\n P = mn.previous('P', 'S').undefined_in('possible', 'design')\n\n self.assertEqual(S['current'], 22)\n self.assertEqual(S['design'], None)\n self.assertEqual(P['conjecture'], 22)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.step() \n self.assertEqual(S['current'], 23)\n self.assertEqual(S['design'], None)\n self.assertEqual(P['conjecture'], 22)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n m.step() \n self.assertEqual(S['current'], 24)\n self.assertEqual(S['design'], None)\n 
self.assertEqual(P['conjecture'], 23)\n self.assertEqual(P['design'], None)\n self.assertEqual(P['possible'], None)\n\n def test_all_amounts(self):\n \"\"\"Test all amounts with undefined.\"\"\"\n with mn.model(treatments=['conjecture', 'current', 'possible', 'design']\n ) as m:\n X = mn.variable('X', 1)\n Y = mn.variable('Y', 22).undefined_in('design')\n S = mn.stock('S',\n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda x: x, ('X',), lambda x: x, ('Y',)\n ).undefined_in('design', 'possible')\n\n self.assertEqual(\n X.all(), \n {'conjecture': 1, 'current': 1, 'possible': 1, 'design': 1})\n self.assertEqual(\n Y.all(),\n {'conjecture': 22, 'current': 22, 'possible': 22})\n self.assertEqual(S.all(), {'conjecture': 22, 'current': 22} )\n m.step()\n self.assertEqual(\n X.all(), \n {'conjecture': 1, 'current': 1, 'possible': 1, 'design': 1})\n self.assertEqual(\n Y.all(),\n {'conjecture': 22, 'current': 22, 'possible': 22})\n self.assertEqual(S.all(), {'conjecture': 23, 'current': 23} )\n\n def test_dispatch_function(self):\n \"\"\"Test dispatch function with undefined.\"\"\"\n with mn.model(treatments=['conjecture', 'current', 'possible', 'design']\n ) as m:\n X = mn.variable('X', 1).undefined_in('conjecture', 'current')\n Y = mn.variable('Y', 22).undefined_in('possible', 'design')\n XorY = mn.variable('XorY',\n mn.PerTreatment({\n 'conjecture': lambda _, y: y,\n 'current': lambda _, y: y,\n 'possible': lambda x, _: x,\n 'design': lambda x, _: x\n }),\n 'X',\n 'Y')\n self.assertEqual(\n XorY.all(),\n {'conjecture': 22, 'current': 22, 'possible': 1, 'design': 1})\n\n def test_history(self):\n \"\"\"Test history with some variables undefined.\"\"\"\n\n with mn.model(treatments=['Bar', 'Baz']) as m:\n Foo = mn.stock('Foo', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0)\n Quz = mn.variable('Quz', lambda x: x, 'Foo').undefined_in('Baz')\n Corge = mn.accum('Corge', mn.PerTreatment({'Bar': 1, 'Baz': 2}), 0) \n\n self.assertEqual(\n m.history(),\n {\n 'Foo': {'Bar': [0], 'Baz': [0]},\n 'Quz': {'Bar': [0]},\n 'Corge': {'Bar': [0], 'Baz': [0]} \n })\n\n m.step(10)\n\n self.assertEqual(\n m.history(),\n {\n 'Foo': {\n 'Bar': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 'Baz': [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\n },\n 'Quz': {\n 'Bar': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] \n },\n 'Corge': {\n 'Bar': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 'Baz': [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\n }\n })\n\n def test_validate_all(self):\n \"\"\"Test validate_all(), with a variable in one treatment.\"\"\"\n with mn.model(treatments=['all good', 'one bad']) as m:\n mn.constant('Small', 0.4)\n mn.constant('Medium', 0.5).undefined_in('one bad')\n Large = mn.constant('Large', 0.05)\n\n mn.constraint(\n ['Small', 'Medium', 'Large'],\n lambda *sizes: sum(sizes) == 1.0,\n 'InvalidDistribution',\n lambda names, amounts, treatment: \n 'Distribution of {} sums to {}, not 1.0, in {}'.format(\n \", \".join(names), round(sum(amounts), 3), treatment))\n\n\n vresult = m.validate_all()\n self.assertEqual(vresult['success'], False)\n Large['__all__'] = 0.1\n self.assertEqual(m.validate_all(), {'success': True})\n\n # The existing error handling is OK, better than trying to catch this\n # explicitly and not allowing dispatch functions.\n \n # def test_undefined_rainy_day(self):\n # \"\"\"Test error handling when a variable uses an undefined variable.\"\"\"\n # with self.assertRaisesRegex(mn.MinnetonkaError,\n # \"Bar uses undefined conjecture amount of Foo\"):\n # with mn.model(treatments=['conjecture', 'current']):\n # Foo = 
mn.constant('Foo', 12 ).undefined_in('conjecture')\n # Bar = mn.variable('Bar', lambda x: x + 1, 'Foo')\n\nclass OnInitTest(unittest.TestCase):\n \"\"\"Test on_init and on_reset.\"\"\"\n def test_simple(self):\n \"\"\"Test on_init and on_reset.\"\"\"\n def set_seed(md):\n random.seed(99)\n\n with mn.model(on_init=set_seed, on_reset=set_seed) as m:\n foo = mn.variable('Foo', lambda: random.randint(0, 999))\n\n foo1a = foo['']\n m.step()\n foo2a = foo['']\n m.reset()\n foo1b = foo['']\n m.step()\n foo2b = foo['']\n\n self.assertEqual(foo1a, foo1b)\n self.assertEqual(foo2a, foo2b)\n\n\nclass DetailsTest(unittest.TestCase):\n \"\"\"Test the details function.\"\"\"\n def assertDictAlmostEqual(self, x, y):\n if isinstance(x, (int, float, complex)):\n self.assertAlmostEqual(x, y)\n elif isinstance(x, dict):\n for k in x.keys():\n self.assertDictAlmostEqual(x[k], y[k])\n # in case some keys in y are not in x\n for k in y.keys():\n self.assertDictAlmostEqual(x[k], y[k])\n else:\n self.assertEqual(x, y)\n\n def test_constant(self):\n \"\"\"Test details for a constant.\"\"\"\n\n with mn.model() as m:\n foo = mn.constant('Foo', 12)\n bar = mn.constant('Bar', 99\n ).substitute_description_for_amount(\"Bar is a pathetic constant\")\n\n self.assertEqual(\n foo.details(),\n {\n 'name': 'Foo',\n 'varies over time': False,\n 'amount': {\"\": 12},\n 'caucus': {\"\": 12}\n })\n\n self.assertEqual(\n bar.details(),\n {\n 'name': 'Bar',\n 'varies over time': False,\n 'summary description': 'Bar is a pathetic constant',\n 'caucus': 'Bar is a pathetic constant'\n })\n\n with mn.model(\n treatments=['As is', 'To be'], \n derived_treatments={'Improvement': mn.AmountBetter('To be', 'As is')}\n ) as m:\n\n foo = mn.constant('Foo', mn.PerTreatment({'As is': 2, 'To be': 2.6}))\n baz = mn.constant('Baz', lambda x: x+x, 'Foo', \n ).derived()\n bar = mn.constant('Bar', mn.PerTreatment({'As is': 20})\n ).undefined_in('To be'\n ).summarizer(\n \"Number as English\",\n lambda x, _: \"Twenty\" if x == 20 else \"Not Twenty\")\n\n foo_deets = foo.details() \n self.assertEqual(\n foo_deets,\n {\n 'name': 'Foo',\n 'varies over time': False,\n 'amount':{'To be': 2.6, 'As is': 2},\n 'caucus': {'To be': 2.6, 'As is': 2}\n })\n\n self.assertEqual(\n bar.details(),\n {\n 'name': 'Bar',\n 'varies over time': False,\n 'summary description': \"Number as English\",\n 'summary':{'As is': \"Twenty\"},\n 'caucus': {'As is': \"Twenty\"}\n })\n\n self.assertDictAlmostEqual(\n baz.details(),\n {\n 'name': 'Baz',\n 'varies over time': False, \n 'amount':{'To be': 5.2, 'As is': 4, 'Improvement': 1.2},\n 'caucus': {'To be': 5.2, 'As is': 4, 'Improvement': 1.2}\n })\n\n def test_normal_variable(self):\n \"\"\"Test details for a normal variable.\"\"\"\n def _convert_to_english(num, _):\n num2words = {\n 1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', \n 6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine', 10: 'Ten', \n 11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen', \n 15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen', \n 19: 'Nineteen', 20: 'Twenty', 0: 'Zero'}\n try:\n return num2words[num]\n except KeyError:\n return \"Many\"\n\n with mn.model(treatments=['As is', 'To be']) as m:\n week = mn.variable('Week', lambda md: md.TIME, '__model__'\n ).summarizer(\"As English\", _convert_to_english\n ).caucuser(lambda amts: 'complex')\n next_week = mn.variable('NextWeek', lambda x: x+1, 'Week'\n ).caucuser(sum)\n week_after = mn.variable('WeekAfter', lambda x: x+2, 'Week'\n ).substitute_description_for_amount(\n \"Sometime in the 
distant future\")\n\n m.step(4)\n self.assertEqual(\n week.details(),\n {\n 'name': 'Week',\n 'varies over time': True,\n 'summary description': 'As English',\n 'summary': {'As is': ['Zero', 'One', 'Two', 'Three', 'Four'],\n 'To be': ['Zero', 'One', 'Two', 'Three', 'Four']},\n 'caucus': {'As is': 'complex', 'To be': 'complex'}\n })\n\n self.assertEqual(\n next_week.details(),\n {\n 'name': 'NextWeek',\n 'varies over time': True,\n 'amounts': {'As is': [1, 2, 3, 4, 5],\n 'To be': [1, 2, 3, 4, 5]},\n 'caucus': {'As is': 15.0, 'To be': 15.0}\n })\n\n self.assertEqual(\n week_after.details(),\n {\n 'name': 'WeekAfter',\n 'varies over time': True,\n 'summary description': 'Sometime in the distant future',\n 'caucus': 'Sometime in the distant future'\n })\n\n def test_use_treatment(self):\n \"\"\"Test details with a summarizer that uses the treatment.\"\"\"\n def _convert_to_english(num, trt):\n num2words = {\n 1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', \n 6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine', 10: 'Ten', \n 11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen', \n 15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen', \n 19: 'Nineteen', 20: 'Twenty', 0: 'Zero'}\n try:\n return num2words[num] + ' ' + trt \n except KeyError:\n return \"Many\"\n\n with mn.model(treatments=['As is', 'To be']) as m:\n week = mn.variable('Week', lambda md: md.TIME, '__model__'\n ).summarizer(\"As English\", _convert_to_english\n ).caucuser(lambda amts: 'complex')\n next_week = mn.variable('NextWeek', lambda x: x+1, 'Week')\n week_after = mn.variable('WeekAfter', lambda x: x+2, 'Week'\n ).substitute_description_for_amount(\n \"Sometime in the distant future\")\n\n m.step(4)\n self.assertEqual(\n week.details(),\n {\n 'name': 'Week',\n 'varies over time': True,\n 'summary description': 'As English',\n 'summary': {'As is': ['Zero As is', 'One As is', 'Two As is', \n 'Three As is', 'Four As is'],\n 'To be': ['Zero To be', 'One To be', 'Two To be', \n 'Three To be', 'Four To be']},\n 'caucus': {'As is': 'complex', 'To be': 'complex'}\n })\n\n def test_normal_derived(self):\n \"\"\"Test details with a variable that has a derived treatment.\"\"\"\n\n with mn.model(\n treatments=['As is', 'To be'], \n derived_treatments={\n 'Improvement': mn.AmountBetter('To be', 'As is')}\n ) as m:\n foo = mn.stock('Foo', mn.PerTreatment({'As is': 1, 'To be': 2})\n ).derived().caucuser(sum)\n bar = mn.variable('Bar', lambda x: x, 'Foo'\n ).derived(scored_as='golf')\n\n m.step(3)\n self.assertEqual(\n foo.details(),\n {\n 'name': 'Foo',\n 'varies over time': True,\n 'amounts': {\n 'As is': [0, 1, 2, 3],\n 'To be': [0, 2, 4, 6],\n 'Improvement': [0, 1, 2, 3]},\n 'caucus': {'As is': 6.0, 'To be': 12.0, 'Improvement': 6.0}\n })\n\n self.assertEqual(\n bar.details(),\n {\n 'name': 'Bar',\n 'varies over time': True,\n 'amounts': {\n 'As is': [0, 1, 2, 3],\n 'To be': [0, 2, 4, 6],\n 'Improvement': [0, -1, -2, -3]\n },\n 'caucus': {'As is': 1.5, 'To be': 3.0, 'Improvement': -1.5}\n })\n\n\n def test_stocks(self):\n \"\"\"Test details for a stock.\"\"\"\n def _convert_to_english(num, _):\n num2words = {\n 1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', \n 6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine', 10: 'Ten', \n 11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen', \n 15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen', \n 19: 'Nineteen', 20: 'Twenty', 0: 'Zero'}\n try:\n return num2words[num]\n except KeyError:\n return \"Many\"\n\n with mn.model(treatments=['As is', 'To be']) as m:\n first = 
mn.stock('First', mn.PerTreatment({'As is': 1, 'To be': 2})\n ).summarizer('As English', _convert_to_english\n ).caucuser(lambda amts: 'complex')\n second = mn.stock('Second', lambda f: f, ('First',), 0\n ).caucuser(sum)\n third = mn.stock('Third', lambda f, s: f + s, ('First', 'Second'), 0\n ).substitute_description_for_amount('A lot')\n\n m.step(4)\n self.assertEqual(\n first.details(),\n {\n 'name': 'First',\n 'varies over time': True,\n 'summary description': 'As English',\n 'summary': {'As is': ['Zero', 'One', 'Two', 'Three', 'Four'],\n 'To be': ['Zero', 'Two', 'Four', 'Six', 'Eight']},\n 'caucus': {'As is': 'complex', 'To be': 'complex'}\n })\n\n self.assertEqual(\n second.details(),\n {\n 'name': 'Second',\n 'varies over time': True,\n 'amounts': {'As is': [0, 0, 1, 3, 6],\n 'To be': [0, 0, 2, 6, 12]},\n 'caucus': {'As is': 10.0, 'To be': 20.0}\n })\n\n self.assertEqual(\n third.details(),\n {\n 'name': 'Third',\n 'varies over time': True,\n 'summary description': 'A lot',\n 'caucus': 'A lot'\n })\n\n def test_accums(self):\n \"\"\"Test details for an accum.\"\"\"\n def _convert_to_english(num, _):\n num2words = {\n 1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', \n 6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine', 10: 'Ten', \n 11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen', \n 15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen', \n 19: 'Nineteen', 20: 'Twenty', 0: 'Zero'}\n try:\n return num2words[num]\n except KeyError:\n return \"Many\"\n\n with mn.model(treatments=['As is', 'To be']) as m:\n first = mn.accum('First', mn.PerTreatment({'As is': 1, 'To be': 2})\n ).summarizer('As English', _convert_to_english\n ).caucuser(lambda amts: 'complex')\n second = mn.accum('Second', lambda f: f, ('First',), 0)\n third = mn.accum('Third', lambda f, s: f + s, ('First', 'Second'), 0\n ).substitute_description_for_amount('A lot')\n\n m.step(4)\n self.assertEqual(\n first.details(),\n {\n 'name': 'First',\n 'varies over time': True,\n 'summary description': 'As English',\n 'summary': {'As is': ['Zero', 'One', 'Two', 'Three', 'Four'],\n 'To be': ['Zero', 'Two', 'Four', 'Six', 'Eight']},\n 'caucus': {'As is': 'complex', 'To be': 'complex'}\n })\n\n self.assertEqual(\n second.details(),\n {\n 'name': 'Second',\n 'varies over time': True,\n 'amounts': {'As is': [0, 1, 3, 6, 10],\n 'To be': [0, 2, 6, 12, 20]},\n 'caucus': {'As is': 4.0, 'To be': 8.0}\n })\n\n self.assertEqual(\n third.details(),\n {\n 'name': 'Third',\n 'varies over time': True,\n 'caucus': 'A lot',\n 'summary description': 'A lot'\n })\n\n def test_previous(self):\n \"\"\"Test details for a normal variable.\"\"\"\n def _convert_to_english(num, _):\n num2words = {\n 1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', \n 6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine', 10: 'Ten', \n 11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen', \n 15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen', \n 19: 'Nineteen', 20: 'Twenty', 0: 'Zero'}\n try:\n return num2words[num]\n except KeyError:\n return \"Many\"\n\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n lf = mn.previous('LastFoo', 'Foo'\n ).summarizer('As English', _convert_to_english\n ).caucuser(lambda amts: 'complex')\n lf2 = mn.previous('LastFoo2', 'Foo').caucuser(sum)\n lf3 = mn.previous('LastFoo3', 'Foo'\n ).substitute_description_for_amount('A lot')\n\n m.step(4)\n self.assertEqual(\n lf.details(),\n {\n 'name': 'LastFoo',\n 'varies over time': True,\n 'summary description': 'As English',\n 'summary': {'': ['Zero', 'Zero', 'One', 'Two', 
'Three']},\n 'caucus': {'': 'complex'}\n })\n\n self.assertEqual(\n lf2.details(),\n {\n 'name': 'LastFoo2',\n 'varies over time': True,\n 'amounts': {'': [0, 0, 1, 2, 3]},\n 'caucus': {'': 6.0}\n })\n\n self.assertEqual(\n lf3.details(),\n {\n 'name': 'LastFoo3',\n 'varies over time': True,\n 'summary description': 'A lot',\n 'caucus': 'A lot'\n })\n\n def test_cross(self):\n \"\"\"Test details for a cross variable.\"\"\"\n def _convert_to_english(num, _):\n num2words = {\n 1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', \n 6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine', 10: 'Ten', \n 11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen', \n 15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen', \n 19: 'Nineteen', 20: 'Twenty', 0: 'Zero'}\n try:\n return num2words[num]\n except KeyError:\n return \"Many\"\n\n with mn.model(treatments=['As is', 'To be']) as m:\n s = mn.stock('S', mn.PerTreatment({'As is': 1, 'To be': 2}))\n foo = mn.cross('Foo', 'S', 'As is'\n ).summarizer('As English', _convert_to_english\n ).caucuser(lambda amts: 'complex') \n\n m.step(4)\n self.assertEqual(\n foo.details(),\n {\n 'name': 'Foo',\n 'varies over time': True,\n 'summary description': 'As English',\n 'summary': {'As is': ['Zero', 'One', 'Two', 'Three', 'Four'],\n 'To be': ['Zero', 'One', 'Two', 'Three', 'Four']},\n 'caucus': {'As is': 'complex', 'To be': 'complex'}\n })\n \n\nclass ModifiedTest(unittest.TestCase):\n \"\"\"Test the is_modified() function on a variable instance.\"\"\"\n def test_constant(self):\n \"\"\"Test is_modified on a constant instance.\"\"\"\n with mn.model() as m:\n foo = mn.constant('Foo', 12)\n\n self.assertFalse(m.is_modified('Foo', ''))\n foo[''] = 13\n self.assertTrue(m.is_modified('Foo', ''))\n\n with mn.model(treatments=['As is', 'To be']) as m:\n bar = mn.constant('Bar', mn.PerTreatment({'As is': 12, 'To be': 13}))\n\n self.assertFalse(m.is_modified('Bar', 'As is'))\n self.assertFalse(m.is_modified('Bar', 'To be'))\n bar['To be'] = 14\n self.assertFalse(m.is_modified('Bar', 'As is'))\n self.assertTrue(m.is_modified('Bar', 'To be'))\n bar['As is'] = 13\n self.assertTrue(m.is_modified('Bar', 'As is'))\n self.assertTrue(m.is_modified('Bar', 'To be'))\n\n def test_constant_reset(self):\n \"\"\"Test is_modified on a constant after being reset.\"\"\"\n with mn.model() as m:\n foo = mn.constant('Foo', 12) \n\n foo[''] = 13\n self.assertTrue(m.is_modified('Foo', ''))\n m.reset()\n self.assertFalse(m.is_modified('Foo', ''))\n foo[''] = 14\n self.assertTrue(m.is_modified('Foo', ''))\n m.reset(reset_external_vars=False)\n self.assertTrue(m.is_modified('Foo', ''))\n\n def test_variable(self):\n \"\"\"Test is_modified on a variable instance.\"\"\"\n with mn.model() as m:\n foo = mn.constant('Foo', 12)\n bar = mn.variable('Bar', lambda x: x + 2, 'Foo')\n\n self.assertFalse(m.is_modified('Bar', ''))\n foo[''] = 13 \n self.assertFalse(m.is_modified('Bar', ''))\n bar[''] = 99 \n self.assertTrue(m.is_modified('Bar', ''))\n\n\nclass VelocityTest(unittest.TestCase):\n \"\"\"Testing velocity()\"\"\"\n def test_simple(self):\n \"\"\"Test a simple use of velocity work, with a stock\"\"\"\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n FooVelocity = mn.velocity('FooVelocity', 'Foo')\n\n self.assertEqual(FooVelocity[''], 0)\n m.step()\n self.assertEqual(FooVelocity[''], 1)\n m.step()\n self.assertEqual(FooVelocity[''], 1)\n m.reset()\n self.assertEqual(FooVelocity[''], 0)\n\n def test_timestep(self):\n \"\"\"Test a simple use of velocity work, with a timestep that is not 1\"\"\"\n 
with mn.model(timestep=0.5) as m:\n mn.stock('Foo', 1, 0)\n FooVelocity = mn.velocity('FooVelocity', 'Foo')\n\n self.assertEqual(FooVelocity[''], 0)\n m.step()\n self.assertEqual(FooVelocity[''], 1)\n m.step()\n self.assertEqual(FooVelocity[''], 1)\n m.reset()\n self.assertEqual(FooVelocity[''], 0)\n\n def test_treatments(self):\n \"\"\"Test velocity works across treatments\"\"\"\n with mn.model(treatments=['as is', 'to be']) as m:\n Bar = mn.stock('Bar', 1, 0)\n Foo = mn.variable('Foo', \n mn.PerTreatment({\n 'as is': lambda x: x * x,\n 'to be': lambda x: x * x * x\n }),\n 'Bar')\n FooVelocity = mn.velocity('FooVelocity', 'Foo')\n\n self.assertEqual(FooVelocity['as is'], 0)\n self.assertEqual(FooVelocity['to be'], 0)\n m.step() \n self.assertEqual(FooVelocity['as is'], 1)\n self.assertEqual(FooVelocity['to be'], 1) \n m.step()\n self.assertEqual(FooVelocity['as is'], 3)\n self.assertEqual(FooVelocity['to be'], 7) \n m.step()\n self.assertEqual(FooVelocity['as is'], 5)\n self.assertEqual(FooVelocity['to be'], 19) \n m.reset()\n self.assertEqual(FooVelocity['as is'], 0)\n self.assertEqual(FooVelocity['to be'], 0)\n m.step() \n self.assertEqual(FooVelocity['as is'], 1)\n self.assertEqual(FooVelocity['to be'], 1) \n\n def test_cycle(self):\n \"\"\"Test that a cycle is caught.\"\"\"\n with self.assertRaises(mn.MinnetonkaError) as me:\n with mn.model() as m:\n Foo = mn.variable('Foo', lambda x: x+1, 'FooVelocity')\n FooVelocity = mn.velocity('FooVelocity', 'Foo')\n self.assertEqual(me.exception.message,\n 'Circularity among variables: Foo <- FooVelocity <- Foo')\n\n def test_array(self):\n \"\"\"Test velocity with numpy arrays.\"\"\"\n with mn.model() as m:\n mn.stock('Savings', \n lambda x: x, 'Interest', np.array([1000, 1000, 1000]))\n mn.variable('Interest',\n lambda savings, rate: savings * rate,\n 'Savings',\n 'Rate')\n mn.constant('Rate', np.array([0.08, 0.09, 0.1]))\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n assert_array_equal(SavingsVelocity[''], np.array([0, 0, 0]))\n m.step()\n assert_array_equal(SavingsVelocity[''], np.array([80, 90, 100]))\n m.step()\n assert_array_almost_equal(\n SavingsVelocity[''], np.array([86.4, 98.1, 110]))\n m.step()\n assert_array_almost_equal(\n SavingsVelocity[''], np.array([93.312, 106.929, 121]))\n m.reset()\n assert_array_equal(SavingsVelocity[''], np.array([0, 0, 0]))\n m.step()\n assert_array_equal(SavingsVelocity[''], np.array([80, 90, 100]))\n\n def test_array_timestep(self):\n \"\"\"Test velocity with numpy arrays and nonzero timestep.\"\"\"\n with mn.model(timestep=0.5) as m:\n mn.stock('Savings', \n lambda x: x, 'Interest', np.array([1000, 1000, 1000]))\n mn.variable('Interest',\n lambda savings, rate: savings * rate,\n 'Savings',\n 'Rate')\n mn.constant('Rate', np.array([0.08, 0.09, 0.1]))\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n assert_array_equal(SavingsVelocity[''], np.array([0, 0, 0]))\n m.step()\n assert_array_equal(SavingsVelocity[''], np.array([80, 90, 100]))\n m.step()\n assert_array_almost_equal(\n SavingsVelocity[''], np.array([83.2, 94.05, 105]))\n m.step()\n assert_array_almost_equal(\n SavingsVelocity[''], np.array([86.528, 98.28225, 110.25]))\n m.reset()\n assert_array_equal(SavingsVelocity[''], np.array([0, 0, 0]))\n m.step()\n assert_array_equal(SavingsVelocity[''], np.array([80, 90, 100]))\n\n def test_tuple(self):\n \"\"\"Test velocity with tuple values.\"\"\"\n with mn.model() as m:\n mn.stock('Savings', \n mn.foreach(lambda x: x), 'Interest', (1000, 1000, 1000))\n 
mn.variable('Interest',\n mn.foreach(lambda savings, rate: savings * rate),\n 'Savings',\n 'Rate')\n mn.constant('Rate', (0.08, 0.09, 0.1))\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n self.assertEqual(SavingsVelocity[''], (0, 0, 0))\n m.step()\n self.assertEqual(SavingsVelocity[''], (80, 90, 100))\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''][0], 86.4)\n self.assertAlmostEqual(SavingsVelocity[''][1], 98.1)\n self.assertAlmostEqual(SavingsVelocity[''][2], 110)\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''][0], 93.312)\n self.assertAlmostEqual(SavingsVelocity[''][1], 106.929)\n self.assertAlmostEqual(SavingsVelocity[''][2], 121)\n m.reset()\n self.assertEqual(SavingsVelocity[''], (0, 0, 0))\n m.step()\n self.assertEqual(SavingsVelocity[''], (80, 90, 100))\n\n def test_tuple_timestep(self):\n \"\"\"Test velocity with tuple values and nonzero timestep.\"\"\"\n with mn.model(timestep=0.5) as m:\n mn.stock('Savings', \n mn.foreach(lambda x: x), 'Interest', (1000, 1000, 1000))\n mn.variable('Interest',\n mn.foreach(lambda savings, rate: savings * rate),\n 'Savings',\n 'Rate')\n mn.constant('Rate', (0.08, 0.09, 0.1))\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n self.assertEqual(SavingsVelocity[''], (0, 0, 0))\n m.step()\n self.assertEqual(SavingsVelocity[''], (80, 90, 100))\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''][0], 83.2)\n self.assertAlmostEqual(SavingsVelocity[''][1], 94.05)\n self.assertAlmostEqual(SavingsVelocity[''][2], 105)\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''][0], 86.528)\n self.assertAlmostEqual(SavingsVelocity[''][1], 98.28225)\n self.assertAlmostEqual(SavingsVelocity[''][2], 110.25)\n m.reset()\n self.assertEqual(SavingsVelocity[''], (0, 0, 0))\n m.step()\n self.assertEqual(SavingsVelocity[''], (80, 90, 100))\n\n def test_dict(self):\n \"\"\"Test velocity with dict values.\"\"\"\n with mn.model() as m:\n mn.stock('Savings', \n mn.foreach(lambda x: x), 'Interest', \n {'foo': 1000, 'bar': 900, 'baz': 800})\n mn.variable('Interest',\n mn.foreach(lambda savings, rate: savings * rate),\n 'Savings',\n 'Rate')\n mn.constant('Rate', {'foo': 0.08, 'bar': 0.09, 'baz': 0.1})\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n self.assertEqual(SavingsVelocity[''], {'foo': 0, 'bar': 0, 'baz': 0})\n m.step()\n self.assertEqual(SavingsVelocity[''], {'foo': 80, 'bar': 81, 'baz': 80})\n m.step()\n self.assertAlmostEqual(SavingsVelocity['']['foo'], 86.4)\n self.assertAlmostEqual(SavingsVelocity['']['bar'], 88.29)\n self.assertAlmostEqual(SavingsVelocity['']['baz'], 88.0)\n m.step()\n self.assertAlmostEqual(SavingsVelocity['']['foo'], 93.312)\n self.assertAlmostEqual(SavingsVelocity['']['bar'], 96.2361)\n self.assertAlmostEqual(SavingsVelocity['']['baz'], 96.8)\n m.reset()\n self.assertEqual(SavingsVelocity[''], {'foo': 0, 'bar': 0, 'baz': 0})\n m.step()\n self.assertEqual(SavingsVelocity[''], {'foo': 80, 'bar': 81, 'baz': 80})\n\n def test_dict_timestep(self):\n \"\"\"Test velocity with dict values and nonzero timestep.\"\"\"\n with mn.model(timestep=0.5) as m:\n mn.stock('Savings', \n mn.foreach(lambda x: x), 'Interest', \n {'foo': 1000, 'bar': 900, 'baz': 800})\n mn.variable('Interest',\n mn.foreach(lambda savings, rate: savings * rate),\n 'Savings',\n 'Rate')\n mn.constant('Rate', {'foo': 0.08, 'bar': 0.09, 'baz': 0.1})\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n self.assertEqual(SavingsVelocity[''], {'foo': 0, 'bar': 0, 'baz': 0})\n m.step(2)\n 
self.assertAlmostEqual(SavingsVelocity['']['foo'], 83.2)\n self.assertAlmostEqual(SavingsVelocity['']['bar'], 84.645)\n self.assertAlmostEqual(SavingsVelocity['']['baz'], 84.0)\n m.step(2)\n self.assertAlmostEqual(SavingsVelocity['']['foo'], 89.98912)\n self.assertAlmostEqual(SavingsVelocity['']['bar'], 92.4344561)\n self.assertAlmostEqual(SavingsVelocity['']['baz'], 92.61)\n m.step()\n self.assertAlmostEqual(SavingsVelocity['']['foo'], 93.5886848)\n self.assertAlmostEqual(SavingsVelocity['']['bar'], 96.5940067)\n self.assertAlmostEqual(SavingsVelocity['']['baz'], 97.2405)\n m.reset()\n self.assertEqual(SavingsVelocity[''], {'foo': 0, 'bar': 0, 'baz': 0})\n m.step()\n self.assertEqual(SavingsVelocity[''], {'foo': 80, 'bar': 81, 'baz': 80})\n\n def test_named_tuple(self):\n \"\"\"Test velocity with named tuple values.\"\"\"\n FBB = collections.namedtuple('FBB', ['foo', 'bar', 'baz'])\n with mn.model() as m:\n mn.stock('Savings', \n mn.foreach(lambda x: x), 'Interest', \n FBB(foo=1000, bar=900, baz=800))\n mn.variable('Interest',\n mn.foreach(lambda savings, rate: savings * rate),\n 'Savings',\n 'Rate')\n mn.constant('Rate', FBB(foo=0.08, bar=0.09, baz=0.1))\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n self.assertEqual(SavingsVelocity[''], FBB(foo=0, bar=0, baz=0))\n m.step()\n self.assertEqual(SavingsVelocity[''], FBB(foo=80, bar=81, baz=80))\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''].foo, 86.4)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 88.29)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 88.0)\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''].foo, 93.312)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 96.2361)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 96.8)\n m.reset()\n self.assertEqual(SavingsVelocity[''], FBB(foo=0, bar=0, baz=0))\n m.step()\n self.assertEqual(SavingsVelocity[''], FBB(foo=80, bar=81, baz=80))\n\n def test_named_tuple_timestep(self):\n \"\"\"Test velocity with named tuple values and nonzero timestep.\"\"\"\n FBB = collections.namedtuple('FBB', ['foo', 'bar', 'baz'])\n with mn.model(timestep=0.5) as m:\n mn.stock('Savings', \n mn.foreach(lambda x: x), 'Interest', \n FBB(foo=1000, bar=900, baz=800))\n mn.variable('Interest',\n mn.foreach(lambda savings, rate: savings * rate),\n 'Savings',\n 'Rate')\n mn.constant('Rate', FBB(foo=0.08, bar=0.09, baz=0.1))\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n self.assertEqual(SavingsVelocity[''], FBB(foo=0, bar=0, baz=0))\n m.step(2)\n self.assertAlmostEqual(SavingsVelocity[''].foo, 83.2)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 84.645)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 84.0)\n m.step(2)\n self.assertAlmostEqual(SavingsVelocity[''].foo, 89.98912)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 92.4344561)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 92.61)\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''].foo, 93.5886848)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 96.5940067)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 97.2405)\n m.reset()\n self.assertEqual(SavingsVelocity[''], FBB(foo=0, bar=0, baz=0))\n m.step()\n self.assertEqual(SavingsVelocity[''], FBB(foo=80, bar=81, baz=80)) \n\n def test_mn_named_tuple(self):\n \"\"\"Test velocity with mn_namedtuple values.\"\"\"\n FBB = mn.mn_namedtuple('FBB', ['foo', 'bar', 'baz'])\n with mn.model() as m:\n mn.stock('Savings', \n lambda x: x, 'Interest', \n FBB(foo=1000, bar=900, baz=800))\n mn.variable('Interest',\n lambda savings, rate: savings * rate,\n 
'Savings',\n 'Rate')\n mn.constant('Rate', FBB(foo=0.08, bar=0.09, baz=0.1))\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n self.assertEqual(SavingsVelocity[''], FBB(foo=0, bar=0, baz=0))\n m.step()\n self.assertEqual(SavingsVelocity[''], FBB(foo=80, bar=81, baz=80))\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''].foo, 86.4)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 88.29)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 88.0)\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''].foo, 93.312)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 96.2361)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 96.8)\n m.reset()\n self.assertEqual(SavingsVelocity[''], FBB(foo=0, bar=0, baz=0))\n m.step()\n self.assertEqual(SavingsVelocity[''], FBB(foo=80, bar=81, baz=80))\n\n def test_mn_named_tuple_timestep(self):\n \"\"\"Test velocity with mn_namedtuple values and nonzero timestep.\"\"\"\n FBB = mn.mn_namedtuple('FBB', ['foo', 'bar', 'baz'])\n with mn.model(timestep=0.5) as m:\n mn.stock('Savings', \n lambda x: x, 'Interest', \n FBB(foo=1000, bar=900, baz=800))\n mn.variable('Interest',\n lambda savings, rate: savings * rate,\n 'Savings',\n 'Rate')\n mn.constant('Rate', FBB(foo=0.08, bar=0.09, baz=0.1))\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n self.assertEqual(SavingsVelocity[''], FBB(foo=0, bar=0, baz=0))\n m.step(2)\n self.assertAlmostEqual(SavingsVelocity[''].foo, 83.2)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 84.645)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 84.0)\n m.step(2)\n self.assertAlmostEqual(SavingsVelocity[''].foo, 89.98912)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 92.4344561)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 92.61)\n m.step()\n self.assertAlmostEqual(SavingsVelocity[''].foo, 93.5886848)\n self.assertAlmostEqual(SavingsVelocity[''].bar, 96.5940067)\n self.assertAlmostEqual(SavingsVelocity[''].baz, 97.2405)\n m.reset()\n self.assertEqual(SavingsVelocity[''], FBB(foo=0, bar=0, baz=0))\n m.step()\n self.assertEqual(SavingsVelocity[''], FBB(foo=80, bar=81, baz=80)) \n\n def test_dict_tuple(self):\n \"\"\"Test velocity with dicts of tuples.\"\"\"\n with mn.model() as m:\n mn.stock('Savings', \n mn.foreach(mn.foreach(lambda x: x)), 'Interest', \n {'foo': (1000, 1050), 'bar': (900, 950), 'baz': (800, 850)})\n mn.variable('Interest',\n mn.foreach(lambda savings, rate: tuple(s * rate for s in savings)),\n 'Savings',\n 'Rate')\n mn.constant('Rate', {'foo': 0.08, 'bar': 0.09, 'baz': 0.1})\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings')\n\n self.assertEqual(\n SavingsVelocity[''], \n {'foo': (0, 0), 'bar': (0, 0), 'baz': (0, 0)})\n m.step()\n self.assertEqual(\n SavingsVelocity[''], \n {'foo': (80.0, 84.0), 'bar': (81.0, 85.5), 'baz': (80.0, 85.0)})\n m.step()\n self.assertAlmostEqual(SavingsVelocity['']['foo'][0], 86.4)\n self.assertAlmostEqual(SavingsVelocity['']['bar'][0], 88.29)\n self.assertAlmostEqual(SavingsVelocity['']['baz'][0], 88.0) \n self.assertAlmostEqual(SavingsVelocity['']['foo'][1], 90.72)\n self.assertAlmostEqual(SavingsVelocity['']['bar'][1], 93.195)\n self.assertAlmostEqual(SavingsVelocity['']['baz'][1], 93.5) \n m.reset()\n self.assertEqual(\n SavingsVelocity[''], \n {'foo': (0, 0), 'bar': (0, 0), 'baz': (0, 0)})\n m.step()\n self.assertEqual(\n SavingsVelocity[''], \n {'foo': (80.0, 84.0), 'bar': (81.0, 85.5), 'baz': (80.0, 85.0)}) \n\n def test_dict_tuple_timestep(self):\n \"\"\"Test velocity with dict of tuples and nonzero timestep.\"\"\"\n with 
mn.model(timestep=0.5) as m:\n mn.stock('Savings', \n mn.foreach(mn.foreach(lambda x: x)), 'Interest', \n {'foo': (1000, 1050), 'bar': (900, 950), 'baz': (800, 850)})\n mn.variable('Interest',\n mn.foreach(\n lambda savings, rate: tuple(s * rate for s in savings)),\n 'Savings',\n 'Rate')\n mn.constant('Rate', {'foo': 0.08, 'bar': 0.09, 'baz': 0.1})\n SavingsVelocity = mn.velocity('SavingsVelocity', 'Savings') \n\n self.assertEqual(\n SavingsVelocity[''], \n {'foo': (0, 0), 'bar': (0, 0), 'baz': (0, 0)})\n m.step(2)\n self.assertAlmostEqual(SavingsVelocity['']['foo'][0], 83.2) \n self.assertAlmostEqual(SavingsVelocity['']['foo'][1], 87.36)\n self.assertAlmostEqual(SavingsVelocity['']['bar'][0], 84.645)\n self.assertAlmostEqual(SavingsVelocity['']['bar'][1], 89.3475)\n self.assertAlmostEqual(SavingsVelocity['']['baz'][0], 84.0) \n self.assertAlmostEqual(SavingsVelocity['']['baz'][1], 89.25) \n m.step(2)\n self.assertAlmostEqual(SavingsVelocity['']['foo'][0], 89.98912) \n self.assertAlmostEqual(SavingsVelocity['']['foo'][1], 94.488576)\n self.assertAlmostEqual(SavingsVelocity['']['bar'][0], 92.4344561)\n self.assertAlmostEqual(SavingsVelocity['']['bar'][1], 97.5697037)\n self.assertAlmostEqual(SavingsVelocity['']['baz'][0], 92.61) \n self.assertAlmostEqual(SavingsVelocity['']['baz'][1], 98.398125) \n m.reset()\n self.assertEqual(\n SavingsVelocity[''], \n {'foo': (0, 0), 'bar': (0, 0), 'baz': (0, 0)})\n m.step(2)\n self.assertAlmostEqual(SavingsVelocity['']['foo'][0], 83.2) \n self.assertAlmostEqual(SavingsVelocity['']['foo'][1], 87.36)\n self.assertAlmostEqual(SavingsVelocity['']['bar'][0], 84.645)\n self.assertAlmostEqual(SavingsVelocity['']['bar'][1], 89.3475)\n self.assertAlmostEqual(SavingsVelocity['']['baz'][0], 84.0) \n self.assertAlmostEqual(SavingsVelocity['']['baz'][1], 89.25) \n\nclass PerTreatmentAlternative(unittest.TestCase):\n \"\"\"Test alternative syntax for PerTreatment.\"\"\"\n def test_alt_syntax(self):\n \"\"\"Test per_treatment.\"\"\"\n with mn.model(treatments=['as_is', 'to_be']) as m:\n Foo = mn.variable('Foo', mn.per_treatment(as_is=12, to_be=9))\n Bar = mn.stock('Bar', \n mn.per_treatment(as_is=lambda x: x, to_be=lambda x: -x),\n 'Foo',\n 100)\n self.assertEqual(Foo['as_is'], 12)\n self.assertEqual(Foo['to_be'], 9)\n m.step()\n self.assertEqual(Bar['as_is'], 112)\n self.assertEqual(Bar['to_be'], 91)\n m.step()\n self.assertEqual(Bar['as_is'], 124)\n self.assertEqual(Bar['to_be'], 82)\n\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.testing.assert_allclose",
"numpy.ndindex",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Narcissuscyn/neural-motifs | [
"3b92237a305b4ad054851f81f48f6610707dbda7"
] | [
"data/stanford_filtered/vg_to_roidb.py"
] | [
"# coding=utf8\n# --------------------------------------------------------\n# Scene Graph Generation by Iterative Message Passing\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Danfei Xu\n# --------------------------------------------------------\n\nimport argparse, json, string\nfrom collections import Counter\nimport math\n\nfrom math import floor\nimport h5py as h5\nimport numpy as np\nimport pprint\n\n\"\"\"\nA script for generating an hdf5 ROIDB from the VisualGenome dataset\n\"\"\"\n\ndef preprocess_object_labels(data, alias_dict={}):\n for img in data:\n for obj in img['objects']:\n obj['ids'] = [obj['object_id']]\n names = []\n for name in obj['names']:\n label = sentence_preprocess(name)\n if label in alias_dict:\n label = alias_dict[label]\n names.append(label)\n obj['names'] = names\n\n\ndef preprocess_predicates(data, alias_dict={}):\n for img in data:\n for relation in img['relationships']:\n predicate = sentence_preprocess(relation['predicate'])\n if predicate in alias_dict:\n predicate = alias_dict[predicate]\n relation['predicate'] = predicate\n\n\ndef extract_object_token(data, num_tokens, obj_list=[], verbose=True):\n \"\"\" Builds a set that contains the object names. Filters infrequent tokens. \"\"\"\n token_counter = Counter()\n for img in data:\n for region in img['objects']:\n for name in region['names']:\n if not obj_list or name in obj_list:\n token_counter.update([name])\n tokens = set()\n # pick top N tokens\n token_counter_return = {}\n for token, count in token_counter.most_common():\n tokens.add(token)\n token_counter_return[token] = count\n if len(tokens) == num_tokens:\n break\n if verbose:\n print(('Keeping %d / %d objects'\n % (len(tokens), len(token_counter))))\n return tokens, token_counter_return\n\n\ndef extract_predicate_token(data, num_tokens, pred_list=[], verbose=True):\n \"\"\" Builds a set that contains the relationship predicates. Filters infrequent tokens. 
\"\"\"\n token_counter = Counter()\n total = 0\n for img in data:\n for relation in img['relationships']:\n predicate = relation['predicate']\n if not pred_list or predicate in pred_list:\n token_counter.update([predicate])\n total += 1\n tokens = set()\n token_counter_return = {}\n for token, count in token_counter.most_common():\n tokens.add(token)\n token_counter_return[token] = count\n if len(tokens) == num_tokens:\n break\n if verbose:\n print(('Keeping %d / %d predicates with enough instances'\n % (len(tokens), len(token_counter))))\n return tokens, token_counter_return\n\n\ndef merge_duplicate_boxes(data):\n def IoU(b1, b2):\n if b1[2] <= b2[0] or \\\n b1[3] <= b2[1] or \\\n b1[0] >= b2[2] or \\\n b1[1] >= b2[3]:\n return 0\n\n b1b2 = np.vstack([b1,b2])\n minc = np.min(b1b2, 0)\n maxc = np.max(b1b2, 0)\n union_area = (maxc[2]-minc[0])*(maxc[3]-minc[1])\n int_area = (minc[2]-maxc[0])*(minc[3]-maxc[1])\n return float(int_area)/float(union_area)\n\n def to_x1y1x2y2(obj):\n x1 = obj['x']\n y1 = obj['y']\n x2 = obj['x'] + obj['w']\n y2 = obj['y'] + obj['h']\n return np.array([x1, y1, x2, y2], dtype=np.int32)\n\n def inside(b1, b2):\n return b1[0] >= b2[0] and b1[1] >= b2[1] \\\n and b1[2] <= b2[2] and b1[3] <= b2[3]\n\n def overlap(obj1, obj2):\n b1 = to_x1y1x2y2(obj1)\n b2 = to_x1y1x2y2(obj2)\n iou = IoU(b1, b2)\n if all(b1 == b2) or iou > 0.9: # consider as the same box\n return 1\n elif (inside(b1, b2) or inside(b2, b1))\\\n and obj1['names'][0] == obj2['names'][0]: # same object inside the other\n return 2\n elif iou > 0.6 and obj1['names'][0] == obj2['names'][0]: # multiple overlapping same object\n return 3\n else:\n return 0 # no overlap\n\n num_merged = {1:0, 2:0, 3:0}\n print('merging boxes..')\n for img in data:\n # mark objects to be merged and save their ids\n objs = img['objects']\n num_obj = len(objs)\n for i in range(num_obj):\n if 'M_TYPE' in objs[i]: # has been merged\n continue\n merged_objs = [] # circular refs, but fine\n for j in range(i+1, num_obj):\n if 'M_TYPE' in objs[j]: # has been merged\n continue\n overlap_type = overlap(objs[i], objs[j])\n if overlap_type > 0:\n objs[j]['M_TYPE'] = overlap_type\n merged_objs.append(objs[j])\n objs[i]['mobjs'] = merged_objs\n\n # merge boxes\n filtered_objs = []\n merged_num_obj = 0\n for obj in objs:\n if 'M_TYPE' not in obj:\n ids = [obj['object_id']]\n dims = [to_x1y1x2y2(obj)]\n prominent_type = 1\n for mo in obj['mobjs']:\n ids.append(mo['object_id'])\n obj['names'].extend(mo['names'])\n dims.append(to_x1y1x2y2(mo))\n if mo['M_TYPE'] > prominent_type:\n prominent_type = mo['M_TYPE']\n merged_num_obj += len(ids)\n obj['ids'] = ids\n mdims = np.zeros(4)\n if prominent_type > 1: # use extreme\n mdims[:2] = np.min(np.vstack(dims)[:,:2], 0)\n mdims[2:] = np.max(np.vstack(dims)[:,2:], 0)\n else: # use mean\n mdims = np.mean(np.vstack(dims), 0)\n obj['x'] = int(mdims[0])\n obj['y'] = int(mdims[1])\n obj['w'] = int(mdims[2] - mdims[0])\n obj['h'] = int(mdims[3] - mdims[1])\n\n num_merged[prominent_type] += len(obj['mobjs'])\n\n obj['mobjs'] = None\n obj['names'] = list(set(obj['names'])) # remove duplicates\n\n filtered_objs.append(obj)\n else:\n assert 'mobjs' not in obj\n\n img['objects'] = filtered_objs\n assert(merged_num_obj == num_obj)\n\n print('# merged boxes per merging type:')\n print(num_merged)\n\n\ndef build_token_dict(vocab):\n \"\"\" build bi-directional mapping between index and token\"\"\"\n token_to_idx, idx_to_token = {}, {}\n next_idx = 1\n vocab_sorted = sorted(list(vocab)) # make sure it's the same order 
everytime\n for token in vocab_sorted:\n token_to_idx[token] = next_idx\n idx_to_token[next_idx] = token\n next_idx = next_idx + 1\n\n return token_to_idx, idx_to_token\n\n\ndef encode_box(region, org_h, org_w, im_long_size):\n x = region['x']\n y = region['y']\n w = region['w']\n h = region['h']\n scale = float(im_long_size) / max(org_h, org_w)\n image_size = im_long_size\n # recall: x,y are 1-indexed\n x, y = math.floor(scale*(region['x']-1)), math.floor(scale*(region['y']-1))\n w, h = math.ceil(scale*region['w']), math.ceil(scale*region['h'])\n\n # clamp to image\n if x < 0: x = 0\n if y < 0: y = 0\n\n # box should be at least 2 by 2\n if x > image_size - 2:\n x = image_size - 2\n if y > image_size - 2:\n y = image_size - 2\n if x + w >= image_size:\n w = image_size - x\n if y + h >= image_size:\n h = image_size - y\n\n # also convert to center-coord oriented\n box = np.asarray([x+floor(w/2), y+floor(h/2), w, h], dtype=np.int32)\n assert box[2] > 0 # width height should be positive numbers\n assert box[3] > 0\n return box\n\n\ndef encode_objects(obj_data, token_to_idx, token_counter, org_h, org_w, im_long_sizes):\n encoded_labels = []\n encoded_boxes = {}\n for size in im_long_sizes:\n encoded_boxes[size] = []\n im_to_first_obj = np.zeros(len(obj_data), dtype=np.int32)#记录对于每个图像的box在所有图像box这个list中的起始位置\n im_to_last_obj = np.zeros(len(obj_data), dtype=np.int32)#记录对于每个图像的box在所有图像box这个list中的结束位置\n obj_counter = 0\n\n for i, img in enumerate(obj_data):\n im_to_first_obj[i] = obj_counter\n img['id_to_idx'] = {} # object id to region idx\n for obj in img['objects']:\n # pick a label for the object\n max_occur = 0\n obj_label = None\n for name in obj['names']:\n # pick the name that has maximum occurance\n if name in token_to_idx and token_counter[name] > max_occur:\n obj_label = name\n max_occur = token_counter[obj_label]\n\n if obj_label is not None:\n # encode region\n for size in im_long_sizes:\n encoded_boxes[size].append(encode_box(obj, org_h[i], org_w[i], size))\n\n encoded_labels.append(token_to_idx[obj_label])\n\n for obj_id in obj['ids']: # assign same index for merged ids\n img['id_to_idx'][obj_id] = obj_counter\n\n obj_counter += 1\n\n\n if im_to_first_obj[i] == obj_counter:\n im_to_first_obj[i] = -1\n im_to_last_obj[i] = -1\n else:\n im_to_last_obj[i] = obj_counter - 1\n\n for k, boxes in encoded_boxes.items():\n encoded_boxes[k] = np.vstack(boxes)\n return np.vstack(encoded_labels), encoded_boxes, im_to_first_obj, im_to_last_obj\n\n\ndef encode_relationship(sub_id, obj_id, id_to_idx):\n # builds a tuple of the index of object and subject in the object list\n sub_idx = id_to_idx[sub_id]\n obj_idx = id_to_idx[obj_id]\n return np.asarray([sub_idx, obj_idx], dtype=np.int32)\n\n\ndef encode_relationships(rel_data, token_to_idx, obj_data):\n \"\"\"MUST BE CALLED AFTER encode_objects!!!\"\"\"\n encoded_pred = [] # encoded predicates\n encoded_rel = [] # encoded relationship tuple\n im_to_first_rel = np.zeros(len(rel_data), dtype=np.int32)\n im_to_last_rel = np.zeros(len(rel_data), dtype=np.int32)\n rel_idx_counter = 0\n\n no_rel_counter = 0\n obj_filtered = 0\n predicate_filtered = 0\n duplicate_filtered = 0\n for i, img in enumerate(rel_data):\n im_to_first_rel[i] = rel_idx_counter\n id_to_idx = obj_data[i]['id_to_idx'] # object id to object list idx\n for relation in img['relationships']:\n subj = relation['subject']\n obj = relation['object']\n predicate = relation['predicate']\n if subj['object_id'] not in id_to_idx or obj['object_id'] not in id_to_idx:\n obj_filtered += 1\n 
continue\n elif predicate not in token_to_idx:\n predicate_filtered += 1\n continue\n elif id_to_idx[subj['object_id']] == id_to_idx[obj['object_id']]: # sub and obj can't be the same box\n duplicate_filtered += 1\n continue\n else:\n encoded_pred.append(token_to_idx[predicate])\n encoded_rel.append(\n encode_relationship(subj['object_id'],\n obj['object_id'],\n id_to_idx\n ))\n rel_idx_counter += 1 # accumulate counter\n\n if im_to_first_rel[i] == rel_idx_counter:\n # if no qualifying relationship\n im_to_first_rel[i] = -1\n im_to_last_rel[i] = -1\n no_rel_counter += 1\n else:\n im_to_last_rel[i] = rel_idx_counter - 1\n print('%i rel is filtered by object' % obj_filtered)\n print('%i rel is filtered by predicate' % predicate_filtered)\n print('%i rel is filtered by duplicate' % duplicate_filtered)\n print('%i rel remains ' % len(encoded_pred))\n\n print('%i out of %i valid images have relationships' % (len(rel_data)-no_rel_counter, len(rel_data)))\n return np.vstack(encoded_pred), np.vstack(encoded_rel), im_to_first_rel, im_to_last_rel\n\n\ndef sentence_preprocess(phrase):\n \"\"\" preprocess a sentence: lowercase, clean up weird chars, remove punctuation \"\"\"\n replacements = {\n '½': 'half',\n '—' : '-',\n '™': '',\n '¢': 'cent',\n 'ç': 'c',\n 'û': 'u',\n 'é': 'e',\n '°': ' degree',\n 'è': 'e',\n '…': '',\n }\n phrase = phrase.encode('utf-8')\n phrase = phrase.lstrip(' ').rstrip(' ')\n for k, v in replacements.iteritems():\n phrase = phrase.replace(k, v)\n return str(phrase).lower().translate(None, string.punctuation).decode('utf-8', 'ignore')\n\n\ndef encode_splits(obj_data, opt=None):\n if opt is not None:\n val_begin_idx = opt['val_begin_idx']\n test_begin_idx = opt['test_begin_idx']\n split = np.zeros(len(obj_data), dtype=np.int32)\n for i, info in enumerate(obj_data):\n splitix = 0\n if opt is None: # use encode from input file\n s = info['split']\n if s == 'val': splitix = 1\n if s == 'test': splitix = 2\n else: # use portion split\n if i >= val_begin_idx: splitix = 1\n if i >= test_begin_idx: splitix = 2\n split[i] = splitix\n if opt is not None and opt['shuffle']:\n np.random.shuffle(split)\n\n print(('assigned %d/%d/%d to train/val/test split' % (np.sum(split==0), np.sum(split==1), np.sum(split==2))))\n return split\n\n\ndef make_alias_dict(dict_file):\n \"\"\"create an alias dictionary from a file\"\"\"\n out_dict = {}\n vocab = []\n for line in open(dict_file, 'r'):\n alias = line.strip('\\n').strip('\\r').split(',')\n alias_target = alias[0] if alias[0] not in out_dict else out_dict[alias[0]]\n for a in alias:\n out_dict[a] = alias_target # use the first term as the aliasing target\n vocab.append(alias_target)\n return out_dict, vocab\n\n\ndef make_list(list_file):\n \"\"\"create a blacklist list from a file\"\"\"\n return [line.strip('\\n').strip('\\r') for line in open(list_file)]\n\n\ndef filter_object_boxes(data, heights, widths, area_frac_thresh):\n \"\"\"\n filter boxes by a box area-image area ratio threshold\n \"\"\"\n thresh_count = 0\n all_count = 0\n for i, img in enumerate(data):\n filtered_obj = []\n area = float(heights[i]*widths[i])\n for obj in img['objects']:\n if float(obj['h'] * obj['w']) > area * area_frac_thresh:\n filtered_obj.append(obj)\n thresh_count += 1\n all_count += 1\n img['objects'] = filtered_obj\n print('box threshod: keeping %i/%i boxes' % (thresh_count, all_count))\n\n\ndef filter_by_idx(data, valid_list):\n return [data[i] for i in valid_list]\n\n\ndef obj_rel_cross_check(obj_data, rel_data, verbose=False):\n \"\"\"\n make sure all 
objects that are in relationship dataset\n are in object dataset\n \"\"\"\n num_img = len(obj_data)\n num_correct = 0\n total_rel = 0\n for i in range(num_img):\n assert(obj_data[i]['image_id'] == rel_data[i]['image_id'])\n objs = obj_data[i]['objects']\n rels = rel_data[i]['relationships']\n ids = [obj['object_id'] for obj in objs]\n for rel in rels:\n if rel['subject']['object_id'] in ids \\\n and rel['object']['object_id'] in ids:\n num_correct += 1\n elif verbose:\n if rel['subject']['object_id'] not in ids:\n print(str(rel['subject']['object_id']) + 'cannot be found in ' + str(i))\n if rel['object']['object_id'] not in ids:\n print(str(rel['object']['object_id']) + 'cannot be found in ' + str(i))\n total_rel += 1\n print('cross check: %i/%i relationship are correct' % (num_correct, total_rel))\n\n\ndef sync_objects(obj_data, rel_data):\n num_img = len(obj_data)\n for i in range(num_img):\n assert(obj_data[i]['image_id'] == rel_data[i]['image_id'])\n objs = obj_data[i]['objects']\n rels = rel_data[i]['relationships']\n\n ids = [obj['object_id'] for obj in objs]\n for rel in rels:\n if rel['subject']['object_id'] not in ids:\n rel_obj = rel['subject']\n rel_obj['names'] = [rel_obj['name']]\n objs.append(rel_obj)\n if rel['object']['object_id'] not in ids:\n rel_obj = rel['object']\n rel_obj['names'] = [rel_obj['name']]\n objs.append(rel_obj)\n\n obj_data[i]['objects'] = objs\n\n\ndef main(args):\n print('start')\n pprint.pprint(args)\n\n obj_alias_dict = {}\n if len(args.object_alias) > 0:\n print('using object alias from %s' % (args.object_alias))\n obj_alias_dict, obj_vocab_list = make_alias_dict(args.object_alias)\n\n pred_alias_dict = {}\n if len(args.pred_alias) > 0:\n print('using predicate alias from %s' % (args.pred_alias))\n pred_alias_dict, pred_vocab_list = make_alias_dict(args.pred_alias)\n\n obj_list = []\n if len(args.object_list) > 0:\n print('using object list from %s' % (args.object_list))\n obj_list = make_list(args.object_list)\n assert(len(obj_list) >= args.num_objects)\n\n pred_list = []\n if len(args.pred_list) > 0:\n print('using predicate list from %s' % (args.pred_list))\n pred_list = make_list(args.pred_list)\n assert(len(obj_list) >= args.num_predicates)\n\n # read in the annotation data\n print('loading json files..')\n obj_data = json.load(open(args.object_input))\n rel_data = json.load(open(args.relationship_input))\n img_data = json.load(open(args.metadata_input))\n assert(len(rel_data) == len(obj_data) and\n len(obj_data) == len(img_data))\n\n print('read image db from %s' % args.imdb)\n imdb = h5.File(args.imdb, 'r')\n num_im, _, _, _ = imdb['images'].shape\n img_long_sizes = [512, 1024]\n valid_im_idx = imdb['valid_idx'][:] # valid image indices\n img_ids = imdb['image_ids'][:]\n\n ##去掉那些没有有效图片的object\\relationship\\image_metadata的信息\n obj_data = filter_by_idx(obj_data, valid_im_idx)#valid_im_idx是一个list,表示哪些位置的图片是有效的;\n # obj_data则是以一张图片为单元的list,每个单元对应了物体的信息,包括bounding box等。\n rel_data = filter_by_idx(rel_data, valid_im_idx)\n img_data = filter_by_idx(img_data, valid_im_idx)\n\n # sanity check\n for i in range(num_im):\n assert(obj_data[i]['image_id'] \\\n == rel_data[i]['image_id'] \\\n == img_data[i]['image_id'] \\\n == img_ids[i]\n )\n\n # may only load a fraction of the data\n if args.load_frac < 1:\n num_im = int(num_im*args.load_frac)\n obj_data = obj_data[:num_im]\n rel_data = rel_data[:num_im]\n print('processing %i images' % num_im)\n\n # sync objects from rel to obj_data\n sync_objects(obj_data, rel_data)\n\n obj_rel_cross_check(obj_data, 
rel_data)\n\n # preprocess label data\n preprocess_object_labels(obj_data, alias_dict=obj_alias_dict)\n preprocess_predicates(rel_data, alias_dict=pred_alias_dict)\n\n heights, widths = imdb['original_heights'][:], imdb['original_widths'][:]\n if args.min_box_area_frac > 0:\n # filter out invalid small boxes\n print('threshold bounding box by %f area fraction' % args.min_box_area_frac)\n filter_object_boxes(obj_data, heights, widths, args.min_box_area_frac) # filter by box dimensions\n\n merge_duplicate_boxes(obj_data)\n\n # build vocabulary\n object_tokens, object_token_counter = extract_object_token(obj_data, args.num_objects,\n obj_list)\n\n label_to_idx, idx_to_label = build_token_dict(object_tokens)\n\n predicate_tokens, predicate_token_counter = extract_predicate_token(rel_data,\n args.num_predicates,\n pred_list)\n predicate_to_idx, idx_to_predicate = build_token_dict(predicate_tokens)\n\n # print out vocabulary\n print('objects: ')\n print(object_token_counter)\n print('relationships: ')\n print(predicate_token_counter)\n\n # write the h5 file\n f = h5.File(args.h5_file, 'w')\n\n # encode object\n encoded_label, encoded_boxes, im_to_first_obj, im_to_last_obj = \\\n encode_objects(obj_data, label_to_idx, object_token_counter, \\\n heights, widths, img_long_sizes)\n\n f.create_dataset('labels', data=encoded_label)\n for k, boxes in encoded_boxes.items():\n f.create_dataset('boxes_%i' % k, data=boxes)\n f.create_dataset('img_to_first_box', data=im_to_first_obj)\n f.create_dataset('img_to_last_box', data=im_to_last_obj)\n\n encoded_predicate, encoded_rel, im_to_first_rel, im_to_last_rel = \\\n encode_relationships(rel_data, predicate_to_idx, obj_data)\n\n f.create_dataset('predicates', data=encoded_predicate)\n f.create_dataset('relationships', data=encoded_rel)\n f.create_dataset('img_to_first_rel', data=im_to_first_rel)\n f.create_dataset('img_to_last_rel', data=im_to_last_rel)\n\n # build train/val/test splits\n\n print('num objects = %i' % encoded_label.shape[0])\n print('num relationships = %i' % encoded_predicate.shape[0])\n\n\n opt = None\n if not args.use_input_split:\n opt = {}\n opt['val_begin_idx'] = int(len(obj_data) * args.train_frac)\n opt['test_begin_idx'] = int(len(obj_data) * args.val_frac)\n opt['shuffle'] = args.shuffle\n split = encode_splits(obj_data, opt)\n\n if split is not None:\n f.create_dataset('split', data=split) # 1 = test, 0 = train\n\n # and write the additional json file\n json_struct = {\n 'label_to_idx': label_to_idx,\n 'idx_to_label': idx_to_label,\n 'predicate_to_idx': predicate_to_idx,\n 'idx_to_predicate': idx_to_predicate,\n 'predicate_count': predicate_token_counter,\n 'object_count': object_token_counter\n }\n\n with open(args.json_file, 'w') as f:\n json.dump(json_struct, f)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--imdb', default='VG/imdb_1024.h5', type=str)\n parser.add_argument('--object_input', default='VG/objects.json', type=str)\n parser.add_argument('--relationship_input', default='VG/relationships.json', type=str)\n parser.add_argument('--metadata_input', default='VG/image_data.json', type=str)\n parser.add_argument('--object_alias', default='VG/object_alias.txt', type=str)\n parser.add_argument('--pred_alias', default='VG/predicate_alias.txt', type=str)\n parser.add_argument('--object_list', default='VG/object_list.txt', type=str)\n parser.add_argument('--pred_list', default='VG/predicate_list.txt', type=str)\n parser.add_argument('--num_objects', default=150, type=int, help=\"set to 
0 to disable filtering\")\n parser.add_argument('--num_predicates', default=50, type=int, help=\"set to 0 to disable filtering\")\n parser.add_argument('--min_box_area_frac', default=0.002, type=float)\n parser.add_argument('--json_file', default='VG-dicts.json')\n parser.add_argument('--h5_file', default='VG.h5')\n parser.add_argument('--load_frac', default=1, type=float)\n parser.add_argument('--use_input_split', default=False, type=bool)\n parser.add_argument('--train_frac', default=0.7, type=float)\n parser.add_argument('--val_frac', default=0.7, type=float)\n parser.add_argument('--shuffle', default=False, type=bool)\n\n args = parser.parse_args()\n main(args)\n"
] | [
[
"numpy.min",
"numpy.asarray",
"numpy.random.shuffle",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shukali/dimensionality-reduction-comparison | [
"3c2de0d7c48516dc5af6cfd9ec58307a1ed2e417"
] | [
"Datasets/AutoMpgLoader.py"
] | [
"import pandas as pd\nfrom sklearn import datasets\n\ndef load_autompg():\n ''' Gets the Auto MPG dataset. Returns a tuple (data, target) containing the dataset and the labels.\n\n data: The 392 x 8 data matrix containing the features. (n_samples = 392, n_features = 8)\n Order: (mpg, cylinders, displacement, horsepower, weight, acceleration, model year, origin)\n target: The 392 x 1 label vector containing the names of the cars, each being unique\n\n https://archive.ics.uci.edu/ml/datasets/auto+mpg\n \n NOTE: The horsepower feature has 6 missing values which are marked with '?' in the original dataset. \n The original dataset has 398 entries but we only use the complete entries (392).\n '''\n data = pd.read_csv('Datasets/AutoMPG/auto-mpg.csv', index_col='name')\n data = data[data.horsepower != '?']\n data.horsepower = data.horsepower.astype('float')\n return data.values, data.index"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Similarities/shot_discriminator-pandas | [
"c893d92c92e30a34a5ac55dc9f1d66e0546f3922"
] | [
"Search_mean_single_sorted03.py"
] | [
"__author__ = 'similarities'\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pylab\n\n\noriginal_df = pd.read_excel(\"fundamentalshifts9_selection.xlsx\")\n\nclass Sort_and_Copy_Dataframe:\n\n def __init__(self, database):\n self.orginal = database\n self.copy = self.orginal.copy()\n #self.copy = self.copy.replace(np.nan, True, regex=True)\n self.tricks = []\n self.auswahl = []\n self.label_set = []\n\n\n\n\n def drop_unnamed_columns(self):\n\n self.auswahl = self.copy.columns.str.match('Unnamed')\n empty_column_index = [i for i, x in enumerate(self.auswahl) if x]\n\n for i in range(len(empty_column_index)):\n\n number = empty_column_index[i]\n\n delete_column = \"Unnamed: \"+str(number)\n\n self.copy.drop(labels = [delete_column], axis = 1, inplace = True)\n\n return self.copy\n\n\n def get_label_names(self):\n self.label_set = set(self.copy.columns)\n #print(label_set, \"labels in dfs..\")\n print(\"number:\", len(self.label_set))\n print(self.label_set)\n #eturn label_set\n\n\n def drop_columns(self, trick):\n\n self.tricks = trick\n self.copy.drop(labels=self.tricks, axis=1, inplace = True)\n self.label_set = set(self.copy.columns)\n\n\n\n\n\n\n def get_new_df(self):\n\n return self.copy\n\n\n\nmam = Sort_and_Copy_Dataframe(original_df)\nmam.drop_unnamed_columns()\nmam.get_label_names()\nmam.drop_columns([\"z steps\", 'Comment 1', \"side peaks\", \"EL (corrected)\", \"comment2\", \"GVD\", \"CWE central\", \"2w center\", \"central2\", \"divergence\", 'category, x,y,z', 'HHG_E in uJ full range'])\nmam.get_label_names()\nRESULT = mam.get_new_df()\n\nprint(RESULT)\n\nclass Sorting_by_parameters:\n\n def __init__(self, database, name, save_bool):\n self.orginal = database\n self.copy = self.orginal.copy()\n self.tricks = []\n self.auswahl1 = []\n self.auswahl2 =[]\n self.auswahl = []\n self.auswahl3 = []\n self.label_set = []\n self.GVDneg = self.copy['GVD in fs^2'] < 0\n self.GVD0 = self.copy['GVD in fs^2'] < 301\n self.GVD0_2 = self.copy['GVD in fs^2'] >= 0\n self.GVD300 = self.copy['GVD in fs^2'] > 300\n self.GVD600 = self.copy['GVD in fs^2'] > 300\n self.GVD600_2 = self.copy['GVD in fs^2'] < 700\n self.GVD900 = self.copy['GVD in fs^2'] >= 900\n\n self.N_CWE = self.copy[\"Nmax\"] > 22\n self.ROM_2 = self.copy[\"Nmax\"] >= 25\n self.EL_low = self.copy['EL on target']<1.8\n self.EL_high = self.copy['EL on target'] >= 1.8\n self.z_1000 = self.copy['z um'] <= 1000.\n self.z_2000 = self.copy['z um'] > 1000.\n self.z_3000 = self.copy['z um'] > 2000.\n self.auswahl_GVD0 = []\n self.auswahl_GVD600 = []\n self.auswahl_GVD900 = []\n self.auswahl_GVDneg = []\n\n self.day = int\n\n self.name = name\n self.name_original = name\n self.save = save_bool\n self.name_attributes =\"_\"\n self.marker = int\n self.selection_members = ()\n\n\n def reset_name(self):\n\n self.name = self.name_original\n\n return self.name\n\n\n\n\n def PP_out(self):\n is_out = self.copy[\"PP in out\"] == \"out\"\n # print(self.copy[\"PP in out\"])\n self.copy = self.copy[is_out][self.copy.columns]\n self.name_attributes = self.name_attributes +\"PPout\"\n\n return self.copy, self.name_attributes\n\n\n\n def PP_in(self):\n is_in = self.copy[\"PP in out\"] != \"out\"\n # print(self.copy[\"PP in out\"])\n self.copy = self.copy[is_in][self.copy.columns]\n self.name_attributes = self.name_attributes +\"PPin\"\n\n\n return self.copy, self.name_attributes\n\n\n\n\n def sort_by_day(self, day):\n\n if day == 0:\n\n self.day = 0\n\n return self.copy, self.day\n\n\n else:\n self.day = day\n day_sort = 
self.copy[\"day\"] == day\n self.copy = self.copy[day_sort][self.copy.columns]\n #print(self.copy[[\"day\"]])\n return self.copy, self.day\n\n\n\n\n\n\n def reset_auswahl(self):\n\n self.auswahl = []\n self.auswahl1 = []\n self.auswahl2 = []\n self.auswahl3 = []\n\n return self.auswahl, self.auswahl1, self.auswahl2, self.auswahl3\n\n\n def high_energy(self, energy):\n above_EL = self.copy[\"EL on target\"] >= energy\n self.copy = self.copy[above_EL][self.copy.columns]\n self.name_attributes = self.name_attributes +\"_highEL\"\n\n\n return self.copy, self.name_attributes\n\n\n\n\n\n\n def low_energy(self, energy):\n\n below_EL = self.copy[\"EL on target\"] <= energy\n\n self.copy = self.copy[below_EL][self.copy.columns]\n\n print(self.copy[\"EL on target\"], \"low Energy range\")\n\n self.name_attributes = self.name_attributes +\"_lowEL\"\n\n\n return self.copy, self.name_attributes\n\n\n\n\n\n def set_nan_to_number(self, dataframe, C, column_name):\n\n\n dataframe= dataframe.replace({column_name:np.nan},0)\n\n #print(dataframe[[column_name]], \"nan inside?\")\n\n\n return dataframe\n\n\n\n\n def all_or_one_day(self):\n\n\n if self.day == 0:\n\n self.day = \"_All_\"\n\n else:\n self.day = self.day\n\n return self.day\n\n\n\n\n\n def save_picture(self, y_label):\n\n\n self.all_or_one_day()\n\n\n if self.save == True:\n\n\n print(\"picture saved: \", self.name+str(self.day)+y_label+ self.name_attributes+\".png\")\n\n\n plt.savefig(self.name+str(self.day)+y_label+ self.name_attributes+\".png\", bbox_inches=\"tight\", dpi = 1000)\n\n\n\n else:\n\n\n print(\"no picture saved -- set bool to True\")\n\n\n\n\n def energy_content_vs_z_highE(self, energy_column_name):\n\n\n self.GVD_selection_dataframe()\n\n\n self.auswahl_GVDneg.dropna()\n\n\n self.auswahl_GVD0.dropna()\n\n\n self.auswahl_GVD600.dropna()\n\n self.auswahl_GVD900.dropna()\n\n\n\n x_label = \"z um\"\n\n y_label= energy_column_name\n\n\n self.reset_marker()\n\n\n\n self.plot_together_in_one_graph( \"b\", self.selection_members[0], self.auswahl_GVDneg, x_label, y_label)\n\n self.plot_together_in_one_graph( \"m\", self.selection_members[1], self.auswahl_GVD0, x_label, y_label)\n\n self.plot_together_in_one_graph( \"g\", self.selection_members[2], self.auswahl_GVD600, x_label, y_label)\n\n self.plot_together_in_one_graph( \"r\", self.selection_members[3], self.auswahl_GVD900, x_label, y_label)\n\n\n\n\n\n\n self.save_picture(y_label)\n\n self.reset_name_list()\n\n\n plt.show()\n\n\n\n\n\n def GVD_selection_dataframe(self):\n\n\n self.auswahl_GVDneg= self.copy[self.GVDneg][self.copy.columns]\n\n\n self.auswahl_GVD0 = self.copy[self.GVD0][self.copy.columns]\n self.auswahl_GVD0 = self.auswahl_GVD0[self.GVD0_2][self.copy.columns]\n\n\n self.auswahl_GVD600 = self.copy[self.GVD600_2][self.copy.columns]\n self.auswahl_GVD600 = self.auswahl_GVD600[ self.GVD600][self.copy.columns]\n\n\n self.auswahl_GVD900 = self.copy[self.GVD900][self.copy.columns]\n\n\n print(self.auswahl_GVDneg[[\"shot\", \"z um\", \"GVD in fs^2\"]], \"GVD<0\")\n\n print(self.auswahl_GVD0[[\"shot\", \"z um\", \"GVD in fs^2\"]], \"GVD0\")\n\n print(self.auswahl_GVD600[[\"shot\", \"z um\", \"GVD in fs^2\"]], \"GVD 600\")\n\n print( self.auswahl_GVD900[[\"shot\", \"z um\", \"GVD in fs^2\"]], \"GVD900\")\n\n self.selection_members = (\"GVD -600 - 0\", \"GVD 0 - 300\",\"GVD 400 600\",\"GVD 700-1100\")\n\n\n return self.auswahl_GVDneg, self.auswahl_GVD0, self.auswahl_GVD600, self.auswahl_GVD900, self.selection_members\n\n\n\n\n\n def mean_energy_GVD_vs_z(self, 
energy_coloumnlabel):\n\n\n\n x_label = \"z um\"\n\n z_values = list(range(-40,50))\n\n scale_factor_x = 100\n\n energy_column = energy_coloumnlabel\n\n self.GVD_selection_dataframe()\n\n\n\n self.mean_of_something_vs_something(self.auswahl_GVDneg, z_values, scale_factor_x, x_label, energy_column, energy_coloumnlabel+\"[uJ]\", \"GVD -600 - (-300)\")\n\n self.mean_of_something_vs_something(self.auswahl_GVD0, z_values, scale_factor_x, x_label, energy_column, energy_coloumnlabel+\"[uJ]\", \"GVD 0 - 300\")\n\n self.mean_of_something_vs_something(self.auswahl_GVD600, z_values, scale_factor_x, x_label, energy_column, energy_coloumnlabel+\"[uJ]\", \"GVD 400 - 600\")\n\n self.mean_of_something_vs_something(self.auswahl_GVD900, z_values, scale_factor_x, x_label, energy_column, energy_coloumnlabel+\"[uJ]\", \"GVD 700 - 1100\")\n\n\n\n self.reset_name()\n\n self.name = self.name+\"mean\"\n\n self.save_picture(energy_coloumnlabel)\n\n\n plt.show()\n\n\n\n\n\n\n\n def mean_of_something_vs_something(self, auswahl, parameter_x, scale_factor_x, x_label, parameter_sorted, y_label, name):\n\n #auswahl.sort_values(by=['z um'])\n\n\n\n result_array_z_meanEnergy = np.zeros([1,3])\n\n #print(result_array_z_meanEnergy)\n\n self.reset_auswahl()\n\n #print(auswahl, \"reseted\")\n\n for x in range(0, len(parameter_x)):\n #print(auswahl[x_label])\n\n\n criteria = auswahl[x_label] == parameter_x[x]*scale_factor_x\n\n self.auswahl= auswahl[criteria][parameter_sorted]\n\n\n #print(auswahl[criteria][parameter_sorted], \"entries for mean value\", parameter_x[x], name)\n\n\n mean_var = self.auswahl.mean(axis = 0, skipna = True)\n\n #print(mean_var, \"mittelwert x_paramerter at:\", parameter_x[x])\n #print (result_array_z_meanEnergy)\n\n\n std_var=self.auswahl.std()\n\n\n\n\n if np.isnan(mean_var) or mean_var == 0:\n\n\n None\n\n\n\n elif np.isnan(std_var):\n\n None\n\n\n else:\n\n result_array_z_meanEnergy = np.vstack((result_array_z_meanEnergy,np.array([parameter_x[x]*scale_factor_x, mean_var, std_var])))\n\n #print (result_array_z_meanEnergy)\n\n\n\n if len(result_array_z_meanEnergy) < 1:\n\n\n print(\"nothing to plot for mean in: \", name)\n\n\n\n else:\n\n\n plt.figure(3)\n\n errY = result_array_z_meanEnergy[1::,2]\n\n errX = 200\n #plt.scatter(result_array_z_meanEnergy[1::,0],result_array_z_meanEnergy[1::,1],color = \"c\", marker=\".\", label=name)\n\n plt.errorbar(result_array_z_meanEnergy[1::,0] + errX, result_array_z_meanEnergy[1::,1]+errY, xerr=errX, yerr=errY, fmt='o', label= name, alpha=0.3)\n\n plt.xlabel(x_label)\n\n plt.ylabel(y_label)\n\n plt.legend()\n\n\n\n #print(parameter_x[:], \"schritte\")\n\n #print(result_array_z_meanEnergy[1::], \"mean_value for\", parameter_sorted, \" vs \", x_label, \" criteria: \", name)\n\n\n\n\n\n\n\n\n def plot_result(self, color, name, auswahl, x_label, y_label):\n\n\n\n\n\n #print(auswahl[x_label], \"x Achse\")\n\n #print(auswahl[y_label], \"y Achse\")\n\n print(auswahl[x_label].count(), \"len\")\n\n print(auswahl[y_label].count(), \"len y\", name)\n\n\n\n if auswahl[x_label].count() == 0:\n\n print(\"empty dataframe in:\", x_label)\n\n\n\n elif auswahl[y_label].count() == 0:\n\n print(\"empty dataframe in:\", y_label)\n\n\n\n else:\n\n plt.scatter(x = auswahl[x_label], y = auswahl[y_label], color = color, marker = '.', alpha = 0.2, label=name )\n\n plt.xlabel(x_label)\n\n plt.ylabel(y_label)\n\n plt.legend()\n\n plt.show()\n\n\n\n\n\n\n\n\n def plot_together_in_one_graph(self,color, name, auswahl, x_label, y_label):\n\n\n\n\n x1= auswahl[x_label]\n\n y1 = 
auswahl[y_label]\n\n\n marker_list= (\".\", \"x\",\"+\",\"<\", \">\")\n\n\n marker = marker_list[self.marker]\n\n print (marker)\n\n\n if x1.count() == 0:\n\n print(\"empty dataframe in:\", x_label)\n\n elif y1.count() == 0:\n\n print(\"empty dataframe in:\", y_label)\n\n\n else:\n\n\n self.marker = self.marker + 1\n\n plt.scatter(x1, y1, color = color, alpha = 0.2, label=name, marker = marker )\n\n plt.xlabel(x_label)\n\n plt.ylabel(y_label)\n\n plt.legend()\n\n\n\n def reset_marker(self):\n\n self.marker = 0\n\n return self.marker\n\n\n\n\n\n\n\n\n def mean_fundamental_GVD_vs_z(self, y_label):\n\n\n self.GVD_selection_dataframe()\n\n\n\n y_label = y_label\n\n x_label = \"z um\"\n\n parameter_x = list(range(-50, 50))\n\n\n self.mean_of_something_vs_something(self.auswahl_GVDneg, parameter_x, 100, x_label, y_label, y_label+\"[nm]\", self.selection_members[0])\n\n\n self.mean_of_something_vs_something(self.auswahl_GVD0, parameter_x, 100, x_label, y_label, y_label+\"[nm]\", self.selection_members[1])\n\n\n self.mean_of_something_vs_something(self.auswahl_GVD600, parameter_x, 100, x_label, y_label, y_label+\"[nm]\", self.selection_members[2])\n\n\n\n self.mean_of_something_vs_something(self.auswahl_GVD900, parameter_x, 100, x_label, y_label, y_label+\"[nm]\", self.selection_members[3])\n\n\n\n\n\n plt.ylim(760, 860)\n\n\n self.reset_name()\n\n\n self.name = self.name + \"mean\"\n\n\n self.save_picture(y_label)\n\n\n self.reset_name_list()\n\n\n\n plt.show()\n\n\n\n\n\n def fundamental_GVD_vs_z(self, y_label):\n\n\n x_label = \"z um\"\n\n self.GVD_selection_dataframe()\n\n self.zero_to_none(self.auswahl_GVDneg,x_label, y_label)\n self.zero_to_none(self.auswahl_GVD0, x_label, y_label)\n self.zero_to_none(self.auswahl_GVD600, x_label, y_label)\n self.zero_to_none(self.auswahl_GVD900, x_label, y_label)\n\n print(self.selection_members, \"tuple\")\n\n\n\n self.reset_marker()\n\n\n self.plot_together_in_one_graph(\"b\", self.selection_members[0], self.auswahl_GVDneg, x_label, y_label)\n self.plot_together_in_one_graph('m', self.selection_members[1], self.auswahl_GVD0, x_label, y_label)\n self.plot_together_in_one_graph('g', self.selection_members[2], self.auswahl_GVD600, x_label, y_label)\n self.plot_together_in_one_graph('r', self.selection_members[3], self.auswahl_GVD900, x_label, y_label)\n\n\n self.reset_name_list()\n plt.show()\n\n\n\n\n\n\n def GVD_N25_div_vs_z(self):\n\n\n x_label = \"z um\"\n\n y_label = 'divergence N=25'\n\n\n\n self.GVD_selection_dataframe()\n\n self.zero_to_none(self.auswahl_GVDneg, x_label, y_label)\n self.zero_to_none(self.auswahl_GVD0, x_label, y_label)\n self.zero_to_none(self.auswahl_GVD600, x_label, y_label)\n self.zero_to_none(self.auswahl_GVD900, x_label, y_label)\n\n self.reset_marker()\n\n plt.figure(1)\n\n\n self.plot_together_in_one_graph(\"k\",self.selection_members[1],self.auswahl_GVD0, x_label, y_label)\n\n\n self.plot_together_in_one_graph(\"g\",self.selection_members[2],self.auswahl_GVD600, x_label, y_label)\n\n\n self.plot_together_in_one_graph(\"r\",self.selection_members[3],self.auswahl_GVD900, x_label, y_label)\n\n\n\n plt.ylabel(\"divergence [mrad]\")\n\n plt.xlabel(\"z [um]\")\n\n plt.ylim(1,20)\n\n plt.xlim(0,4500)\n\n plt.legend()\n\n\n self.save_picture(y_label)\n\n plt.show()\n\n self.reset_name_list()\n\n\n\n\n\n\n\n\n\n def zero_to_none(self, auswahl, x_label, y_label):\n\n\n\n auswahl = auswahl.replace({x_label: 0.}, np.nan)[[y_label, \"z um\", 'shot', 'day']]\n\n return auswahl\n\n\n\n\n\n def reset_name_list(self):\n\n\n\n liste = ()\n\n 
return liste\n\n\n\n\n\n\n def mean_GVD_div_N25(self):\n\n\n\n\n x_label = \"z um\"\n\n y_label = 'divergence N=25'\n\n self.GVD_selection_dataframe()\n\n self.zero_to_none(self.auswahl_GVDneg, x_label, y_label)\n self.zero_to_none(self.auswahl_GVD0, x_label, y_label)\n self.zero_to_none(self.auswahl_GVD600, x_label, y_label)\n self.zero_to_none(self.auswahl_GVD900, x_label, y_label)\n\n\n\n\n\n\n\n\n\n z_list = list(range(-20,50))\n\n z_faktor = 100\n\n\n\n self.mean_of_something_vs_something(self.auswahl_GVDneg,z_list,z_faktor,x_label,y_label, y_label+\"[mrad]\", self.selection_members[0])\n\n self.mean_of_something_vs_something(self.auswahl_GVD0,z_list,z_faktor,x_label,y_label, y_label+\"[mrad]\", self.selection_members[1])\n\n self.mean_of_something_vs_something(self.auswahl_GVD600,z_list,z_faktor,x_label,y_label, y_label+\"[mrad]\", self.selection_members[2])\n\n self.mean_of_something_vs_something(self.auswahl_GVD900,z_list,z_faktor,x_label,y_label, y_label+\"[mrad]\", self.selection_members[3])\n\n\n\n self. reset_name_list()\n\n self.reset_name()\n\n\n self.name = self.name+\"_mean_\"\n\n self.save_picture(y_label)\n\n\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Sorting_by_parameters(dataframe, str(name), bool(save_picture???))\nsorting1 = Sorting_by_parameters(RESULT, \"SHHG19_\", True)\n\nsorting1.PP_out()\n#sorting1.PP_in()\n\n############ either day (number) or (0) which means \"ALL\" #########\nsorting1.sort_by_day(0)\nsorting1.high_energy(1.8)\n#sorting1.low_energy(1.8)\n\n\n\n############## second argument gives name of column for evaluation (y axis), since for energy two different ranges existing\n####### name of column high energy range \"HHG 24nm-34nm\nenergy_coloumnlabel = \"HHG 34-50nm\"\n################ is not working !!! sorting1.energy_content_vs_z_highE(energy_coloumnlabel)\n#sorting1.mean_energy_GVD_vs_z(energy_coloumnlabel)\nsorting1.energy_content_vs_z_highE(energy_coloumnlabel)\n\n######## either \"central ROM\", \"central\" or \"CWE...something\"\ncolumname ='central ROM'\n#sorting1.mean_fundamental_GVD_vs_z(columname)\n#sorting1.fundamental_GVD_vs_z(columname)\n\n\n\n###### GVD25_N25_div() ... plots all selection for different GVD values in scatter plot, if True then mean is plotted in addition (separate it!!!)\nsorting1.GVD_N25_div_vs_z()\n# returns sorted dataframes:\n#sorting1.GVD_selection()\n#sorting1.mean_GVD_div_N25()\n\n\n\n\n\n\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_excel",
"matplotlib.pyplot.scatter",
"numpy.isnan",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lsdace30095/model_server | [
"43862a61935d0798ed6908ac84b20192287933f1"
] | [
"example_client/face_detection.py"
] | [
"#\n# Copyright (c) 2019-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport cv2\nimport datetime\nimport grpc\nimport numpy as np\nimport os\nfrom tensorflow import make_tensor_proto, make_ndarray\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\nfrom client_utils import print_statistics\n\n\ndef load_image(file_path):\n img = cv2.imread(file_path) # BGR color format, shape HWC\n img = cv2.resize(img, (args['width'], args['height']))\n img = img.transpose(2,0,1).reshape(1,3,args['height'],args['width'])\n # change shape to NCHW\n return img\n\n\nparser = argparse.ArgumentParser(description='Demo for face detection requests via TFS gRPC API.'\n 'analyses input images and saves with with detected faces.'\n 'it relies on model face_detection...')\n\nparser.add_argument('--input_images_dir', required=False, help='Directory with input images', default=\"images/people\")\nparser.add_argument('--output_dir', required=False, help='Directory for staring images with detection results', default=\"results\")\nparser.add_argument('--batch_size', required=False, help='How many images should be grouped in one batch', default=1, type=int)\nparser.add_argument('--width', required=False, help='How the input image width should be resized in pixels', default=1200, type=int)\nparser.add_argument('--height', required=False, help='How the input image width should be resized in pixels', default=800, type=int)\nparser.add_argument('--grpc_address',required=False, default='localhost', help='Specify url to grpc service. default:localhost')\nparser.add_argument('--grpc_port',required=False, default=9000, help='Specify port to grpc service. 
default: 9000')\n\nargs = vars(parser.parse_args())\n\nchannel = grpc.insecure_channel(\"{}:{}\".format(args['grpc_address'],args['grpc_port']))\nstub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n\nfiles = os.listdir(args['input_images_dir'])\nbatch_size = args['batch_size']\nprint(files)\n\n\nimgs = np.zeros((0,3,args['height'],args['width']), np.dtype('<f'))\nfor i in files:\n img = load_image(os.path.join(args['input_images_dir'], i))\n imgs = np.append(imgs, img, axis=0) # contains all imported images\n\nprint('Start processing {} iterations with batch size {}'.format(len(files)//batch_size , batch_size))\n\niteration = 0\nprocessing_times = np.zeros((0),int)\n\n\nfor x in range(0, imgs.shape[0] - batch_size + 1, batch_size):\n iteration += 1\n request = predict_pb2.PredictRequest()\n request.model_spec.name = \"face-detection\"\n img = imgs[x:(x + batch_size)]\n print(\"\\nRequest shape\", img.shape)\n request.inputs[\"data\"].CopyFrom(make_tensor_proto(img, shape=(img.shape)))\n start_time = datetime.datetime.now()\n result = stub.Predict(request, 10.0) # result includes a dictionary with all model outputs\n end_time = datetime.datetime.now()\n\n duration = (end_time - start_time).total_seconds() * 1000\n processing_times = np.append(processing_times,np.array([int(duration)]))\n output = make_ndarray(result.outputs[\"detection_out\"])\n print(\"Response shape\", output.shape)\n for y in range(0,img.shape[0]): # iterate over responses from all images in the batch\n img_out = img[y,:,:,:]\n\n print(\"image in batch item\",y, \", output shape\",img_out.shape)\n img_out = img_out.transpose(1,2,0)\n for i in range(0, 200*batch_size-1): # there is returned 200 detections for each image in the batch\n detection = output[:,:,i,:]\n # each detection has shape 1,1,7 where last dimension represent:\n # image_id - ID of the image in the batch\n # label - predicted class ID\n # conf - confidence for the predicted class\n # (x_min, y_min) - coordinates of the top left bounding box corner\n #(x_max, y_max) - coordinates of the bottom right bounding box corner.\n if detection[0,0,2] > 0.5 and int(detection[0,0,0]) == y: # ignore detections for image_id != y and confidence <0.5\n print(\"detection\", i , detection)\n x_min = int(detection[0,0,3] * args['width'])\n y_min = int(detection[0,0,4] * args['height'])\n x_max = int(detection[0,0,5] * args['width'])\n y_max = int(detection[0,0,6] * args['height'])\n # box coordinates are proportional to the image size\n print(\"x_min\", x_min)\n print(\"y_min\", y_min)\n print(\"x_max\", x_max)\n print(\"y_max\", y_max)\n\n img_out = cv2.rectangle(cv2.UMat(img_out),(x_min,y_min),(x_max,y_max),(0,0,255),1)\n # draw each detected box on the input image\n print(\"saving result to\",os.path.join(args['output_dir'],str(iteration)+\"_\"+str(y)+'.jpg'))\n cv2.imwrite(os.path.join(args['output_dir'],str(iteration)+\"_\"+str(y)+'.jpg'),img_out)\n\n print('Iteration {}; Processing time: {:.2f} ms; speed {:.2f} fps'\n .format(iteration, round(np.average(duration), 2), round(1000 * batch_size / np.average(duration), 2)\n ))\n\nprint_statistics(processing_times, batch_size)\n\n"
] | [
[
"numpy.dtype",
"numpy.append",
"tensorflow.make_tensor_proto",
"tensorflow.make_ndarray",
"numpy.average",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
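The face_detection.py record above exercises the TensorFlow Serving gRPC API ("tensorflow.make_tensor_proto", "tensorflow.make_ndarray") against a model server. For orientation, the block below condenses the request/response round trip from that file into a few lines; it is a sketch, not an independent reference, and the endpoint, model name, and input shape are simply the script's own defaults (localhost:9000, model "face-detection", 1200x800 NCHW input).

    # Condensed from example_client/face_detection.py above; values are the script's defaults.
    import grpc
    import numpy as np
    from tensorflow import make_tensor_proto, make_ndarray
    from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

    channel = grpc.insecure_channel("localhost:9000")
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    batch = np.zeros((1, 3, 800, 1200), dtype=np.float32)      # one NCHW image

    request = predict_pb2.PredictRequest()
    request.model_spec.name = "face-detection"
    request.inputs["data"].CopyFrom(make_tensor_proto(batch, shape=batch.shape))

    result = stub.Predict(request, 10.0)                        # 10 s deadline, as in the script
    output = make_ndarray(result.outputs["detection_out"])
    # Per the script's comments, each detection row holds
    # [image_id, label, conf, x_min, y_min, x_max, y_max],
    # with box coordinates proportional to the input width/height.
    print(output.shape)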
bluemoo/lifelines | [
"972278d17910edf9285da88f9946f36785eb89b5"
] | [
"lifelines/tests/test_estimation.py"
] | [
"# -*- coding: utf-8 -*-\nimport warnings\n\n# pylint: disable=wrong-import-position\nwarnings.simplefilter(action=\"ignore\", category=DeprecationWarning)\n\nfrom collections import Counter\nfrom collections.abc import Iterable\nimport os\nimport pickle\nfrom itertools import combinations\n\nfrom io import StringIO, BytesIO as stringio\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom scipy.stats import weibull_min, norm, logistic, invweibull, invgamma\nfrom autograd.scipy.special import expit\nfrom autograd import numpy as anp\n\ntry:\n from flaky import flaky\nexcept ImportError:\n pass\n\nfrom pandas.testing import assert_frame_equal, assert_series_equal, assert_index_equal\nimport numpy.testing as npt\n\nfrom lifelines.utils import (\n k_fold_cross_validation,\n StatError,\n concordance_index,\n ConvergenceWarning,\n to_long_format,\n normalize,\n to_episodic_format,\n ConvergenceError,\n median_survival_times,\n StatisticalWarning,\n qth_survival_time,\n)\n\nfrom lifelines.fitters import BaseFitter, ParametricUnivariateFitter, ParametricRegressionFitter\n\nfrom lifelines import (\n WeibullFitter,\n ExponentialFitter,\n NelsonAalenFitter,\n KaplanMeierFitter,\n BreslowFlemingHarringtonFitter,\n CoxPHFitter,\n CoxTimeVaryingFitter,\n AalenAdditiveFitter,\n AalenJohansenFitter,\n LogNormalFitter,\n LogLogisticFitter,\n PiecewiseExponentialFitter,\n WeibullAFTFitter,\n LogNormalAFTFitter,\n LogLogisticAFTFitter,\n PiecewiseExponentialRegressionFitter,\n GeneralizedGammaFitter,\n GeneralizedGammaRegressionFitter,\n SplineFitter,\n MixtureCureFitter,\n)\n\nfrom lifelines.datasets import (\n load_larynx,\n load_waltons,\n load_kidney_transplant,\n load_rossi,\n load_panel_test,\n load_g3,\n load_holly_molly_polly,\n load_regression_dataset,\n load_stanford_heart_transplants,\n load_multicenter_aids_cohort_study,\n load_diabetes,\n)\nfrom lifelines.generate_datasets import (\n generate_hazard_rates,\n generate_random_lifetimes,\n piecewise_exponential_survival_data,\n)\n\n\[email protected]\ndef sample_lifetimes():\n N = 30\n return (np.random.randint(20, size=N), np.random.randint(2, size=N))\n\n\[email protected]\ndef positive_sample_lifetimes():\n N = 30\n return (np.random.randint(1, 20, size=N), np.random.randint(2, size=N))\n\n\[email protected]\ndef waltons_dataset():\n return load_waltons()\n\n\[email protected]\ndef data_pred1():\n N = 150\n data_pred1 = pd.DataFrame()\n data_pred1[\"x1\"] = np.random.uniform(size=N)\n data_pred1[\"t\"] = 1 + data_pred1[\"x1\"] + np.random.normal(0, 0.05, size=N)\n data_pred1[\"E\"] = True\n return data_pred1\n\n\nclass PiecewiseExponentialFitterTesting(PiecewiseExponentialFitter):\n def __init__(self, *args, **kwargs):\n super(PiecewiseExponentialFitterTesting, self).__init__([5.0], *args, **kwargs)\n\n\nclass SplineFitterTesting(SplineFitter):\n def __init__(self, *args, **kwargs):\n super(SplineFitterTesting, self).__init__([0.0, 40.0], *args, **kwargs)\n\n\nclass CustomRegressionModelTesting(ParametricRegressionFitter):\n\n _fitted_parameter_names = [\"lambda_\", \"beta_\", \"rho_\"]\n\n def __init__(self, **kwargs):\n cols = load_rossi().drop([\"week\", \"arrest\"], axis=1).columns\n self.regressors = {\"lambda_\": cols, \"beta_\": cols, \"rho_\": cols}\n super(CustomRegressionModelTesting, self).__init__(**kwargs)\n\n def _cumulative_hazard(self, params, T, Xs):\n c = expit(anp.dot(Xs[\"beta_\"], params[\"beta_\"]))\n\n lambda_ = anp.exp(anp.dot(Xs[\"lambda_\"], params[\"lambda_\"]))\n rho_ = anp.exp(anp.dot(Xs[\"rho_\"], 
params[\"rho_\"]))\n cdf = 1 - anp.exp(-((T / lambda_) ** rho_))\n\n return -anp.log((1 - c) + c * (1 - cdf))\n\n\[email protected]\ndef data_pred2():\n N = 150\n data_pred2 = pd.DataFrame()\n data_pred2[\"x1\"] = np.random.uniform(size=N)\n data_pred2[\"x2\"] = np.random.uniform(size=N)\n data_pred2[\"t\"] = 1 + data_pred2[\"x1\"] + data_pred2[\"x2\"] + np.random.normal(0, 0.05, size=N)\n data_pred2[\"E\"] = True\n return data_pred2\n\n\[email protected]\ndef data_nus():\n data_nus = pd.DataFrame(\n [\n [6, 31.4],\n [98, 21.5],\n [189, 27.1],\n [374, 22.7],\n [1002, 35.7],\n [1205, 30.7],\n [2065, 26.5],\n [2201, 28.3],\n [2421, 27.9],\n ],\n columns=[\"t\", \"x\"],\n )\n data_nus[\"E\"] = True\n return data_nus\n\n\[email protected]\ndef rossi():\n return load_rossi()\n\n\[email protected]\ndef regression_dataset():\n return load_regression_dataset()\n\n\[email protected]\ndef known_parametric_univariate_fitters():\n return [\n ExponentialFitter,\n WeibullFitter,\n LogNormalFitter,\n LogLogisticFitter,\n PiecewiseExponentialFitterTesting,\n GeneralizedGammaFitter,\n SplineFitterTesting,\n ]\n\n\nclass TestBaseFitter:\n def test_repr_without_fitter(self):\n bf = BaseFitter()\n assert bf.__repr__() == \"<lifelines.BaseFitter>\"\n\n\nclass TestParametricUnivariateFitters:\n @flaky\n def test_confidence_interval_is_expected(self):\n\n from autograd.scipy.special import logit\n from autograd.scipy.stats import norm\n\n N = 20\n U = np.random.rand(N)\n T = -(logit(-np.log(U) / 0.5) - np.random.exponential(2, N) - 7.00) / 0.50\n\n E = ~np.isnan(T)\n T[np.isnan(T)] = 50\n\n class UpperAsymptoteFitter(ParametricUnivariateFitter):\n\n _fitted_parameter_names = [\"c_\", \"mu_\"]\n\n _bounds = ((0, None), (None, None))\n\n def _cumulative_hazard(self, params, times):\n c, mu_ = params\n return c * norm.cdf((times - mu_) / 6.3, loc=0, scale=1)\n\n uaf = UpperAsymptoteFitter().fit(T, E, ci_labels=(\"l\", \"u\"))\n upper = uaf.confidence_interval_.iloc[-1][\"u\"]\n lower = uaf.confidence_interval_.iloc[-1][\"l\"]\n coef, std = uaf.summary.loc[\"c_\", [\"coef\", \"se(coef)\"]]\n\n assert (upper - lower) > 0\n assert abs(upper - lower) > 0.3\n assert coef - std > lower\n assert coef + std < upper\n\n def test_models_can_handle_really_large_duration_values(self, known_parametric_univariate_fitters):\n T1 = np.random.exponential(1e12, size=1000)\n T2 = np.random.exponential(1e12, size=1000)\n E = T1 < T2\n T = np.minimum(T1, T2)\n for fitter in known_parametric_univariate_fitters:\n fitter().fit(T, E)\n\n def test_models_can_handle_really_small_duration_values(self, known_parametric_univariate_fitters):\n T1 = np.random.exponential(1e-6, size=1000)\n T2 = np.random.exponential(1e-6, size=1000)\n E = T1 < T2\n T = np.minimum(T1, T2)\n\n for fitter in known_parametric_univariate_fitters:\n fitter().fit(T, E).print_summary(5)\n\n def test_models_can_handle_really_small_duration_values_for_left_censorship(\n self, known_parametric_univariate_fitters\n ):\n T1 = np.random.exponential(1e-6, size=1000)\n T2 = np.random.exponential(1e-6, size=1000)\n E = T1 > T2\n T = np.maximum(T1, T2)\n\n for fitter in known_parametric_univariate_fitters:\n fitter().fit_left_censoring(T, E)\n\n def test_parametric_univariate_fitters_can_print_summary(\n self, positive_sample_lifetimes, known_parametric_univariate_fitters\n ):\n for fitter in known_parametric_univariate_fitters:\n f = fitter().fit(positive_sample_lifetimes[0])\n f.summary\n f.print_summary()\n\n def test_parametric_univariate_fitters_has_confidence_intervals(\n 
self, positive_sample_lifetimes, known_parametric_univariate_fitters\n ):\n for fitter in known_parametric_univariate_fitters:\n f = fitter().fit(positive_sample_lifetimes[0])\n assert f.confidence_interval_ is not None\n assert f.confidence_interval_survival_function_ is not None\n assert f.confidence_interval_hazard_ is not None\n\n def test_warnings_for_problematic_cumulative_hazards(self):\n class NegativeFitter(ParametricUnivariateFitter):\n\n _fitted_parameter_names = [\"a\"]\n\n def _cumulative_hazard(self, params, times):\n return params[0] * (times - 0.4)\n\n class DecreasingFitter(ParametricUnivariateFitter):\n\n _fitted_parameter_names = [\"a\"]\n\n def _cumulative_hazard(self, params, times):\n return params[0] * 1 / times\n\n with pytest.warns(StatisticalWarning, match=\"positive\") as w:\n NegativeFitter().fit([0.01, 0.5, 10.0, 20.0])\n\n with pytest.warns(StatisticalWarning, match=\"non-decreasing\") as w:\n DecreasingFitter().fit([0.01, 0.5, 10.0, 20])\n\n def test_parameteric_models_all_can_do_interval_censoring(self, known_parametric_univariate_fitters):\n df = load_diabetes()\n for fitter in known_parametric_univariate_fitters:\n f = fitter().fit_interval_censoring(df[\"left\"], df[\"right\"])\n f.print_summary()\n\n def test_parameteric_models_all_can_do_interval_censoring_with_prediction(\n self, known_parametric_univariate_fitters\n ):\n df = load_diabetes()\n for fitter in known_parametric_univariate_fitters:\n f = fitter().fit_interval_censoring(df[\"left\"], df[\"right\"])\n f.predict(3.0)\n\n def test_parameteric_models_fail_if_passing_in_bad_event_data(self, known_parametric_univariate_fitters):\n df = load_diabetes()\n for fitter in known_parametric_univariate_fitters:\n with pytest.raises(ValueError, match=\"lower_bound == upper_bound\"):\n f = fitter().fit_interval_censoring(df[\"left\"], df[\"right\"], event_observed=np.ones_like(df[\"right\"]))\n\n def test_print_summary(self, sample_lifetimes, known_parametric_univariate_fitters):\n T = np.random.exponential(1, size=100)\n for f in known_parametric_univariate_fitters:\n f = f()\n f.fit(T)\n f.print_summary(style=\"ascii\")\n f.print_summary(style=\"html\")\n f.print_summary(style=\"latex\")\n\n\nclass TestUnivariateFitters:\n @pytest.fixture\n def univariate_fitters(self):\n return [\n KaplanMeierFitter,\n NelsonAalenFitter,\n BreslowFlemingHarringtonFitter,\n ExponentialFitter,\n WeibullFitter,\n LogNormalFitter,\n LogLogisticFitter,\n PiecewiseExponentialFitterTesting,\n GeneralizedGammaFitter,\n SplineFitterTesting,\n ]\n\n def test_confidence_interval_has_the_correct_order_so_plotting_doesnt_break(\n self, sample_lifetimes, univariate_fitters\n ):\n T, E = sample_lifetimes\n for f in univariate_fitters:\n f = f()\n f.fit(T, E)\n assert \"lower\" in f.confidence_interval_.columns[0]\n assert \"upper\" in f.confidence_interval_.columns[1]\n\n def test_repr_with_fitter(self, sample_lifetimes, univariate_fitters):\n T, E = sample_lifetimes\n for f in univariate_fitters:\n f = f()\n f.fit(T, E)\n assert f.__repr__() == \"\"\"<lifelines.%s:\"%s\", fitted with %d total observations, %d right-censored observations>\"\"\" % (\n f._class_name,\n f._label,\n E.shape[0],\n E.shape[0] - E.sum(),\n )\n\n def test_allow_dataframes(self, univariate_fitters):\n t_2d = np.random.exponential(5, size=(2000, 1)) ** 2\n t_df = pd.DataFrame(t_2d)\n for f in univariate_fitters:\n f().fit(t_2d)\n f().fit(t_df)\n\n def test_has_percentile_function(self, univariate_fitters, positive_sample_lifetimes):\n for fitter in 
univariate_fitters:\n f = fitter().fit(positive_sample_lifetimes[0])\n if hasattr(f, \"survival_function_\"):\n print(f)\n assert f.percentile(0.5) == f.median_survival_time_\n\n def test_default_alpha_is_005(self, univariate_fitters):\n for f in univariate_fitters:\n assert f().alpha == 0.05\n\n def test_univariate_fitters_accept_late_entries(self, positive_sample_lifetimes, univariate_fitters):\n entries = 0.1 * positive_sample_lifetimes[0]\n for fitter in univariate_fitters:\n f = fitter().fit(positive_sample_lifetimes[0], entry=entries)\n assert f.entry is not None\n\n def test_univariate_fitters_with_survival_function_have_conditional_time_to_(\n self, positive_sample_lifetimes, univariate_fitters\n ):\n for fitter in univariate_fitters:\n\n f = fitter().fit(positive_sample_lifetimes[0])\n if hasattr(f, \"survival_function_\"):\n assert all(f.conditional_time_to_event_.index == f.survival_function_.index)\n\n def test_conditional_time_to_allows_custom_timelines(self, univariate_fitters):\n t = np.random.binomial(50, 0.4, 100)\n e = np.random.binomial(1, 0.8, 100)\n for fitter in univariate_fitters:\n f = fitter().fit(t, e, timeline=np.linspace(0, 40, 41))\n if hasattr(f, \"survival_function_\"):\n assert all(f.conditional_time_to_event_.index == f.survival_function_.index)\n\n def test_univariate_fitters_allows_one_to_change_alpha_at_fit_time(\n self, positive_sample_lifetimes, univariate_fitters\n ):\n alpha = 0.1\n alpha_fit = 0.05\n for f in univariate_fitters:\n fitter = f(alpha=alpha)\n fitter.fit(positive_sample_lifetimes[0], alpha=alpha_fit)\n assert str(1 - alpha_fit) in fitter.confidence_interval_.columns[0]\n\n fitter.fit(positive_sample_lifetimes[0])\n assert str(1 - alpha) in fitter.confidence_interval_.columns[0]\n\n def test_univariate_fitters_have_a_plot_method(self, positive_sample_lifetimes, univariate_fitters):\n T = positive_sample_lifetimes[0]\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(T)\n assert hasattr(fitter, \"plot\")\n\n def test_univariate_fitters_ok_if_given_timedelta(self, univariate_fitters):\n t = pd.Series(\n [pd.to_datetime(\"2015-01-01 12:00\"), pd.to_datetime(\"2015-01-02\"), pd.to_datetime(\"2015-01-02 12:00\")]\n )\n T = pd.to_datetime(\"2015-01-03\") - t\n for fitter in univariate_fitters:\n f = fitter().fit(T)\n try:\n npt.assert_allclose(f.timeline, 1e9 * 12 * 60 * 60 * np.array([0, 1, 2, 3]))\n except:\n npt.assert_allclose(f.timeline, 1e9 * 12 * 60 * 60 * np.array([1, 2, 3]))\n\n def test_univariate_fitters_okay_if_given_boolean_col_with_object_dtype(self, univariate_fitters):\n df = pd.DataFrame({\"T\": [1, 2, 3, 4, 5], \"E\": [True, True, True, True, None]})\n assert df[\"E\"].dtype == object\n df = df.dropna()\n assert df[\"E\"].dtype == object\n\n for fitter in univariate_fitters:\n with pytest.warns(UserWarning, match=\"convert\"):\n fitter().fit(df[\"T\"], df[\"E\"])\n\n def test_predict_methods_returns_a_scalar_or_a_array_depending_on_input(\n self, positive_sample_lifetimes, univariate_fitters\n ):\n T = positive_sample_lifetimes[0]\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(T)\n assert not isinstance(fitter.predict(1), Iterable)\n assert isinstance(fitter.predict([1, 2]), Iterable)\n\n def test_predict_method_returns_exact_value_if_given_an_observed_time(self):\n T = [1, 2, 3]\n kmf = KaplanMeierFitter()\n kmf.fit(T)\n time = 1\n assert abs(kmf.predict(time) - kmf.survival_function_.iloc[time].values) < 10e-8\n\n def 
test_predict_method_returns_an_approximation_if_not_in_the_index_and_interpolate_set_to_true(self):\n T = [1, 2, 3]\n kmf = KaplanMeierFitter()\n kmf.fit(T)\n assert abs(kmf.predict(0.5, interpolate=True) - 5 / 6.0) < 10e-8\n assert abs(kmf.predict(1.9999, interpolate=True) - 0.3333666666) < 10e-8\n\n def test_predict_method_returns_the_previous_value_if_not_in_the_index(self):\n T = [1, 2, 3]\n kmf = KaplanMeierFitter()\n kmf.fit(T)\n assert abs(kmf.predict(1.0, interpolate=False) - 2 / 3) < 10e-8\n assert abs(kmf.predict(1.9999, interpolate=False) - 2 / 3) < 10e-8\n\n def test_custom_timeline_can_be_list_or_array(self, positive_sample_lifetimes, univariate_fitters):\n T, C = positive_sample_lifetimes\n timeline = [2, 3, 4.0, 1.0, 6, 5.0]\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(T, C, timeline=timeline)\n if hasattr(fitter, \"survival_function_\"):\n with_list = fitter.survival_function_.values\n with_array = fitter.fit(T, C, timeline=np.array(timeline)).survival_function_.values\n npt.assert_array_equal(with_list, with_array)\n elif hasattr(fitter, \"cumulative_hazard_\"):\n with_list = fitter.cumulative_hazard_.values\n with_array = fitter.fit(T, C, timeline=np.array(timeline)).cumulative_hazard_.values\n npt.assert_array_equal(with_list, with_array)\n\n def test_custom_timeline(self, positive_sample_lifetimes, univariate_fitters):\n T, C = positive_sample_lifetimes\n timeline = [2, 3, 4.0, 1.0, 6, 5.0]\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(T, C, timeline=timeline)\n if hasattr(fitter, \"survival_function_\"):\n assert sorted(timeline) == list(fitter.survival_function_.index.values)\n elif hasattr(fitter, \"cumulative_hazard_\"):\n assert sorted(timeline) == list(fitter.cumulative_hazard_.index.values)\n\n def test_label_is_a_property(self, positive_sample_lifetimes, univariate_fitters):\n label = \"Test Label\"\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(positive_sample_lifetimes[0], label=label)\n assert fitter._label == label\n assert fitter.confidence_interval_.columns[0] == \"%s_lower_0.95\" % label\n assert fitter.confidence_interval_.columns[1] == \"%s_upper_0.95\" % label\n\n def test_ci_labels(self, positive_sample_lifetimes, univariate_fitters):\n expected = [\"upper\", \"lower\"]\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(positive_sample_lifetimes[0], ci_labels=expected)\n npt.assert_array_equal(fitter.confidence_interval_.columns, expected)\n\n def test_ci_is_not_all_nan(self, positive_sample_lifetimes, univariate_fitters):\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(positive_sample_lifetimes[0])\n try:\n fitter.print_summary()\n except:\n pass\n assert not (pd.isnull(fitter.confidence_interval_)).all().all()\n\n def test_lists_as_input(self, positive_sample_lifetimes, univariate_fitters):\n T, C = positive_sample_lifetimes\n for f in univariate_fitters:\n fitter = f()\n\n if isinstance(fitter, NelsonAalenFitter):\n with_array = fitter.fit(T, C).cumulative_hazard_\n with_list = fitter.fit(list(T), list(C)).cumulative_hazard_\n assert_frame_equal(with_list, with_array)\n\n else:\n with_array = fitter.fit(T, C).survival_function_\n with_list = fitter.fit(list(T), list(C)).survival_function_\n assert_frame_equal(with_list, with_array)\n\n if isinstance(fitter, ParametricUnivariateFitter):\n with_array = fitter.fit_interval_censoring(T, T + 1, (T == T + 1)).survival_function_\n with_list = fitter.fit_interval_censoring(\n list(T), list(T + 1), list((T == T + 1))\n ).survival_function_\n 
assert_frame_equal(with_list, with_array)\n\n with_array = fitter.fit_left_censoring(T, C).survival_function_\n with_list = fitter.fit_left_censoring(list(T), list(C)).survival_function_\n assert_frame_equal(with_list, with_array)\n\n def test_subtraction_function(self, positive_sample_lifetimes, univariate_fitters):\n T2 = np.arange(1, 50)\n for fitter in univariate_fitters:\n f1 = fitter()\n f2 = fitter()\n\n f1.fit(positive_sample_lifetimes[0])\n f2.fit(T2)\n\n result = f1.subtract(f2)\n assert result.shape[0] == (np.unique(np.concatenate((f1.timeline, f2.timeline))).shape[0])\n\n npt.assert_array_almost_equal(f1.subtract(f1).sum().values, 0.0)\n\n def test_subtract_function_with_labelled_data(self, positive_sample_lifetimes, univariate_fitters):\n T2 = np.arange(1, 50)\n for fitter in univariate_fitters:\n f1 = fitter()\n f2 = fitter()\n\n f1.fit(positive_sample_lifetimes[0], label=\"A\")\n f2.fit(T2, label=\"B\")\n\n result = f1.subtract(f2)\n assert result.columns == [\"diff\"]\n assert result.shape[1] == 1\n\n def test_divide_function(self, positive_sample_lifetimes, univariate_fitters):\n T2 = np.arange(1, 50)\n for fitter in univariate_fitters:\n f1 = fitter()\n f2 = fitter()\n\n f1.fit(positive_sample_lifetimes[0])\n f2.fit(T2)\n\n result = f1.divide(f2)\n assert result.shape[0] == (np.unique(np.concatenate((f1.timeline, f2.timeline))).shape[0])\n npt.assert_array_almost_equal(np.log(f1.divide(f1)).sum().values, 0.0)\n\n def test_divide_function_with_labelled_data(self, positive_sample_lifetimes, univariate_fitters):\n T2 = np.arange(1, 50)\n for fitter in univariate_fitters:\n f1 = fitter()\n f2 = fitter()\n\n f1.fit(positive_sample_lifetimes[0], label=\"A\")\n f2.fit(T2, label=\"B\")\n\n result = f1.divide(f2)\n assert result.columns == [\"ratio\"]\n assert result.shape[1] == 1\n\n def test_valueerror_is_thrown_if_alpha_out_of_bounds(self, univariate_fitters):\n for fitter in univariate_fitters:\n with pytest.raises(ValueError):\n fitter(alpha=95)\n\n def test_typeerror_is_thrown_if_there_is_nans_in_the_duration_col(self, univariate_fitters):\n T = np.array([1.0, 2.0, 4.0, np.nan, 8.0])\n for fitter in univariate_fitters:\n with pytest.raises(TypeError):\n fitter().fit(T)\n\n def test_typeerror_is_thrown_if_there_is_nans_in_the_event_col(self, univariate_fitters):\n T = np.arange(1, 5)\n E = [1, 0, None, 1, 1]\n for fitter in univariate_fitters:\n with pytest.raises(TypeError):\n fitter().fit(T, E)\n\n def test_pickle_serialization(self, positive_sample_lifetimes, univariate_fitters):\n T = positive_sample_lifetimes[0]\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(T)\n\n unpickled = pickle.loads(pickle.dumps(fitter))\n dif = (fitter.durations - unpickled.durations).sum()\n assert dif == 0\n\n def test_dill_serialization(self, positive_sample_lifetimes, univariate_fitters):\n from dill import dumps, loads\n\n T = positive_sample_lifetimes[0]\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(T)\n\n unpickled = loads(dumps(fitter))\n dif = (fitter.durations - unpickled.durations).sum()\n assert dif == 0\n\n def test_joblib_serialization(self, positive_sample_lifetimes, univariate_fitters):\n from joblib import dump, load\n\n T = positive_sample_lifetimes[0]\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(T)\n\n dump(fitter, \"filename.joblib\")\n unpickled = load(\"filename.joblib\")\n dif = (fitter.durations - unpickled.durations).sum()\n assert dif == 0\n\n def test_all_models_have_censoring_type(self, positive_sample_lifetimes, 
univariate_fitters):\n T = positive_sample_lifetimes[0]\n for f in univariate_fitters:\n fitter = f()\n fitter.fit(T)\n assert hasattr(fitter, \"_censoring_type\")\n\n\nclass TestPiecewiseExponentialFitter:\n def test_fit_with_bad_breakpoints_raises_error(self):\n with pytest.raises(ValueError):\n pwf = PiecewiseExponentialFitter(None)\n\n with pytest.raises(ValueError):\n pwf = PiecewiseExponentialFitter([])\n\n with pytest.raises(ValueError):\n pwf = PiecewiseExponentialFitter([0, 1, 2, 3])\n\n with pytest.raises(ValueError):\n pwf = PiecewiseExponentialFitter([1, 2, 3, np.inf])\n\n @flaky(max_runs=3, min_passes=1)\n def test_fit_on_simulated_data(self):\n bp = [1, 2]\n lambdas = [0.5, 0.1, 1.0]\n N = int(5 * 1e5)\n T_actual = piecewise_exponential_survival_data(N, bp, lambdas)\n T_censor = piecewise_exponential_survival_data(N, bp, lambdas)\n T = np.minimum(T_actual, T_censor)\n E = T_actual < T_censor\n\n pwf = PiecewiseExponentialFitter(bp).fit(T, E)\n npt.assert_allclose(pwf.summary.loc[\"lambda_0_\", \"coef\"], 1 / 0.5, rtol=0.01)\n npt.assert_allclose(pwf.summary.loc[\"lambda_1_\", \"coef\"], 1 / 0.1, rtol=0.01)\n npt.assert_allclose(pwf.summary.loc[\"lambda_2_\", \"coef\"], 1 / 1.0, rtol=0.01)\n\n\nclass TestLogNormalFitter:\n @pytest.fixture()\n def lnf(self):\n return LogNormalFitter()\n\n def test_fit(self, lnf):\n T = np.exp(np.random.randn(100000))\n E = np.ones_like(T)\n lnf.fit(T, E)\n assert abs(lnf.mu_) < 0.1\n assert abs(lnf.sigma_ - 1) < 0.1\n\n def test_lognormal_model_does_not_except_negative_or_zero_values(self, lnf):\n T = [0, 1, 2, 4, 5]\n with pytest.raises(ValueError):\n lnf.fit(T)\n\n T[0] = -1\n with pytest.raises(ValueError):\n lnf.fit(T)\n\n def test_cumulative_hazard_doesnt_fail(self, lnf):\n T = np.exp(np.random.randn(100))\n lnf.fit(T)\n results = lnf.cumulative_hazard_at_times([1, 2, 3])\n assert results.shape[0] == 3\n\n results = lnf.cumulative_hazard_at_times(pd.Series([1, 2, 3]))\n assert results.shape[0] == 3\n\n results = lnf.cumulative_hazard_at_times(1)\n assert results.shape[0] == 1\n\n def test_lnf_inference(self, lnf):\n N = 250000\n mu = 3 * np.random.randn()\n sigma = np.random.uniform(0.1, 3.0)\n\n X, C = np.exp(sigma * np.random.randn(N) + mu), np.exp(np.random.randn(N) + mu)\n E = X <= C\n T = np.minimum(X, C)\n\n lnf.fit(T, E)\n\n assert abs(mu - lnf.mu_) < 0.05\n assert abs(sigma - lnf.sigma_) < 0.05\n assert abs(lnf.median_survival_time_ / np.percentile(X, 50) - 1) < 0.05\n\n def test_lnf_inference_with_large_sigma(self, lnf):\n N = 250000\n mu = 4.94\n sigma = 12\n\n X, C = np.exp(sigma * np.random.randn(N) + mu), np.exp(np.random.randn(N) + mu)\n E = X <= C\n T = np.minimum(X, C)\n\n lnf.fit(T, E)\n\n assert abs(mu / lnf.mu_ - 1) < 0.05\n assert abs(sigma / lnf.sigma_ - 1) < 0.05\n\n def test_lnf_inference_with_small_sigma(self, lnf):\n N = 25000\n mu = 3\n sigma = 0.04\n\n X, C = np.exp(sigma * np.random.randn(N) + mu), np.exp(np.random.randn(N) + mu)\n E = X <= C\n T = np.minimum(X, C)\n\n lnf.fit(T, E)\n\n assert abs(mu / lnf.mu_ - 1) < 0.05\n assert abs(sigma / lnf.sigma_ - 1) < 0.05\n\n def test_lnf_inference_with_really_small_sigma(self, lnf):\n N = 250000\n mu = 3 * np.random.randn()\n sigma = 0.02\n\n X, C = np.exp(sigma * np.random.randn(N) + mu), np.exp(np.random.randn(N) + mu)\n E = X <= C\n T = np.minimum(X, C)\n\n lnf.fit(T, E)\n\n assert abs(mu / lnf.mu_ - 1) < 0.05\n assert abs(sigma / lnf.sigma_ - 1) < 0.05\n\n def test_lnf_inference_no_censorship(self, lnf):\n N = 1000000\n mu = 10 * np.random.randn()\n sigma = 
np.random.exponential(10)\n\n T = np.exp(sigma * np.random.randn(N) + mu)\n\n lnf.fit(T)\n\n assert abs(mu / lnf.mu_ - 1) < 0.1\n assert abs(sigma / lnf.sigma_ - 1) < 0.1\n\n\nclass TestLogLogisticFitter:\n @pytest.fixture()\n def llf(self):\n return LogLogisticFitter()\n\n def test_loglogistic_model_does_not_except_negative_or_zero_values(self, llf):\n\n T = [0, 1, 2, 4, 5]\n with pytest.raises(ValueError):\n llf.fit(T)\n\n T[0] = -1\n with pytest.raises(ValueError):\n llf.fit(T)\n\n def test_llf_simple_inference(self, llf):\n from scipy.stats import fisk\n\n T = fisk.rvs(1, scale=1, size=60000)\n llf.fit(T)\n assert abs(llf.alpha_ - 1) < 0.05\n assert abs(llf.beta_ - 1) < 0.05\n\n def test_llf_less_simple_inference(self, llf):\n from scipy.stats import fisk\n\n scale = 0.3\n c = 5.4\n T = fisk.rvs(c, scale=scale, size=60000)\n llf.fit(T)\n assert abs(llf.alpha_ - scale) < 0.05\n assert abs(llf.beta_ - c) < 0.05\n\n def test_llf_less_simple_inference_with_censorship(self, llf):\n from scipy.stats import fisk\n\n scale = 0.3\n c = 5.4\n T = fisk.rvs(c, scale=scale, size=120000)\n C = fisk.rvs(c, scale=scale, size=120000)\n E = T < C\n T = np.minimum(T, C)\n assert 1 > E.mean() > 0\n\n llf.fit(T, E)\n assert abs(llf.alpha_ - scale) < 0.05\n assert abs(llf.beta_ - c) < 0.05\n\n def test_llf_large_values(self, llf):\n from scipy.stats import fisk\n\n scale = 20\n c = 50\n T = fisk.rvs(c, scale=scale, size=100000)\n C = fisk.rvs(c, scale=scale, size=100000)\n E = T < C\n T = np.minimum(T, C)\n\n assert 1 > E.mean() > 0\n\n llf.fit(T, E)\n assert abs(llf.alpha_ / scale - 1) < 0.05\n assert abs(llf.beta_ / c - 1) < 0.05\n\n @pytest.mark.xfail\n def test_llf_small_values(self, llf):\n from scipy.stats import fisk\n\n scale = 0.02\n c = 0.05\n T = fisk.rvs(c, scale=scale, size=100000)\n C = fisk.rvs(c, scale=scale, size=100000)\n E = T < C\n T = np.minimum(T, C)\n\n assert 1 > E.mean() > 0\n\n llf.fit(T, E)\n assert abs(llf.alpha_ - scale) < 0.02\n assert abs(llf.beta_ - c) < 0.02\n\n\nclass TestWeibullFitter:\n def test_unstable_data(self):\n data = pd.read_csv(\"https://raw.githubusercontent.com/scotty269/lifelines_test/master/my_data.csv\")\n T = data[\"T\"]\n E = data[\"E\"]\n assert abs(WeibullFitter().fit(T, E).log_likelihood_ - LogNormalFitter().fit(T, E).log_likelihood_) < 0.5\n\n @flaky(max_runs=3, min_passes=2)\n @pytest.mark.parametrize(\"N\", [750, 1500])\n def test_left_censorship_inference(self, N):\n T_actual = 0.5 * np.random.weibull(5, size=N)\n\n MIN_0 = np.percentile(T_actual, 5)\n MIN_1 = np.percentile(T_actual, 10)\n MIN_2 = np.percentile(T_actual, 30)\n MIN_3 = np.percentile(T_actual, 50)\n\n T = T_actual.copy()\n ix = np.random.randint(4, size=N)\n\n T = np.where(ix == 0, np.maximum(T, MIN_0), T)\n T = np.where(ix == 1, np.maximum(T, MIN_1), T)\n T = np.where(ix == 2, np.maximum(T, MIN_2), T)\n T = np.where(ix == 3, np.maximum(T, MIN_3), T)\n E = T_actual == T\n\n wf = WeibullFitter().fit_left_censoring(T, E)\n\n assert wf.summary.loc[\"rho_\", \"coef lower 95%\"] < 5 < wf.summary.loc[\"rho_\", \"coef upper 95%\"]\n assert wf.summary.loc[\"lambda_\", \"coef lower 95%\"] < 0.5 < wf.summary.loc[\"lambda_\", \"coef upper 95%\"]\n\n def test_weibull_with_delayed_entries(self):\n # note the the independence of entry and final time is really important\n # (also called non-informative)\n # for example, the following doesn't work\n # D = np.random.rand(15000) * T\n\n wf = WeibullFitter()\n T = np.random.exponential(10, 350000)\n D = np.random.exponential(10, 350000)\n\n keep = T > 
D\n T = T[keep]\n D = D[keep]\n\n wf = WeibullFitter().fit(T, entry=D)\n\n assert np.abs(wf.lambda_ / 10.0 - 1) < 0.01\n\n def test_weibull_fit_returns_float_timelines(self):\n wf = WeibullFitter()\n T = np.linspace(0.1, 10)\n wf.fit(T)\n npt.assert_array_equal(wf.timeline, T)\n npt.assert_array_equal(wf.survival_function_.index.values, T)\n\n def test_weibull_model_does_not_accept_negative_or_zero_values(self):\n wf = WeibullFitter()\n\n T = [0, 1, 2, 4, 5]\n with pytest.raises(ValueError):\n wf.fit(T)\n\n T[0] = -1\n with pytest.raises(ValueError):\n wf.fit(T)\n\n def test_exponential_data_produces_correct_inference_no_censorship(self):\n wf = WeibullFitter()\n N = 600000\n T = 5 * np.random.exponential(1, size=N) ** 2\n wf.fit(T)\n assert abs(wf.rho_ - 0.5) < 0.01\n assert abs(wf.lambda_ / 5 - 1) < 0.01\n assert abs(wf.median_survival_time_ - 5 * np.log(2) ** 2) < 0.1 # worse convergence\n assert abs(wf.median_survival_time_ - np.median(T)) < 0.1\n\n def test_exponential_data_produces_correct_inference_with_censorship(self):\n wf = WeibullFitter()\n N = 80000\n factor = 5\n T = factor * np.random.exponential(1, size=N)\n T_ = factor * np.random.exponential(1, size=N)\n wf.fit(np.minimum(T, T_), (T < T_))\n assert abs(wf.rho_ - 1.0) < 0.05\n assert abs(wf.lambda_ / factor - 1) < 0.05\n assert abs(wf.median_survival_time_ - factor * np.log(2)) < 0.1\n\n def test_convergence_completes_for_ever_increasing_data_sizes(self):\n wf = WeibullFitter()\n rho = 5\n lambda_ = 1.0 / 2\n for N in [10, 50, 500, 5000, 50000]:\n T = np.random.weibull(rho, size=N) * lambda_\n wf.fit(T)\n assert abs(1 - wf.rho_ / rho) < 5 / np.sqrt(N)\n assert abs(1 - wf.lambda_ / lambda_) < 5 / np.sqrt(N)\n\n\nclass TestGeneralizedGammaFitter:\n def test_exponential_data_inference(self):\n T = np.random.exponential(1.0, size=20000)\n gg = GeneralizedGammaFitter().fit(T)\n gg.print_summary()\n assert gg.summary.loc[\"lambda_\"][\"coef lower 95%\"] < 1 < gg.summary.loc[\"lambda_\"][\"coef upper 95%\"]\n assert gg.summary.loc[\"ln_sigma_\"][\"coef lower 95%\"] < 0 < gg.summary.loc[\"ln_sigma_\"][\"coef upper 95%\"]\n\n def test_weibull_data_inference(self):\n T = 5 * np.random.exponential(1, size=10000) ** 0.5\n gg = GeneralizedGammaFitter().fit(T)\n gg.print_summary()\n assert gg.summary.loc[\"lambda_\"][\"coef lower 95%\"] < 1 < gg.summary.loc[\"lambda_\"][\"coef upper 95%\"]\n\n def test_gamma_data_inference(self):\n T = np.random.gamma(shape=4, scale=0.5, size=15000)\n gg = GeneralizedGammaFitter().fit(T)\n gg.print_summary()\n assert abs(gg.summary.loc[\"lambda_\", \"coef\"] - np.exp(gg.summary.loc[\"ln_sigma_\", \"coef\"])) < 0.15\n\n def test_lognormal_data_inference(self):\n T = np.exp(np.random.randn(20000))\n gg = GeneralizedGammaFitter().fit(T)\n gg.print_summary(4)\n assert abs(gg.summary.loc[\"lambda_\"][\"coef\"]) < 0.05\n\n def test_inverse_weibull_inference(self):\n T = invweibull(5).rvs(10000)\n gg = GeneralizedGammaFitter().fit(T)\n gg.print_summary(4)\n assert abs(gg.summary.loc[\"lambda_\"][\"coef\"] - -1.0) < 0.05\n\n def test_inverse_gamma_inference(self):\n T = invgamma(0.5).rvs(20000)\n gg = GeneralizedGammaFitter().fit(T)\n gg.print_summary(4)\n assert abs(gg.summary.loc[\"lambda_\", \"coef\"] - -np.exp(gg.summary.loc[\"ln_sigma_\", \"coef\"])) < 0.15\n\n def test_against_reliability_software(self):\n # From http://reliawiki.org/index.php/The_Generalized_Gamma_Distribution\n T = [\n 17.88,\n 28.92,\n 33,\n 41.52,\n 42.12,\n 45.6,\n 48.4,\n 51.84,\n 51.96,\n 54.12,\n 55.56,\n 67.8,\n 68.64,\n 68.64,\n 
68.88,\n 84.12,\n 93.12,\n 98.64,\n 105.12,\n 105.84,\n 127.92,\n 128.04,\n 173.4,\n ]\n\n gg = GeneralizedGammaFitter().fit(T)\n npt.assert_allclose(gg.summary.loc[\"mu_\", \"coef\"], 4.23064, rtol=0.001)\n npt.assert_allclose(gg.summary.loc[\"lambda_\", \"coef\"], 0.307639, rtol=1e-3)\n npt.assert_allclose(np.exp(gg.summary.loc[\"ln_sigma_\", \"coef\"]), 0.509982, rtol=1e-3)\n\n\nclass TestExponentialFitter:\n def test_fit_computes_correct_lambda_(self):\n T = np.array([10, 10, 10, 10], dtype=float)\n E = np.array([1, 1, 1, 0], dtype=float)\n enf = ExponentialFitter()\n enf.fit(T, E)\n assert abs(enf.lambda_ - (T.sum() / E.sum())) < 1e-4\n\n def test_fit_computes_correct_asymptotic_variance(self):\n N = 5000\n T = np.random.exponential(size=N)\n C = np.random.exponential(size=N)\n E = T < C\n T = np.minimum(T, C)\n enf = ExponentialFitter()\n enf.fit(T, E)\n assert abs(enf.summary.loc[\"lambda_\", \"se(coef)\"] ** 2 - (T.sum() / E.sum()) ** 2 / N) < 1e-3\n\n\nclass TestKaplanMeierFitter:\n def kaplan_meier(self, lifetimes, observed=None):\n lifetimes_counter = Counter(lifetimes)\n km = np.zeros((len(list(lifetimes_counter.keys())), 1))\n ordered_lifetimes = np.sort(list(lifetimes_counter.keys()))\n N = len(lifetimes)\n v = 1.0\n n = N * 1.0\n for i, t in enumerate(ordered_lifetimes):\n if observed is not None:\n ix = lifetimes == t\n c = sum(1 - observed[ix])\n if n != 0:\n v *= 1 - (lifetimes_counter.get(t) - c) / n\n n -= lifetimes_counter.get(t)\n else:\n v *= 1 - lifetimes_counter.get(t) / n\n n -= lifetimes_counter.get(t)\n km[i] = v\n if lifetimes_counter.get(0) is None:\n km = np.insert(km, 0, 1.0)\n return km.reshape(len(km), 1)\n\n def test_kmf_overflow_error(self):\n N = int(1e6)\n T = np.random.exponential(size=N)\n E = T < 0.001\n kmf = KaplanMeierFitter()\n kmf.fit(T, E)\n assert True\n\n def test_left_truncation_against_Cole_and_Hudgens(self):\n df = load_multicenter_aids_cohort_study()\n kmf = KaplanMeierFitter()\n kmf.fit(df[\"T\"], event_observed=df[\"D\"], entry=df[\"W\"])\n\n # the papers event table only looks at times when the individuals die\n event_table = kmf.event_table[kmf.event_table[\"observed\"] > 0]\n\n assert event_table.shape[0] == 26\n assert event_table.loc[0.26899999999999996, \"at_risk\"] == 42\n assert event_table.loc[0.7909999999999999, \"at_risk\"] == 44\n assert event_table.loc[4.688, \"at_risk\"] == 11\n\n assert kmf.survival_function_.loc[0.7909999999999999, \"KM_estimate\"] == 0.9540043290043292\n assert abs(kmf.median_survival_time_ - 3) < 0.1\n\n def test_kaplan_meier_no_censorship(self, sample_lifetimes):\n T, _ = sample_lifetimes\n kmf = KaplanMeierFitter()\n kmf.fit(T)\n npt.assert_almost_equal(kmf.survival_function_.values, self.kaplan_meier(T))\n\n def test_kaplan_meier_with_censorship(self, sample_lifetimes):\n T, C = sample_lifetimes\n kmf = KaplanMeierFitter()\n kmf.fit(T, C)\n npt.assert_almost_equal(kmf.survival_function_.values, self.kaplan_meier(T, C))\n\n def test_stat_error_is_raised_if_too_few_early_deaths(self):\n observations = np.array(\n [1, 1, 1, 22, 30, 28, 32, 11, 14, 36, 31, 33, 33, 37, 35, 25, 31, 22, 26, 24, 35, 34, 30, 35, 40, 39, 2]\n )\n births = observations - 1\n kmf = KaplanMeierFitter()\n with pytest.raises(StatError):\n kmf.fit(observations, entry=births)\n\n def test_sort_doesnt_affect_kmf(self, sample_lifetimes):\n T, _ = sample_lifetimes\n kmf = KaplanMeierFitter()\n assert_frame_equal(kmf.fit(T).survival_function_, kmf.fit(sorted(T)).survival_function_)\n\n def 
test_passing_in_left_censorship_creates_a_cumulative_density(self, sample_lifetimes):\n T, C = sample_lifetimes\n kmf = KaplanMeierFitter()\n kmf.fit_left_censoring(T, C)\n assert hasattr(kmf, \"cumulative_density_\")\n assert hasattr(kmf, \"plot_cumulative_density\")\n\n def test_kmf_left_censored_data_stats(self):\n # from http://www.public.iastate.edu/~pdixon/stat505/Chapter%2011.pdf\n T = [3, 5, 5, 5, 6, 6, 10, 12]\n C = [1, 0, 0, 1, 1, 1, 0, 1]\n kmf = KaplanMeierFitter()\n kmf.fit_left_censoring(T, C)\n\n actual = kmf.cumulative_density_[kmf._label].values\n npt.assert_allclose(actual, np.array([0, 0.437500, 0.5833333, 0.875, 0.875, 1]))\n\n def test_shifting_durations_doesnt_affect_survival_function_values(self):\n T = np.random.exponential(10, size=100)\n kmf = KaplanMeierFitter()\n expected = kmf.fit(T).survival_function_.values\n\n T_shifted = T + 100\n npt.assert_allclose(expected, kmf.fit(T_shifted).survival_function_.values)\n\n T_shifted = T - 50\n npt.assert_allclose(expected[1:], kmf.fit(T_shifted).survival_function_.values)\n\n T_shifted = T - 200\n npt.assert_allclose(expected[1:], kmf.fit(T_shifted).survival_function_.values)\n\n def test_kmf_survival_curve_output_against_R(self):\n df = load_g3()\n ix = df[\"group\"] == \"RIT\"\n kmf = KaplanMeierFitter()\n\n expected = np.array([[0.909, 0.779]]).T\n kmf.fit(df.loc[ix][\"time\"], df.loc[ix][\"event\"], timeline=[25, 53])\n npt.assert_allclose(kmf.survival_function_.values, expected, rtol=10e-3)\n\n expected = np.array([[0.833, 0.667, 0.5, 0.333]]).T\n kmf.fit(df.loc[~ix][\"time\"], df.loc[~ix][\"event\"], timeline=[9, 19, 32, 34])\n npt.assert_allclose(kmf.survival_function_.values, expected, rtol=10e-3)\n\n @pytest.mark.xfail()\n def test_kmf_survival_curve_output_against_R_super_accurate(self):\n df = load_g3()\n ix = df[\"group\"] == \"RIT\"\n kmf = KaplanMeierFitter()\n\n expected = np.array([[0.909, 0.779]]).T\n kmf.fit(df.loc[ix][\"time\"], df.loc[ix][\"event\"], timeline=[25, 53])\n npt.assert_allclose(kmf.survival_function_.values, expected, rtol=10e-4)\n\n expected = np.array([[0.833, 0.667, 0.5, 0.333]]).T\n kmf.fit(df.loc[~ix][\"time\"], df.loc[~ix][\"event\"], timeline=[9, 19, 32, 34])\n npt.assert_allclose(kmf.survival_function_.values, expected, rtol=10e-4)\n\n def test_kmf_confidence_intervals_output_against_R(self):\n # this uses conf.type = 'log-log'\n df = load_g3()\n ix = df[\"group\"] != \"RIT\"\n kmf = KaplanMeierFitter()\n kmf.fit(df.loc[ix][\"time\"], df.loc[ix][\"event\"], timeline=[9, 19, 32, 34])\n\n expected_lower_bound = np.array([0.2731, 0.1946, 0.1109, 0.0461])\n npt.assert_allclose(kmf.confidence_interval_[\"KM_estimate_lower_0.95\"].values, expected_lower_bound, rtol=10e-4)\n\n expected_upper_bound = np.array([0.975, 0.904, 0.804, 0.676])\n npt.assert_allclose(kmf.confidence_interval_[\"KM_estimate_upper_0.95\"].values, expected_upper_bound, rtol=10e-4)\n\n def test_kmf_does_not_drop_to_zero_if_last_point_is_censored(self):\n T = np.arange(0, 50, 0.5)\n E = np.random.binomial(1, 0.7, 100)\n E[np.argmax(T)] = 0\n kmf = KaplanMeierFitter()\n kmf.fit(T, E)\n assert kmf.survival_function_[\"KM_estimate\"].iloc[-1] > 0\n\n def test_adding_weights_to_KaplanMeierFitter(self):\n n = 100\n df = pd.DataFrame()\n df[\"T\"] = np.random.binomial(40, 0.5, n)\n df[\"E\"] = np.random.binomial(1, 0.9, n)\n\n kmf_no_weights = KaplanMeierFitter().fit(df[\"T\"], df[\"E\"])\n\n df_grouped = df.groupby([\"T\", \"E\"]).size().reset_index()\n kmf_w_weights = KaplanMeierFitter().fit(df_grouped[\"T\"], 
df_grouped[\"E\"], weights=df_grouped[0])\n\n assert_frame_equal(kmf_w_weights.survival_function_, kmf_no_weights.survival_function_)\n\n def test_weights_can_be_floats(self):\n n = 100\n T = np.random.binomial(40, 0.5, n)\n E = np.random.binomial(1, 0.9, n)\n with pytest.warns(StatisticalWarning) as w:\n kmf = KaplanMeierFitter().fit(T, E, weights=np.random.random(n))\n assert True\n\n def test_weights_with_unaligned_index(self):\n df = pd.DataFrame(index=[5, 6, 7, 8])\n df[\"t\"] = [0.6, 0.4, 0.8, 0.9]\n df[\"y\"] = [0, 1, 1, 0]\n df[\"w\"] = [1.5, 2, 0.8, 0.9]\n with pytest.warns(StatisticalWarning) as w:\n kmf = KaplanMeierFitter().fit(durations=df[\"t\"], event_observed=df[\"y\"], weights=df[\"w\"])\n a = list(kmf.survival_function_.KM_estimate)\n assert a == [1.0, 0.6153846153846154, 0.6153846153846154, 0.32579185520362, 0.32579185520362]\n\n def test_late_entry_with_almost_tied_entry_and_death_against_R(self):\n entry = [1.9, 0, 0, 0, 0]\n T = [2, 10, 5, 4, 3]\n kmf = KaplanMeierFitter()\n kmf.fit(T, entry=entry)\n\n expected = [1.0, 1.0, 0.8, 0.6, 0.4, 0.2, 0.0]\n npt.assert_allclose(kmf.survival_function_.values.reshape(7), expected)\n\n def test_late_entry_with_against_R(self):\n entry = [1, 2, 4, 0, 0]\n T = [2, 10, 5, 4, 3]\n kmf = KaplanMeierFitter()\n kmf.fit(T, entry=entry)\n\n expected = [1.0, 1.0, 0.667, 0.444, 0.222, 0.111, 0.0]\n npt.assert_allclose(kmf.survival_function_.values.reshape(7), expected, rtol=1e-2)\n\n def test_kmf_has_both_survival_function_and_cumulative_density(self):\n # right censoring\n kmf = KaplanMeierFitter().fit_right_censoring(np.arange(100))\n assert hasattr(kmf, \"survival_function_\")\n assert hasattr(kmf, \"plot_survival_function\")\n assert hasattr(kmf, \"confidence_interval_survival_function_\")\n assert_frame_equal(kmf.confidence_interval_survival_function_, kmf.confidence_interval_)\n\n assert hasattr(kmf, \"cumulative_density_\")\n assert hasattr(kmf, \"plot_cumulative_density\")\n assert hasattr(kmf, \"confidence_interval_cumulative_density_\")\n\n # left censoring\n kmf = KaplanMeierFitter().fit_left_censoring(np.arange(100))\n assert hasattr(kmf, \"survival_function_\")\n assert hasattr(kmf, \"plot_survival_function\")\n assert hasattr(kmf, \"confidence_interval_survival_function_\")\n\n assert hasattr(kmf, \"cumulative_density_\")\n assert hasattr(kmf, \"plot_cumulative_density\")\n assert hasattr(kmf, \"confidence_interval_cumulative_density_\")\n assert_frame_equal(kmf.confidence_interval_cumulative_density_, kmf.confidence_interval_)\n\n def test_late_entry_with_tied_entry_and_death(self):\n np.random.seed(101)\n\n Ct = 10.0\n\n n = 10000\n df = pd.DataFrame()\n df[\"id\"] = [i for i in range(n)]\n df[\"t\"] = np.ceil(np.random.weibull(1, size=n) * 5)\n df[\"t_cens\"] = np.ceil(np.random.weibull(1, size=n) * 3)\n df[\"t_enter\"] = np.floor(np.random.weibull(1.5, size=n) * 2)\n df[\"ft\"] = 10\n df[\"t_out\"] = np.min(df[[\"t\", \"t_cens\", \"ft\"]], axis=1).astype(int)\n df[\"d\"] = (np.where(df[\"t\"] <= Ct, 1, 0)) * (np.where(df[\"t\"] <= df[\"t_cens\"], 1, 0))\n df[\"c\"] = (np.where(df[\"t_cens\"] <= Ct, 1, 0)) * (np.where(df[\"t_cens\"] < df[\"t\"], 1, 0))\n df[\"y\"] = (\n (np.where(df[\"t\"] > df[\"t_enter\"], 1, 0))\n * (np.where(df[\"t_cens\"] > df[\"t_enter\"], 1, 0))\n * (np.where(Ct > df[\"t_enter\"], 1, 0))\n )\n dfo = df.loc[df[\"y\"] == 1].copy() # \"observed data\"\n\n # Fitting KM to full data\n km1 = KaplanMeierFitter()\n km1.fit(df[\"t_out\"], event_observed=df[\"d\"])\n rf = 
pd.DataFrame(index=km1.survival_function_.index)\n rf[\"KM_true\"] = km1.survival_function_\n\n # Fitting KM to \"observed\" data\n km2 = KaplanMeierFitter()\n km2.fit(dfo[\"t_out\"], entry=dfo[\"t_enter\"], event_observed=dfo[\"d\"])\n rf[\"KM_lifelines_latest\"] = km2.survival_function_\n\n # Version of KM where late entries occur after\n rf[\"KM_lateenterafter\"] = np.cumprod(\n 1 - (km2.event_table.observed / (km2.event_table.at_risk - km2.event_table.entrance))\n )\n\n # drop the first NA from comparison\n rf = rf.dropna()\n\n npt.assert_allclose(rf[\"KM_true\"].values, rf[\"KM_lateenterafter\"].values, rtol=10e-2)\n npt.assert_allclose(rf[\"KM_lifelines_latest\"].values, rf[\"KM_lateenterafter\"].values, rtol=10e-2)\n npt.assert_allclose(rf[\"KM_lifelines_latest\"].values, rf[\"KM_true\"].values, rtol=10e-2)\n\n\nclass TestNelsonAalenFitter:\n def nelson_aalen(self, lifetimes, observed=None):\n lifetimes_counter = Counter(lifetimes)\n na = np.zeros((len(list(lifetimes_counter.keys())), 1))\n ordered_lifetimes = np.sort(list(lifetimes_counter.keys()))\n N = len(lifetimes)\n v = 0.0\n n = N * 1.0\n for i, t in enumerate(ordered_lifetimes):\n if observed is not None:\n ix = lifetimes == t\n c = sum(1 - observed[ix])\n if n != 0:\n v += (lifetimes_counter.get(t) - c) / n\n n -= lifetimes_counter.get(t)\n else:\n v += lifetimes_counter.get(t) / n\n n -= lifetimes_counter.get(t)\n na[i] = v\n if lifetimes_counter.get(0) is None:\n na = np.insert(na, 0, 0.0)\n return na.reshape(len(na), 1)\n\n def test_nelson_aalen_no_censorship(self, sample_lifetimes):\n T, _ = sample_lifetimes\n naf = NelsonAalenFitter(nelson_aalen_smoothing=False)\n naf.fit(T)\n npt.assert_almost_equal(naf.cumulative_hazard_.values, self.nelson_aalen(T))\n\n def test_censor_nelson_aalen(self, sample_lifetimes):\n T, C = sample_lifetimes\n naf = NelsonAalenFitter(nelson_aalen_smoothing=False)\n naf.fit(T, C)\n npt.assert_almost_equal(naf.cumulative_hazard_.values, self.nelson_aalen(T, C))\n\n def test_loc_slicing(self, waltons_dataset):\n naf = NelsonAalenFitter().fit(waltons_dataset[\"T\"])\n assert naf.cumulative_hazard_.loc[0:10].shape[0] == 4\n\n def test_iloc_slicing(self, waltons_dataset):\n naf = NelsonAalenFitter().fit(waltons_dataset[\"T\"])\n assert naf.cumulative_hazard_.iloc[0:10].shape[0] == 10\n assert naf.cumulative_hazard_.iloc[0:-1].shape[0] == 32\n\n def test_smoothing_hazard_ties(self):\n T = np.random.binomial(20, 0.7, size=300)\n C = np.random.binomial(1, 0.8, size=300)\n naf = NelsonAalenFitter()\n naf.fit(T, C)\n naf.smoothed_hazard_(1.0)\n\n def test_smoothing_hazard_nontied(self):\n T = np.random.exponential(20, size=300) ** 2\n C = np.random.binomial(1, 0.8, size=300)\n naf = NelsonAalenFitter()\n naf.fit(T, C)\n naf.smoothed_hazard_(1.0)\n naf.fit(T)\n naf.smoothed_hazard_(1.0)\n\n def test_smoothing_hazard_ties_all_events_observed(self):\n T = np.random.binomial(20, 0.7, size=300)\n naf = NelsonAalenFitter()\n naf.fit(T)\n naf.smoothed_hazard_(1.0)\n\n def test_smoothing_hazard_with_spike_at_time_0(self):\n T = np.random.binomial(20, 0.7, size=300)\n T[np.random.binomial(1, 0.3, size=300).astype(bool)] = 0\n naf = NelsonAalenFitter()\n naf.fit(T)\n df = naf.smoothed_hazard_(bandwidth=0.1)\n assert df.iloc[0].values[0] > df.iloc[1].values[0]\n\n def test_nelson_aalen_smoothing(self):\n # this test was included because I was refactoring the estimators.\n np.random.seed(1)\n N = 10 ** 4\n t = np.random.exponential(1, size=N)\n c = np.random.binomial(1, 0.9, size=N)\n naf = 
NelsonAalenFitter(nelson_aalen_smoothing=True)\n naf.fit(t, c)\n assert abs(naf.cumulative_hazard_[\"NA_estimate\"].iloc[-1] - 8.545665) < 1e-6\n assert abs(naf.confidence_interval_[\"NA_estimate_upper_0.95\"].iloc[-1] - 11.315662) < 1e-6\n assert abs(naf.confidence_interval_[\"NA_estimate_lower_0.95\"].iloc[-1] - 6.4537448) < 1e-6\n\n def test_adding_weights_to_NelsonAalenFitter(self):\n n = 100\n df = pd.DataFrame()\n df[\"T\"] = np.random.binomial(40, 0.5, n)\n df[\"E\"] = np.random.binomial(1, 0.9, n)\n\n naf_no_weights = NelsonAalenFitter().fit(df[\"T\"], df[\"E\"])\n\n df_grouped = df.groupby([\"T\", \"E\"]).size().reset_index()\n naf_w_weights = NelsonAalenFitter().fit(df_grouped[\"T\"], df_grouped[\"E\"], weights=df_grouped[0])\n\n assert_frame_equal(naf_w_weights.cumulative_hazard_, naf_no_weights.cumulative_hazard_)\n\n\nclass TestBreslowFlemingHarringtonFitter:\n def test_BHF_fit_when_KMF_throws_an_error(self):\n bfh = BreslowFlemingHarringtonFitter()\n kmf = KaplanMeierFitter()\n\n observations = np.array(\n [1, 1, 2, 22, 30, 28, 32, 11, 14, 36, 31, 33, 33, 37, 35, 25, 31, 22, 26, 24, 35, 34, 30, 35, 40, 39, 2]\n )\n births = observations - 1\n\n with pytest.raises(StatError):\n kmf.fit(observations, entry=births)\n\n bfh.fit(observations, entry=births)\n\n\nclass TestParametricRegressionFitter:\n @pytest.fixture\n def rossi(self):\n rossi = load_rossi()\n rossi[\"_int\"] = 1.0\n return rossi\n\n def test_custom_weibull_model_gives_the_same_data_as_implemented_weibull_model(self, rossi):\n class CustomWeibull(ParametricRegressionFitter):\n _scipy_fit_method = \"SLSQP\"\n _scipy_fit_options = {\"ftol\": 1e-10, \"maxiter\": 200}\n _fitted_parameter_names = [\"lambda_\", \"rho_\"]\n\n def _cumulative_hazard(self, params, T, Xs):\n lambda_ = anp.exp(anp.dot(Xs[\"lambda_\"], params[\"lambda_\"]))\n rho_ = anp.exp(anp.dot(Xs[\"rho_\"], params[\"rho_\"]))\n\n return (T / lambda_) ** rho_\n\n def _log_hazard(self, params, T, Xs):\n lambda_params = params[\"lambda_\"]\n log_lambda_ = Xs[\"lambda_\"] @ lambda_params\n\n rho_params = params[\"rho_\"]\n log_rho_ = Xs[\"rho_\"] @ rho_params\n\n return log_rho_ - log_lambda_ + anp.expm1(log_rho_) * (anp.log(T) - log_lambda_)\n\n cb = CustomWeibull(penalizer=0.0)\n wf = WeibullAFTFitter(fit_intercept=False, penalizer=0.0)\n\n cb.fit(rossi, \"week\", \"arrest\", regressors={\"lambda_\": rossi.columns, \"rho_\": [\"_int\"]})\n wf.fit(rossi, \"week\", \"arrest\")\n\n assert_frame_equal(cb.summary.loc[\"lambda_\"], wf.summary.loc[\"lambda_\"], check_less_precise=1)\n npt.assert_allclose(cb.log_likelihood_, wf.log_likelihood_)\n\n cb.fit_left_censoring(rossi, \"week\", \"arrest\", regressors={\"lambda_\": rossi.columns, \"rho_\": [\"_int\"]})\n wf.fit_left_censoring(rossi, \"week\", \"arrest\")\n\n assert_frame_equal(cb.summary.loc[\"lambda_\"], wf.summary.loc[\"lambda_\"], check_less_precise=1)\n npt.assert_allclose(cb.log_likelihood_, wf.log_likelihood_)\n\n rossi = rossi.loc[rossi[\"arrest\"].astype(bool)]\n rossi[\"week_end\"] = rossi[\"week\"].copy()\n rossi = rossi.drop(\"arrest\", axis=1)\n cb.fit_interval_censoring(rossi, \"week\", \"week_end\", regressors={\"lambda_\": rossi.columns, \"rho_\": [\"_int\"]})\n wf.fit_interval_censoring(rossi, \"week\", \"week_end\")\n\n assert_frame_equal(cb.summary.loc[\"lambda_\"], wf.summary.loc[\"lambda_\"], check_less_precise=1)\n npt.assert_allclose(cb.log_likelihood_, wf.log_likelihood_, rtol=0.01)\n\n\nclass CureModelA(ParametricRegressionFitter):\n\n _fitted_parameter_names = [\"lambda_\", 
\"beta_\", \"rho_\"]\n\n def _cumulative_hazard(self, params, T, Xs):\n c = expit(anp.dot(Xs[\"beta_\"], params[\"beta_\"]))\n\n lambda_ = anp.exp(anp.dot(Xs[\"lambda_\"], params[\"lambda_\"]))\n rho_ = anp.exp(anp.dot(Xs[\"rho_\"], params[\"rho_\"]))\n cdf = 1 - anp.exp(-((T / lambda_) ** rho_))\n\n return -anp.log((1 - c) + c * (1 - cdf))\n\n\nclass CureModelB(ParametricRegressionFitter):\n # notice the c vs 1-c in the return statement\n _fitted_parameter_names = [\"lambda_\", \"beta_\", \"rho_\"]\n\n def _cumulative_hazard(self, params, T, Xs):\n c = expit(anp.dot(Xs[\"beta_\"], params[\"beta_\"]))\n\n lambda_ = anp.exp(anp.dot(Xs[\"lambda_\"], params[\"lambda_\"]))\n rho_ = anp.exp(anp.dot(Xs[\"rho_\"], params[\"rho_\"]))\n cdf = 1 - anp.exp(-((T / lambda_) ** rho_))\n\n return -anp.log(c + (1 - c) * (1 - cdf))\n\n\nclass CureModelC(CureModelB):\n # shuffle these parameter names - shouldn't change anything.\n _fitted_parameter_names = [\"lambda_\", \"rho_\", \"beta_\"]\n\n\nclass TestCustomRegressionModel:\n @pytest.fixture\n def rossi(self):\n rossi = load_rossi()\n rossi[\"intercept\"] = 1.0\n return rossi\n\n def test_reparameterization_flips_the_sign(self, rossi):\n\n regressors = {\"lambda_\": rossi.columns, \"rho_\": [\"intercept\"], \"beta_\": [\"intercept\", \"fin\"]}\n\n cmA = CureModelA()\n cmB = CureModelB()\n cmC = CureModelC()\n\n cmA.fit(rossi, \"week\", event_col=\"arrest\", regressors=regressors)\n cmB.fit(rossi, \"week\", event_col=\"arrest\", regressors=regressors)\n cmC.fit(\n rossi,\n \"week\",\n event_col=\"arrest\",\n regressors={\"lambda_\": rossi.columns, \"beta_\": [\"intercept\", \"fin\"], \"rho_\": [\"intercept\"]},\n )\n assert_frame_equal(cmA.summary.loc[\"lambda_\"], cmB.summary.loc[\"lambda_\"])\n assert_frame_equal(cmA.summary.loc[\"rho_\"], cmB.summary.loc[\"rho_\"])\n assert_frame_equal(cmC.summary, cmB.summary)\n assert_series_equal(cmA.params_.loc[\"beta_\"], -cmB.params_.loc[\"beta_\"])\n\n\nclass TestRegressionFitters:\n @pytest.fixture\n def rossi(self):\n rossi = load_rossi()\n return rossi\n\n @pytest.fixture\n def regression_models_sans_strata_model(self):\n return [\n CoxPHFitter(penalizer=1.0, baseline_estimation_method=\"breslow\"),\n CoxPHFitter(penalizer=1.0, baseline_estimation_method=\"spline\", n_baseline_knots=1),\n CoxPHFitter(penalizer=1.0, baseline_estimation_method=\"spline\", n_baseline_knots=2),\n AalenAdditiveFitter(coef_penalizer=1.0, smoothing_penalizer=1.0),\n WeibullAFTFitter(fit_intercept=True),\n LogNormalAFTFitter(fit_intercept=True),\n LogLogisticAFTFitter(fit_intercept=True),\n PiecewiseExponentialRegressionFitter(breakpoints=[25.0]),\n CustomRegressionModelTesting(penalizer=1.0),\n GeneralizedGammaRegressionFitter(penalizer=5.0),\n ]\n\n @pytest.fixture\n def regression_models(self, regression_models_sans_strata_model):\n regression_models_sans_strata_model.append(CoxPHFitter(strata=[\"race\", \"paro\", \"mar\", \"wexp\"]))\n return regression_models_sans_strata_model\n\n def test_score_method_returns_same_value_for_unpenalized_models(self, rossi):\n regression_models = [CoxPHFitter(), WeibullAFTFitter()]\n for fitter in regression_models:\n fitter.fit(rossi, \"week\", \"arrest\")\n npt.assert_almost_equal(\n fitter.score(rossi, scoring_method=\"log_likelihood\"), fitter.log_likelihood_ / rossi.shape[0]\n )\n npt.assert_almost_equal(fitter.score(rossi, scoring_method=\"concordance_index\"), fitter.concordance_index_)\n\n rossi[\"_intercept\"] = 1.0\n regression_models = [CustomRegressionModelTesting(), 
PiecewiseExponentialRegressionFitter(breakpoints=[25.0])]\n for fitter in regression_models:\n fitter.fit(rossi, \"week\", \"arrest\")\n npt.assert_almost_equal(\n fitter.score(rossi, scoring_method=\"log_likelihood\"), fitter.log_likelihood_ / rossi.shape[0]\n )\n npt.assert_almost_equal(fitter.score(rossi, scoring_method=\"concordance_index\"), fitter.concordance_index_)\n\n def test_print_summary(self, rossi, regression_models):\n for fitter in regression_models:\n fitter.fit(rossi, \"week\", \"arrest\")\n fitter.print_summary()\n\n def test_pickle_serialization(self, rossi, regression_models):\n for fitter in regression_models:\n fitter.fit(rossi, \"week\", \"arrest\")\n\n unpickled = pickle.loads(pickle.dumps(fitter))\n dif = (fitter.durations - unpickled.durations).sum()\n assert dif == 0\n\n def test_dill_serialization(self, rossi, regression_models):\n from dill import dumps, loads\n\n for fitter in regression_models:\n fitter.fit(rossi, \"week\", \"arrest\")\n\n unpickled = loads(dumps(fitter))\n dif = (fitter.durations - unpickled.durations).sum()\n assert dif == 0\n\n def test_joblib_serialization(self, rossi, regression_models):\n from joblib import dump, load\n\n for fitter in regression_models:\n fitter.fit(rossi, \"week\", \"arrest\")\n\n dump(fitter, \"filename.joblib\")\n unpickled = load(\"filename.joblib\")\n dif = (fitter.durations - unpickled.durations).sum()\n assert dif == 0\n\n def test_fit_will_accept_object_dtype_as_event_col(self, regression_models_sans_strata_model, rossi):\n # issue #638\n rossi[\"arrest\"] = rossi[\"arrest\"].astype(object)\n rossi[\"arrest\"].iloc[0] = None\n\n assert rossi[\"arrest\"].dtype == object\n rossi = rossi.dropna()\n assert rossi[\"arrest\"].dtype == object\n\n for fitter in regression_models_sans_strata_model:\n fitter.fit(rossi, \"week\", \"arrest\")\n\n def test_fit_raise_an_error_if_nan_in_event_col(self, regression_models_sans_strata_model):\n df = pd.DataFrame({\"T\": np.arange(1, 11), \"E\": [True] * 9 + [None]})\n\n for fitter in regression_models_sans_strata_model:\n with pytest.raises(TypeError, match=\"NaNs were detected in the dataset\"):\n fitter.fit(df, \"T\", \"E\")\n\n def test_fit_methods_require_duration_col(self, rossi, regression_models):\n for fitter in regression_models:\n with pytest.raises(TypeError):\n fitter.fit(rossi)\n\n def test_predict_methods_in_regression_return_same_types(self, regression_models, rossi):\n\n fitted_regression_models = list(\n map(lambda model: model.fit(rossi, duration_col=\"week\", event_col=\"arrest\"), regression_models)\n )\n\n for predict_method in [\n \"predict_percentile\",\n \"predict_median\",\n \"predict_expectation\",\n \"predict_survival_function\",\n \"predict_cumulative_hazard\",\n ]:\n for fitter1, fitter2 in combinations(fitted_regression_models, 2):\n assert isinstance(\n getattr(fitter1, predict_method)(rossi), type(getattr(fitter2, predict_method)(rossi))\n )\n\n def test_predict_methods_in_regression_return_same_index(self, regression_models, rossi):\n\n fitted_regression_models = list(\n map(lambda model: model.fit(rossi, duration_col=\"week\", event_col=\"arrest\"), regression_models)\n )\n\n X = rossi.loc[:10]\n\n for predict_method in [\n \"predict_percentile\",\n \"predict_median\",\n \"predict_expectation\",\n \"predict_survival_function\",\n \"predict_cumulative_hazard\",\n ]:\n for fitter1, fitter2 in combinations(fitted_regression_models, 2):\n assert_index_equal(getattr(fitter1, predict_method)(X).index, getattr(fitter2, predict_method)(X).index)\n\n 
def test_duration_vector_can_be_normalized_up_to_an_intercept(self, regression_models, rossi):\n t = rossi[\"week\"]\n normalized_rossi = rossi.copy()\n normalized_rossi[\"week\"] = (normalized_rossi[\"week\"]) / t.std()\n\n for fitter in regression_models:\n if (\n isinstance(fitter, PiecewiseExponentialRegressionFitter)\n or isinstance(fitter, CustomRegressionModelTesting)\n or isinstance(fitter, GeneralizedGammaRegressionFitter)\n ):\n continue\n\n # we drop indexes since aaf will have a different \"time\" index.\n try:\n hazards = fitter.fit(rossi, duration_col=\"week\", event_col=\"arrest\").hazards_\n hazards_norm = fitter.fit(normalized_rossi, duration_col=\"week\", event_col=\"arrest\").hazards_\n except AttributeError:\n hazards = fitter.fit(rossi, duration_col=\"week\", event_col=\"arrest\").params_\n hazards_norm = fitter.fit(normalized_rossi, duration_col=\"week\", event_col=\"arrest\").params_\n\n if isinstance(hazards, pd.DataFrame):\n assert_frame_equal(hazards.reset_index(drop=True), hazards_norm.reset_index(drop=True))\n else:\n if isinstance(hazards.index, pd.MultiIndex):\n assert_series_equal(\n hazards.drop(\"_intercept\", axis=0, level=1),\n hazards_norm.drop(\"_intercept\", axis=0, level=1),\n check_less_precise=2,\n )\n else:\n assert_series_equal(hazards, hazards_norm, check_less_precise=2)\n\n def test_prediction_methods_respect_index(self, regression_models, rossi):\n X = rossi.iloc[:4].sort_index(ascending=False)\n expected_index = pd.Index(np.array([3, 2, 1, 0]))\n\n for fitter in regression_models:\n fitter.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n npt.assert_array_equal(fitter.predict_percentile(X).index, expected_index)\n npt.assert_array_equal(fitter.predict_expectation(X).index, expected_index)\n try:\n npt.assert_array_equal(fitter.predict_partial_hazard(X).index, expected_index)\n except AttributeError:\n pass\n\n def test_error_is_raised_if_using_non_numeric_data_in_fit(self):\n df = pd.DataFrame.from_dict(\n {\n \"t\": [1.0, 5.0, 3.0, 4.0],\n \"bool_\": [True, True, False, True],\n \"int_\": [1, -1, 0, 2],\n \"uint8_\": pd.Series([1, 0, 2, 1], dtype=\"uint8\"),\n \"string_\": [\"test\", \"a\", \"2.5\", \"\"],\n \"float_\": [1.2, -0.5, 0.0, 2.2],\n \"categorya_\": pd.Series([1, 2, 3, 1], dtype=\"category\"),\n \"categoryb_\": pd.Series([\"a\", \"b\", \"a\", \"b\"], dtype=\"category\"),\n }\n )\n\n for fitter in [CoxPHFitter(), WeibullAFTFitter()]:\n for subset in [[\"t\", \"categoryb_\"], [\"t\", \"string_\"]]:\n with pytest.raises(ValueError):\n fitter.fit(df[subset], duration_col=\"t\")\n\n for subset in [[\"t\", \"uint8_\"]]:\n fitter.fit(df[subset], duration_col=\"t\")\n\n @pytest.mark.xfail\n def test_regression_model_has_concordance_index_(self, regression_models, rossi):\n\n for fitter in regression_models:\n assert not hasattr(fitter, \"concordance_index_\")\n fitter.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n assert hasattr(fitter, \"concordance_index_\")\n\n @pytest.mark.xfail\n def test_regression_model_updates_concordance_index_(self, regression_models, rossi):\n\n for fitter in regression_models:\n assert not hasattr(fitter, \"concordance_index_\")\n fitter.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n assert hasattr(fitter, \"concordance_index_\")\n first_score_ = fitter.concordance_index_\n\n fitter.fit(rossi.head(50), duration_col=\"week\", event_col=\"arrest\")\n assert first_score_ != fitter.concordance_index_\n\n def test_error_is_thrown_if_there_is_nans_in_the_duration_col(self, 
regression_models, rossi):\n rossi.loc[3, \"week\"] = None\n for fitter in regression_models:\n with pytest.raises(TypeError):\n fitter.fit(rossi, \"week\", \"arrest\")\n\n def test_error_is_thrown_if_there_is_nans_in_the_event_col(self, regression_models, rossi):\n rossi.loc[3, \"arrest\"] = None\n for fitter in regression_models:\n with pytest.raises(TypeError):\n fitter.fit(rossi, \"week\", \"arrest\")\n\n def test_all_models_have_censoring_type(self, regression_models, rossi):\n for fitter in regression_models:\n fitter.fit(rossi, \"week\", \"arrest\")\n assert hasattr(fitter, \"_censoring_type\")\n\n def test_regression_models_will_not_fail_when_provided_int_times_on_prediction(\n self, regression_models_sans_strata_model, rossi\n ):\n # reported an issue\n for fitter in regression_models_sans_strata_model:\n df = rossi.copy()\n\n fitter.fit(df, duration_col=\"week\", event_col=\"arrest\")\n\n # select only censored items\n df = df[df[\"arrest\"] == 0]\n\n func = lambda row: fitter.predict_survival_function(row, times=row[\"week\"])\n df.apply(func, axis=1)\n\n assert True\n\n\nclass TestPiecewiseExponentialRegressionFitter:\n def test_print_summary(self):\n df = load_rossi()\n pew = PiecewiseExponentialRegressionFitter(breakpoints=[25, 40]).fit(df, \"week\", \"arrest\")\n pew.print_summary()\n\n def test_inference(self):\n\n N, d = 80000, 2\n\n # some numbers take from http://statwonk.com/parametric-survival.html\n breakpoints = (1, 31, 34, 62, 65)\n\n betas = np.array(\n [\n [1.0, -0.2, np.log(15)],\n [5.0, -0.4, np.log(333)],\n [9.0, -0.6, np.log(18)],\n [5.0, -0.8, np.log(500)],\n [2.0, -1.0, np.log(20)],\n [1.0, -1.2, np.log(500)],\n ]\n )\n\n X = 0.1 * np.random.exponential(size=(N, d))\n X = np.c_[X, np.ones(N)]\n\n T = np.empty(N)\n for i in range(N):\n lambdas = np.exp(-betas.dot(X[i, :]))\n T[i] = piecewise_exponential_survival_data(1, breakpoints, lambdas)[0]\n\n T_censor = np.minimum(\n T.mean() * np.random.exponential(size=N), 110\n ) # 110 is the end of observation, eg. 
current time.\n\n df = pd.DataFrame(X[:, :-1], columns=[\"var1\", \"var2\"])\n df[\"_intercept\"] = 1.0\n\n df[\"T\"] = np.round(np.maximum(np.minimum(T, T_censor), 0.1), 1)\n df[\"E\"] = T <= T_censor\n\n pew = PiecewiseExponentialRegressionFitter(breakpoints=breakpoints, penalizer=0.00001).fit(df, \"T\", \"E\")\n\n def assert_allclose(variable_name_tuple, actual):\n npt.assert_allclose(\n pew.summary.loc[variable_name_tuple, \"coef\"],\n actual,\n rtol=1,\n atol=2 * pew.summary.loc[variable_name_tuple, \"se(coef)\"],\n )\n\n assert_allclose((\"lambda_0_\", \"var1\"), betas[0][0])\n assert_allclose((\"lambda_0_\", \"var2\"), betas[0][1])\n assert_allclose((\"lambda_0_\", \"_intercept\"), betas[0][2])\n\n assert_allclose((\"lambda_1_\", \"var1\"), betas[1][0])\n assert_allclose((\"lambda_1_\", \"var2\"), betas[1][1])\n assert_allclose((\"lambda_1_\", \"_intercept\"), betas[1][2])\n\n assert_allclose((\"lambda_5_\", \"var1\"), betas[-1][0])\n assert_allclose((\"lambda_5_\", \"var2\"), betas[-1][1])\n assert_allclose((\"lambda_5_\", \"_intercept\"), betas[-1][2])\n\n\nclass TestAFTFitters:\n @pytest.fixture\n def models(self):\n return [WeibullAFTFitter(), LogNormalAFTFitter(), LogLogisticAFTFitter()]\n\n def test_predict_median_takes_dataframe_with_bools(self):\n\n df = pd.DataFrame(\n [\n {\"dep_y_obs\": 1.0, \"dep_y_cens\": False, \"idp_x1_obs\": 5.0, \"idp_x1_cens\": True},\n {\"dep_y_obs\": 3.0, \"dep_y_cens\": True, \"idp_x1_obs\": 3.0, \"idp_x1_cens\": False},\n {\"dep_y_obs\": 2.0, \"dep_y_cens\": True, \"idp_x1_obs\": 2.0, \"idp_x1_cens\": False},\n {\"dep_y_obs\": 2.0, \"dep_y_cens\": False, \"idp_x1_obs\": 6.0, \"idp_x1_cens\": True},\n {\"dep_y_obs\": 2.5, \"dep_y_cens\": True, \"idp_x1_obs\": 7.0, \"idp_x1_cens\": True},\n {\"dep_y_obs\": 2.7, \"dep_y_cens\": True, \"idp_x1_obs\": 8.0, \"idp_x1_cens\": True},\n ]\n )\n\n wf = WeibullAFTFitter()\n wf.fit_left_censoring(df, \"dep_y_obs\", \"dep_y_cens\")\n wf.predict_median(df)\n\n def test_predict_median_accepts_series(self, rossi):\n df = pd.DataFrame(\n [\n {\"dep_y_obs\": 1.0, \"dep_y_cens\": False, \"idp_x1_obs\": 5.0, \"idp_x1_cens\": True},\n {\"dep_y_obs\": 3.0, \"dep_y_cens\": True, \"idp_x1_obs\": 3.0, \"idp_x1_cens\": False},\n {\"dep_y_obs\": 2.0, \"dep_y_cens\": True, \"idp_x1_obs\": 2.0, \"idp_x1_cens\": False},\n {\"dep_y_obs\": 2.0, \"dep_y_cens\": False, \"idp_x1_obs\": 6.0, \"idp_x1_cens\": True},\n {\"dep_y_obs\": 2.5, \"dep_y_cens\": True, \"idp_x1_obs\": 7.0, \"idp_x1_cens\": True},\n {\"dep_y_obs\": 2.7, \"dep_y_cens\": True, \"idp_x1_obs\": 8.0, \"idp_x1_cens\": True},\n ]\n )\n\n wf = WeibullAFTFitter()\n wf.fit_left_censoring(df, \"dep_y_obs\", \"dep_y_cens\")\n wf.predict_median(df.loc[1])\n\n def test_heterogenous_initial_point(self, rossi):\n aft = WeibullAFTFitter()\n aft.fit(rossi, \"week\", \"arrest\", initial_point={\"lambda_\": np.zeros(8), \"rho_\": np.zeros(1)})\n with pytest.raises(ValueError):\n aft.fit(rossi, \"week\", \"arrest\", initial_point={\"lambda_\": np.zeros(7), \"rho_\": np.zeros(1)})\n\n aft.fit(rossi, \"week\", \"arrest\", initial_point=np.zeros(9))\n with pytest.raises(ValueError):\n aft.fit(rossi, \"week\", \"arrest\", initial_point=np.zeros(10))\n\n def test_percentile_gives_proper_result_compared_to_survival_function(self, rossi, models):\n for model in models:\n model.fit(rossi, \"week\", \"arrest\")\n times = np.linspace(1, 2000, 5000)\n p = 0.1\n subject = rossi.loc[[400]]\n assert (\n abs(\n model.predict_percentile(subject, p=p)\n - qth_survival_time(p, 
model.predict_survival_function(subject, times=times))\n ).loc[400]\n < 0.5\n )\n assert (\n abs(\n model.predict_percentile(subject, p=p, conditional_after=[50])\n - qth_survival_time(\n p, model.predict_survival_function(subject, times=times, conditional_after=[50])\n )\n ).loc[400]\n < 0.5\n )\n\n def test_fit_intercept_can_be_false_and_not_provided(self, rossi):\n # nonsensical data\n interval_rossi = rossi.copy()\n interval_rossi[\"start\"] = 0\n interval_rossi[\"end\"] = rossi[\"week\"]\n interval_rossi[\"arrest\"] = False\n interval_rossi = interval_rossi.drop(\"week\", axis=1)\n\n # nonsensical data\n left_rossi = rossi.copy()\n left_rossi[\"week\"] = 1 / rossi[\"week\"] + 1\n\n for fitter in [WeibullAFTFitter(fit_intercept=False)]:\n fitter.fit_right_censoring(rossi, \"week\", \"arrest\")\n fitter.fit_left_censoring(left_rossi, \"week\", \"arrest\")\n fitter.fit_interval_censoring(interval_rossi, \"start\", \"end\", \"arrest\")\n\n def test_fit_intercept_can_be_false_but_provided(self, rossi):\n rossi[\"intercept\"] = 1.0\n for fitter in [\n WeibullAFTFitter(fit_intercept=False),\n LogNormalAFTFitter(fit_intercept=False),\n LogLogisticAFTFitter(fit_intercept=False),\n ]:\n fitter.fit(rossi, \"week\", \"arrest\", ancillary_df=rossi[[\"intercept\"]])\n\n def test_warning_is_present_if_entry_greater_than_duration(self, rossi, models):\n rossi[\"start\"] = 10\n for fitter in models:\n with pytest.raises(ValueError, match=\"entry > duration\"):\n fitter.fit(rossi, \"week\", \"arrest\", entry_col=\"start\")\n\n def test_weights_col_and_start_col_is_not_included_in_the_output(self, models, rossi):\n rossi[\"weights\"] = 2.0\n rossi[\"start\"] = 0.0\n\n for fitter in models:\n fitter.fit(rossi, \"week\", \"arrest\", weights_col=\"weights\", entry_col=\"start\", ancillary_df=False)\n assert \"weights\" not in fitter.params_.index.get_level_values(1)\n assert \"start\" not in fitter.params_.index.get_level_values(1)\n\n fitter.fit(rossi, \"week\", \"arrest\", weights_col=\"weights\", entry_col=\"start\", ancillary_df=True)\n assert \"weights\" not in fitter.params_.index.get_level_values(1)\n assert \"start\" not in fitter.params_.index.get_level_values(1)\n\n fitter.fit(rossi, \"week\", \"arrest\", weights_col=\"weights\", entry_col=\"start\", ancillary_df=rossi)\n assert \"weights\" not in fitter.params_.index.get_level_values(1)\n assert \"start\" not in fitter.params_.index.get_level_values(1)\n\n def test_accept_initial_params(self, rossi, models):\n for fitter in models:\n fitter.fit(rossi, \"week\", \"arrest\", initial_point=0.01 * np.ones(9))\n\n def test_log_likelihood_is_maximized_for_data_generating_model(self):\n\n N = 50000\n p = 0.5\n bX = np.log(0.5)\n bZ = np.log(4)\n\n Z = np.random.binomial(1, p, size=N)\n X = np.random.binomial(1, 0.5, size=N)\n\n # weibullAFT should have the best fit -> largest ll\n W = weibull_min.rvs(1, scale=1, loc=0, size=N)\n\n Y = bX * X + bZ * Z + np.log(W)\n T = np.exp(Y)\n\n df = pd.DataFrame({\"T\": T, \"x\": X, \"z\": Z})\n\n wf = WeibullAFTFitter().fit(df, \"T\")\n lnf = LogNormalAFTFitter().fit(df, \"T\")\n llf = LogLogisticAFTFitter().fit(df, \"T\")\n\n assert wf.log_likelihood_ > lnf.log_likelihood_\n assert wf.log_likelihood_ > llf.log_likelihood_\n\n # lognormal should have the best fit -> largest ll\n W = norm.rvs(scale=1, loc=0, size=N)\n\n Y = bX * X + bZ * Z + W\n T = np.exp(Y)\n\n df = pd.DataFrame({\"T\": T, \"x\": X, \"z\": Z})\n\n wf = WeibullAFTFitter().fit(df, \"T\")\n lnf = LogNormalAFTFitter().fit(df, \"T\")\n llf = 
LogLogisticAFTFitter().fit(df, \"T\")\n\n assert lnf.log_likelihood_ > wf.log_likelihood_\n assert lnf.log_likelihood_ > llf.log_likelihood_\n\n # loglogistic should have the best fit -> largest ll\n W = logistic.rvs(scale=1, loc=0, size=N)\n\n Y = bX * X + bZ * Z + W\n T = np.exp(Y)\n\n df = pd.DataFrame({\"T\": T, \"x\": X, \"z\": Z})\n\n wf = WeibullAFTFitter().fit(df, \"T\")\n lnf = LogNormalAFTFitter().fit(df, \"T\")\n llf = LogLogisticAFTFitter().fit(df, \"T\")\n\n assert llf.log_likelihood_ > wf.log_likelihood_\n assert llf.log_likelihood_ > lnf.log_likelihood_\n\n def test_aft_median_behaviour(self, models, rossi):\n for aft in models:\n aft.fit(rossi, \"week\", \"arrest\")\n\n subject = aft._norm_mean.to_frame().T\n\n baseline_survival = aft.predict_median(subject).squeeze()\n\n subject.loc[0, \"prio\"] += 1\n accelerated_survival = aft.predict_median(subject).squeeze()\n factor = aft.summary.loc[(aft._primary_parameter_name, \"prio\"), \"exp(coef)\"]\n npt.assert_allclose(accelerated_survival, baseline_survival * factor)\n\n def test_aft_mean_behaviour(self, models, rossi):\n for aft in models:\n aft.fit(rossi, \"week\", \"arrest\")\n\n subject = aft._norm_mean.to_frame().T\n\n baseline_survival = aft.predict_expectation(subject).squeeze()\n\n subject.loc[0, \"prio\"] += 1\n accelerated_survival = aft.predict_expectation(subject).squeeze()\n factor = aft.summary.loc[(aft._primary_parameter_name, \"prio\"), \"exp(coef)\"]\n npt.assert_allclose(accelerated_survival, baseline_survival * factor)\n\n def test_aft_models_can_do_left_censoring(self, models):\n N = 100\n T_actual = 0.5 * np.random.weibull(5, size=N)\n\n MIN_0 = np.percentile(T_actual, 5)\n MIN_1 = np.percentile(T_actual, 10)\n MIN_2 = np.percentile(T_actual, 30)\n MIN_3 = np.percentile(T_actual, 50)\n\n T = T_actual.copy()\n ix = np.random.randint(4, size=N)\n\n T = np.where(ix == 0, np.maximum(T, MIN_0), T)\n T = np.where(ix == 1, np.maximum(T, MIN_1), T)\n T = np.where(ix == 2, np.maximum(T, MIN_2), T)\n T = np.where(ix == 3, np.maximum(T, MIN_3), T)\n E = T_actual == T\n df = pd.DataFrame({\"T\": T, \"E\": E})\n\n for model in models:\n model.fit_left_censoring(df, \"T\", \"E\")\n model.print_summary()\n\n def test_model_ancillary_parameter_works_as_expected(self, rossi):\n aft = WeibullAFTFitter(model_ancillary=True)\n aft.fit(rossi, \"week\", \"arrest\")\n assert aft.summary.loc[\"rho_\"].shape[0] == 8\n\n assert aft.predict_median(rossi).shape[0] == rossi.shape[0]\n\n\nclass TestLogNormalAFTFitter:\n @pytest.fixture\n def aft(self):\n return LogNormalAFTFitter()\n\n def test_coefs_with_fitted_ancillary_params(self, aft, rossi):\n \"\"\"\n library('flexsurv')\n r = flexsurvreg(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio + sdlog(prio) + sdlog(age), data=df, dist='lnorm')\n r$coef\n \"\"\"\n aft.fit(rossi, \"week\", \"arrest\", ancillary_df=rossi[[\"prio\", \"age\"]])\n\n npt.assert_allclose(aft.summary.loc[(\"mu_\", \"paro\"), \"coef\"], 0.09698076, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"mu_\", \"prio\"), \"coef\"], -0.10216665, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"mu_\", \"_intercept\"), \"coef\"], 2.63459946, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"sigma_\", \"_intercept\"), \"coef\"], -0.47257736, rtol=1e-1)\n npt.assert_allclose(aft.summary.loc[(\"sigma_\", \"prio\"), \"coef\"], -0.04741327, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"sigma_\", \"age\"), \"coef\"], 0.03769193, rtol=1e-1)\n\n\nclass TestLogLogisticAFTFitter:\n @pytest.fixture\n 
def aft(self):\n return LogLogisticAFTFitter()\n\n def test_coefs_with_fitted_ancillary_params(self, aft, rossi):\n \"\"\"\n library('flexsurv')\n r = flexsurvreg(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio + shape(prio) + shape(age), data=df, dist='llogis')\n r$coef\n \"\"\"\n aft.fit(rossi, \"week\", \"arrest\", ancillary_df=rossi[[\"prio\", \"age\"]])\n\n npt.assert_allclose(aft.summary.loc[(\"alpha_\", \"paro\"), \"coef\"], 0.07512732, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"alpha_\", \"prio\"), \"coef\"], -0.08837948, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"alpha_\", \"_intercept\"), \"coef\"], 2.75013722, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"beta_\", \"_intercept\"), \"coef\"], 1.22928200, rtol=1e-1)\n npt.assert_allclose(aft.summary.loc[(\"beta_\", \"prio\"), \"coef\"], 0.02707661, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"beta_\", \"age\"), \"coef\"], -0.03853006, rtol=1e-1)\n\n def test_proportional_odds(self, aft, rossi):\n\n aft.fit(rossi, \"week\", \"arrest\")\n\n subject = aft._norm_mean.to_frame().T\n\n baseline_survival = aft.predict_survival_function(subject).squeeze()\n\n subject.loc[0, \"prio\"] += 1\n accelerated_survival = aft.predict_survival_function(subject).squeeze()\n\n factor = aft.summary.loc[(\"alpha_\", \"prio\"), \"exp(coef)\"]\n expon = aft.summary.loc[(\"beta_\", \"_intercept\"), \"exp(coef)\"]\n npt.assert_allclose(\n baseline_survival / (1 - baseline_survival) * factor ** expon,\n accelerated_survival / (1 - accelerated_survival),\n )\n\n\nclass TestWeibullAFTFitter:\n @pytest.fixture\n def aft(self):\n return WeibullAFTFitter()\n\n def test_fitted_coefs_with_eha_when_left_truncated(self, aft, rossi):\n \"\"\"\n library(eha)\n df = read.csv(\"~/code/lifelines/lifelines/datasets/rossi.csv\")\n df['start'] = 0\n df[df['week'] > 10, 'start'] = 2\n r = aftreg(Surv(start, week, arrest) ~ fin + race + wexp + mar + paro + prio + age, data=df)\n summary(r)\n \"\"\"\n\n rossi[\"start\"] = 0\n rossi.loc[rossi[\"week\"] > 10, \"start\"] = 2\n\n aft.fit(rossi, \"week\", \"arrest\", entry_col=\"start\")\n\n # it's the negative in EHA\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"fin\"), \"coef\"], 0.28865175, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"age\"), \"coef\"], 0.04323855, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"race\"), \"coef\"], -0.23883560, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"wexp\"), \"coef\"], 0.11339258, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"mar\"), \"coef\"], 0.33081212, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"paro\"), \"coef\"], 0.06303764, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"prio\"), \"coef\"], -0.06954257, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"coef\"], 3.98650094, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"coef\"], 0.27564733, rtol=1e-4)\n\n def test_fitted_se_with_eha_when_left_truncated(self, aft, rossi):\n \"\"\"\n library(eha)\n df = read.csv(\"~/code/lifelines/lifelines/datasets/rossi.csv\")\n df['start'] = 0\n df[df['week'] > 10, 'start'] = 2\n r = aftreg(Surv(start, week, arrest) ~ fin + race + wexp + mar + paro + prio + age, data=df)\n summary(r)\n \"\"\"\n\n rossi[\"start\"] = 0\n rossi.loc[rossi[\"week\"] > 10, \"start\"] = 2\n\n aft.fit(rossi, \"week\", \"arrest\", entry_col=\"start\")\n\n 
npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"fin\"), \"se(coef)\"], 0.148, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"age\"), \"se(coef)\"], 0.017, rtol=1e-1)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"race\"), \"se(coef)\"], 0.235, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"wexp\"), \"se(coef)\"], 0.162, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"mar\"), \"se(coef)\"], 0.292, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"paro\"), \"se(coef)\"], 0.149, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"prio\"), \"se(coef)\"], 0.022, rtol=1e-1)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"se(coef)\"], 0.446, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"se(coef)\"], 0.104, rtol=1e-2)\n\n def test_fitted_coefs_match_with_flexsurv_has(self, aft, rossi):\n \"\"\"\n library('flexsurv')\n df = read.csv(\"~/code/lifelines/lifelines/datasets/rossi.csv\")\n r = flexsurvreg(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio, data=df, dist='weibull')\n r$coef\n \"\"\"\n aft.fit(rossi, \"week\", \"arrest\")\n\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"fin\"), \"coef\"], 0.27230591, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"age\"), \"coef\"], 0.04072758, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"race\"), \"coef\"], -0.22480808, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"wexp\"), \"coef\"], 0.10664712, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"mar\"), \"coef\"], 0.31095531, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"paro\"), \"coef\"], 0.05883352, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"prio\"), \"coef\"], -0.06580211, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"coef\"], 3.98968559, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"coef\"], 0.33911900, rtol=1e-4)\n\n def test_fitted_se_match_with_flexsurv_has(self, aft, rossi):\n \"\"\"\n library('flexsurv')\n r = flexsurvreg(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio, data=df, dist='weibull')\n diag(sqrt(vcov(r)))\n \"\"\"\n aft.fit(rossi, \"week\", \"arrest\")\n\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"fin\"), \"se(coef)\"], 0.13796834, rtol=1e-4)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"age\"), \"se(coef)\"], 0.01599442, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"race\"), \"se(coef)\"], 0.22015347, rtol=1e-4)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"wexp\"), \"se(coef)\"], 0.15154541, rtol=1e-4)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"mar\"), \"se(coef)\"], 0.27326405, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"paro\"), \"se(coef)\"], 0.13963680, rtol=1e-4)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"prio\"), \"se(coef)\"], 0.02093981, rtol=1e-4)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"se(coef)\"], 0.41904636, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"se(coef)\"], 0.08900064, rtol=1e-3)\n\n def test_fitted_log_likelihood_match_with_flexsurv_has(self, aft, rossi):\n # survreg(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio, data=df, dist='weibull')\n aft.fit(rossi, \"week\", \"arrest\")\n npt.assert_allclose(aft.log_likelihood_, -679.9166)\n\n 
def test_fitted_log_likelihood_ratio_test_match_with_flexsurv_has(self, aft, rossi):\n # survreg(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio, data=df, dist='weibull')\n aft.fit(rossi, \"week\", \"arrest\")\n npt.assert_allclose(aft.log_likelihood_ratio_test().test_statistic, 33.42, rtol=0.01)\n\n def test_coefs_with_fitted_ancillary_params(self, aft, rossi):\n \"\"\"\n library('flexsurv')\n r = flexsurvreg(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio + shape(prio) + shape(age), data=df, dist='weibull')\n r$coef\n \"\"\"\n aft.fit(rossi, \"week\", \"arrest\", ancillary_df=rossi[[\"prio\", \"age\"]])\n\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"paro\"), \"coef\"], 0.088364095, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"prio\"), \"coef\"], -0.074052141, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"coef\"], 2.756355922, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"coef\"], 1.163429253, rtol=1e-4)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"prio\"), \"coef\"], 0.008982523, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"age\"), \"coef\"], -0.037069994, rtol=1e-4)\n\n def test_ancillary_True_is_same_as_full_df(self, rossi):\n\n aft1 = WeibullAFTFitter().fit(rossi, \"week\", \"arrest\", ancillary_df=True)\n aft2 = WeibullAFTFitter().fit(rossi, \"week\", \"arrest\", ancillary_df=rossi)\n\n assert_frame_equal(aft1.summary, aft2.summary, check_like=True)\n\n def test_ancillary_None_is_same_as_False(self, rossi):\n\n aft1 = WeibullAFTFitter().fit(rossi, \"week\", \"arrest\", ancillary_df=None)\n aft2 = WeibullAFTFitter().fit(rossi, \"week\", \"arrest\", ancillary_df=False)\n\n assert_frame_equal(aft1.summary, aft2.summary)\n\n def test_fit_intercept(self, rossi):\n aft_without_intercept = WeibullAFTFitter(fit_intercept=True)\n aft_without_intercept.fit(rossi, \"week\", \"arrest\", ancillary_df=rossi)\n\n rossi[\"_intercept\"] = 1.0\n aft_with_intercept = WeibullAFTFitter(fit_intercept=False)\n aft_with_intercept.fit(rossi, \"week\", \"arrest\", ancillary_df=rossi)\n\n assert_frame_equal(aft_with_intercept.summary, aft_without_intercept.summary)\n\n def test_passing_in_additional_ancillary_df_in_predict_methods_if_fitted_with_one(self, rossi):\n\n aft = WeibullAFTFitter().fit(rossi, \"week\", \"arrest\", ancillary_df=True)\n aft.predict_median(rossi, ancillary_df=rossi)\n aft.predict_percentile(rossi, ancillary_df=rossi)\n aft.predict_cumulative_hazard(rossi, ancillary_df=rossi)\n aft.predict_hazard(rossi, ancillary_df=rossi)\n aft.predict_survival_function(rossi, ancillary_df=rossi)\n\n aft.predict_median(rossi)\n aft.predict_percentile(rossi)\n aft.predict_cumulative_hazard(rossi)\n aft.predict_hazard(rossi)\n aft.predict_survival_function(rossi)\n\n def test_passing_in_additional_ancillary_df_in_predict_methods_okay_if_not_fitted_with_one(self, rossi, aft):\n\n aft.fit(rossi, \"week\", \"arrest\", ancillary_df=False)\n aft.predict_median(rossi, ancillary_df=rossi)\n aft.predict_percentile(rossi, ancillary_df=rossi)\n aft.predict_hazard(rossi, ancillary_df=rossi)\n aft.predict_survival_function(rossi, ancillary_df=rossi)\n\n def test_robust_errors_against_R(self, rossi, aft):\n # r = survreg(Surv(week, arrest) ~ fin + race + wexp + mar + paro + prio + age, data=df, dist='weibull', robust=TRUE)\n\n aft.fit(rossi, \"week\", \"arrest\", robust=True)\n\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"fin\"), \"se(coef)\"], 0.1423, 
rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"age\"), \"se(coef)\"], 0.0174, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"race\"), \"se(coef)\"], 0.2107, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"wexp\"), \"se(coef)\"], 0.1577, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"mar\"), \"se(coef)\"], 0.2748, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"paro\"), \"se(coef)\"], 0.1429, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"prio\"), \"se(coef)\"], 0.0208, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"se(coef)\"], 0.4631, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"se(coef)\"], 0.0870, rtol=1e-3)\n\n def test_robust_errors_against_R_with_weights(self, rossi, aft):\n # r = survreg(Surv(week, arrest) ~ fin + race + wexp + mar + paro + prio, data=df, dist='weibull', robust=TRUE, weights=age)\n\n aft.fit(rossi, \"week\", \"arrest\", robust=True, weights_col=\"age\")\n\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"fin\"), \"se(coef)\"], 0.006581, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"race\"), \"se(coef)\"], 0.010367, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"wexp\"), \"se(coef)\"], 0.007106, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"mar\"), \"se(coef)\"], 0.012179, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"paro\"), \"se(coef)\"], 0.006427, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"prio\"), \"se(coef)\"], 0.000964, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"se(coef)\"], 0.013165, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"se(coef)\"], 0.003630, rtol=1e-3)\n\n def test_inference_is_the_same_if_using_right_censorship_or_interval_censorship_with_inf_endpoints(\n self, rossi, aft\n ):\n df = rossi.copy()\n df[\"start\"] = df[\"week\"]\n df[\"stop\"] = np.where(df[\"arrest\"], df[\"start\"], np.inf)\n df = df.drop(\"week\", axis=1)\n\n aft.fit_interval_censoring(df, lower_bound_col=\"start\", upper_bound_col=\"stop\", event_col=\"arrest\")\n interval_censored_results = aft.summary.copy()\n\n aft.fit_right_censoring(rossi, \"week\", event_col=\"arrest\")\n right_censored_results = aft.summary.copy()\n\n assert_frame_equal(interval_censored_results, right_censored_results, check_less_precise=3)\n\n def test_weibull_interval_censoring_inference_on_known_R_output(self, aft):\n \"\"\"\n library(flexsurv)\n\n flexsurvreg(Surv(left, right, type='interval2') ~ gender, data=IR_diabetes, dist=\"weibull\")\n ic_par(Surv(left, right, type = \"interval2\") ~ gender, data = IR_diabetes, model = \"aft\", dist = \"weibull\")\n\n \"\"\"\n df = load_diabetes()\n df[\"gender\"] = df[\"gender\"] == \"male\"\n df[\"E\"] = df[\"left\"] == df[\"right\"]\n\n aft.fit_interval_censoring(df, \"left\", \"right\", \"E\")\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"gender\"), \"coef\"], 0.04576, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"coef\"], np.log(18.31971), rtol=1e-4)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"coef\"], np.log(2.82628), rtol=1e-4)\n\n npt.assert_allclose(aft.log_likelihood_, -2027.196, rtol=1e-3)\n\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"gender\"), \"se(coef)\"], 0.02823, rtol=1e-1)\n\n with pytest.raises(AssertionError):\n 
npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"se(coef)\"], 0.42273, rtol=1e-1)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"se(coef)\"], 0.08356, rtol=1e-1)\n\n aft.fit_interval_censoring(df, \"left\", \"right\", \"E\", ancillary_df=True)\n\n npt.assert_allclose(aft.log_likelihood_, -2025.813, rtol=1e-3)\n\n with pytest.raises(AssertionError):\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"gender\"), \"coef\"], 0.1670, rtol=1e-4)\n\n def test_aft_weibull_with_weights(self, rossi, aft):\n \"\"\"\n library('flexsurv')\n r = flexsurvreg(Surv(week, arrest) ~ fin + race + wexp + mar + paro + prio, data=df, dist='weibull', weights=age)\n r$coef\n \"\"\"\n aft.fit(rossi, \"week\", \"arrest\", weights_col=\"age\")\n\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"fin\"), \"coef\"], 0.3842902, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"race\"), \"coef\"], -0.24538246, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"wexp\"), \"coef\"], 0.31146214, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"mar\"), \"coef\"], 0.47322543, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"paro\"), \"coef\"], -0.02885281, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"prio\"), \"coef\"], -0.06162843, rtol=1e-3)\n npt.assert_allclose(aft.summary.loc[(\"lambda_\", \"_intercept\"), \"coef\"], 4.93041526, rtol=1e-2)\n npt.assert_allclose(aft.summary.loc[(\"rho_\", \"_intercept\"), \"coef\"], 0.28612353, rtol=1e-4)\n\n def test_aft_weibull_with_ancillary_model_and_with_weights(self, rossi):\n \"\"\"\n library('flexsurv')\n r = flexsurvreg(Surv(week, arrest) ~ fin + race + wexp + mar + paro + prio + shape(prio), data=df, dist='weibull', weights=age)\n r$coef\n \"\"\"\n wf = WeibullAFTFitter(penalizer=0).fit(rossi, \"week\", \"arrest\", weights_col=\"age\", ancillary_df=rossi[[\"prio\"]])\n\n npt.assert_allclose(wf.summary.loc[(\"lambda_\", \"fin\"), \"coef\"], 0.39347, rtol=1e-3)\n npt.assert_allclose(wf.summary.loc[(\"lambda_\", \"_intercept\"), \"coef\"], np.log(140.55112), rtol=1e-2)\n npt.assert_allclose(wf.summary.loc[(\"rho_\", \"_intercept\"), \"coef\"], np.log(1.25981), rtol=1e-4)\n npt.assert_allclose(wf.summary.loc[(\"rho_\", \"prio\"), \"coef\"], 0.01485, rtol=1e-4)\n\n def test_aft_weibull_can_do_interval_prediction(self, aft):\n # https://github.com/CamDavidsonPilon/lifelines/issues/839\n df = load_diabetes()\n\n aft = WeibullAFTFitter()\n df[\"gender\"] = df[\"gender\"] == \"male\"\n df[\"E\"] = df[\"left\"] == df[\"right\"]\n\n aft.fit_interval_censoring(df, \"left\", \"right\", \"E\")\n aft.predict_survival_function(df)\n aft.print_summary()\n\n aft = WeibullAFTFitter()\n df = df.drop(\"E\", axis=1)\n\n aft.fit_interval_censoring(df, \"left\", \"right\")\n aft.predict_survival_function(df)\n aft.print_summary()\n\n\nclass TestCoxPHFitter:\n @pytest.fixture\n def cph(self):\n return CoxPHFitter()\n\n @pytest.fixture\n def cph_spline(self):\n return CoxPHFitter(baseline_estimation_method=\"spline\")\n\n def test_penalty_term_is_used_in_log_likelihood_value(self, rossi):\n assert (\n CoxPHFitter(penalizer=2).fit(rossi, \"week\", \"arrest\").log_likelihood_\n < CoxPHFitter(penalizer=1).fit(rossi, \"week\", \"arrest\").log_likelihood_\n < CoxPHFitter(penalizer=0).fit(rossi, \"week\", \"arrest\").log_likelihood_\n )\n assert (\n CoxPHFitter(penalizer=2, baseline_estimation_method=\"spline\").fit(rossi, \"week\", \"arrest\").log_likelihood_\n < CoxPHFitter(penalizer=1, 
baseline_estimation_method=\"spline\").fit(rossi, \"week\", \"arrest\").log_likelihood_\n < CoxPHFitter(penalizer=0, baseline_estimation_method=\"spline\").fit(rossi, \"week\", \"arrest\").log_likelihood_\n )\n\n def test_baseline_estimation_for_spline(self, rossi, cph_spline):\n cph_spline.fit(rossi, \"week\", \"arrest\")\n\n assert isinstance(cph_spline.baseline_survival_, pd.DataFrame)\n assert list(cph_spline.baseline_survival_.columns) == [\"baseline survival\"]\n assert list(cph_spline.baseline_cumulative_hazard_.columns) == [\"baseline cumulative hazard\"]\n\n def test_conditional_after_in_prediction(self, rossi, cph):\n rossi.loc[rossi[\"week\"] == 1, \"week\"] = 0\n cph.fit(rossi, \"week\", \"arrest\")\n p1 = cph.predict_survival_function(rossi.iloc[0])\n p2 = cph.predict_survival_function(rossi.iloc[0], conditional_after=[8])\n\n explicit = p1 / p1.loc[8]\n\n npt.assert_allclose(explicit.loc[8.0, 0], p2.loc[0.0, 0])\n npt.assert_allclose(explicit.loc[10.0, 0], p2.loc[2.0, 0])\n npt.assert_allclose(explicit.loc[12.0, 0], p2.loc[4.0, 0])\n npt.assert_allclose(explicit.loc[20.0, 0], p2.loc[12.0, 0])\n\n def test_conditional_after_with_strata_in_prediction(self, rossi, cph):\n rossi.loc[rossi[\"week\"] == 1, \"week\"] = 0\n cph.fit(rossi, \"week\", \"arrest\", strata=[\"fin\"])\n p1 = cph.predict_survival_function(rossi.iloc[0])\n p2 = cph.predict_survival_function(rossi.iloc[0], conditional_after=[8])\n\n explicit = p1 / p1.loc[8]\n\n npt.assert_allclose(explicit.loc[8.0, 0], p2.loc[0.0, 0])\n npt.assert_allclose(explicit.loc[10.0, 0], p2.loc[2.0, 0])\n npt.assert_allclose(explicit.loc[12.0, 0], p2.loc[4.0, 0])\n npt.assert_allclose(explicit.loc[20.0, 0], p2.loc[12.0, 0])\n\n def test_conditional_after_with_strata_in_prediction2(self, rossi, cph):\n\n cph.fit(rossi, duration_col=\"week\", event_col=\"arrest\", strata=[\"race\"])\n\n censored_subjects = rossi.loc[~rossi[\"arrest\"].astype(bool)]\n censored_subjects_last_obs = censored_subjects[\"week\"]\n pred = cph.predict_survival_function(censored_subjects, conditional_after=censored_subjects_last_obs)\n\n def test_conditional_after_in_prediction_multiple_subjects(self, rossi, cph):\n rossi.loc[rossi[\"week\"] == 1, \"week\"] = 0\n cph.fit(rossi, \"week\", \"arrest\", strata=[\"fin\"])\n p1 = cph.predict_survival_function(rossi.iloc[[0, 1, 2]])\n p2 = cph.predict_survival_function(rossi.iloc[[0, 1, 2]], conditional_after=[8, 9, 0])\n\n explicit = p1 / p1.loc[8]\n\n npt.assert_allclose(explicit.loc[8.0, 0], p2.loc[0.0, 0])\n npt.assert_allclose(explicit.loc[10.0, 0], p2.loc[2.0, 0])\n npt.assert_allclose(explicit.loc[12.0, 0], p2.loc[4.0, 0])\n npt.assert_allclose(explicit.loc[20.0, 0], p2.loc[12.0, 0])\n\n # no strata\n cph.fit(rossi, \"week\", \"arrest\")\n p1 = cph.predict_survival_function(rossi.iloc[[0, 1, 2]])\n p2 = cph.predict_survival_function(rossi.iloc[[0, 1, 2]], conditional_after=[8, 9, 0])\n\n explicit = p1 / p1.loc[8]\n\n npt.assert_allclose(explicit.loc[8.0, 0], p2.loc[0.0, 0])\n npt.assert_allclose(explicit.loc[10.0, 0], p2.loc[2.0, 0])\n npt.assert_allclose(explicit.loc[12.0, 0], p2.loc[4.0, 0])\n npt.assert_allclose(explicit.loc[20.0, 0], p2.loc[12.0, 0])\n\n def test_conditional_after_in_prediction_multiple_subjects_with_custom_times(self, rossi, cph):\n\n cph.fit(rossi, \"week\", \"arrest\")\n p2 = cph.predict_survival_function(rossi.iloc[[0, 1, 2]], conditional_after=[8, 9, 0], times=[10, 20, 30])\n\n assert p2.index.tolist() == [10.0, 20.0, 30.0]\n\n def 
test_that_a_convergence_warning_is_not_thrown_if_using_compute_residuals(self, rossi):\n rossi[\"c\"] = rossi[\"week\"]\n\n cph = CoxPHFitter(penalizer=1.0)\n cph.fit(rossi, \"week\", \"arrest\")\n\n with pytest.warns(None) as record:\n cph.compute_residuals(rossi, \"martingale\")\n\n assert len(record) == 0\n\n def test_that_adding_strata_will_change_c_index(self, cph, rossi):\n \"\"\"\n df = read.csv('~/code/lifelines/lifelines/datasets/rossi.csv')\n r <- coxph(Surv(week, arrest) ~ fin + age + race + mar + paro + prio + strata(wexp), data=df)\n survConcordance(Surv(week, arrest) ~predict(r) + strata(wexp), df)\n \"\"\"\n\n cph.fit(rossi, \"week\", \"arrest\")\n c_index_no_strata = cph.concordance_index_\n\n cph.fit(rossi, \"week\", \"arrest\", strata=[\"wexp\"])\n c_index_with_strata = cph.concordance_index_\n\n assert c_index_with_strata != c_index_no_strata\n npt.assert_allclose(c_index_with_strata, 0.6124492)\n\n def test_check_assumptions(self, cph, rossi):\n # TODO make this better\n cph.fit(rossi, \"week\", \"arrest\")\n cph.check_assumptions(rossi)\n\n def test_check_assumptions_for_subset_of_columns(self, cph, rossi):\n cph.fit(rossi, \"week\", \"arrest\")\n cph.check_assumptions(rossi, columns=[\"age\"])\n cph.check_assumptions(rossi, columns=[])\n cph.check_assumptions(rossi, columns=[\"age\", \"fin\"])\n\n def test_cph_doesnt_modify_original_dataframe(self, cph):\n df = pd.DataFrame(\n {\n \"var1\": [-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105],\n \"T\": [5, 6, 7, 8, 9],\n \"E\": [1, 1, 1, 1, 1],\n \"W\": [1, 1, 1, 1, 1],\n }\n )\n\n cph.fit(df, \"T\", \"E\", weights_col=\"W\")\n assert df.dtypes[\"E\"] in (int, np.dtype(\"int64\"))\n assert df.dtypes[\"W\"] in (int, np.dtype(\"int64\"))\n assert df.dtypes[\"T\"] in (int, np.dtype(\"int64\"))\n\n def test_cph_will_handle_times_with_only_censored_individuals(self, rossi):\n rossi_29 = rossi.iloc[0:10].copy()\n rossi_29[\"week\"] = 29\n rossi_29[\"arrest\"] = False\n\n cph1_summary = CoxPHFitter().fit(rossi.append(rossi_29), \"week\", \"arrest\").summary\n\n cph2_summary = CoxPHFitter().fit(rossi, \"week\", \"arrest\").summary\n\n assert cph2_summary[\"coef\"].iloc[0] != cph1_summary[\"coef\"].iloc[0]\n\n def test_schoenfeld_residuals_no_strata_but_with_censorship(self, cph):\n \"\"\"\n library(survival)\n df <- data.frame(\n \"var\" = c(-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105),\n \"T\" = c(5, 6, 7, 8, 9),\n \"E\" = c(1, 1, 1, 1, 1),\n )\n\n c = coxph(formula=Surv(T, E) ~ var , data=df)\n residuals(c, \"schoen\")\n \"\"\"\n df = pd.DataFrame(\n {\n \"var1\": [-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105],\n \"T\": [5, 6, 7, 8, 9],\n \"E\": [1, 1, 1, 1, 1],\n }\n )\n\n cph.fit(df, \"T\", \"E\")\n\n results = cph.compute_residuals(df, \"schoenfeld\")\n expected = pd.DataFrame([-0.2165282492, -0.4573005808, 1.1117589644, -0.4379301344, 0.0], columns=[\"var1\"])\n assert_frame_equal(results, expected, check_less_precise=3)\n\n def test_schoenfeld_residuals_with_censorship_and_ties(self, cph):\n \"\"\"\n library(survival)\n df <- data.frame(\n \"var\" = c(-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105),\n \"T\" = c(6, 6, 7, 8, 9),\n \"E\" = c(1, 1, 1, 0, 1),\n )\n\n c = coxph(formula=Surv(T, E) ~ var , data=df)\n residuals(c, \"schoen\")\n \"\"\"\n df = pd.DataFrame(\n {\n \"var1\": [-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105],\n \"T\": [6, 6, 7, 8, 9],\n \"E\": [1, 1, 1, 0, 1],\n }\n )\n\n cph.fit(df, \"T\", \"E\")\n\n results = 
cph.compute_residuals(df, \"schoenfeld\")\n expected = pd.DataFrame([-0.3903793341, -0.5535578141, 0.9439371482, 0.0], columns=[\"var1\"], index=[0, 1, 2, 4])\n assert_frame_equal(results, expected, check_less_precise=3)\n\n def test_schoenfeld_residuals_with_weights(self, cph):\n \"\"\"\n library(survival)\n df <- data.frame(\n \"var\" = c(-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105),\n \"T\" = c(6, 6, 7, 8, 9),\n \"E\" = c(1, 1, 1, 0, 1),\n )\n\n c = coxph(formula=Surv(T, E) ~ var , data=df)\n residuals(c, \"schoen\")\n \"\"\"\n df = pd.DataFrame(\n {\n \"var1\": [-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105],\n \"T\": [5, 6, 7, 8, 9],\n \"E\": [1, 1, 1, 1, 1],\n \"w\": [0.5, 1.0, 3.0, 1.0, 1.0],\n }\n )\n\n cph.fit(df, \"T\", \"E\", weights_col=\"w\", robust=True)\n\n results = cph.compute_residuals(df, \"schoenfeld\")\n expected = pd.DataFrame([-0.6633324862, -0.9107785234, 0.6176009038, -0.6103579448, 0.0], columns=[\"var1\"])\n assert_frame_equal(results, expected, check_less_precise=3)\n\n def test_schoenfeld_residuals_with_strata(self, cph):\n \"\"\"\n library(survival)\n df <- data.frame(\n \"var\" = c(-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105),\n \"T\" = c( 6, 6, 7, 8, 9),\n \"E\" = c(1, 1, 1, 1, 1),\n \"s\" = c(1, 2, 2, 1, 1)\n )\n\n c = coxph(formula=Surv(T, E) ~ var + stata(s), data=df)\n residuals(c, \"schoen\")\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105],\n \"T\": [6, 6, 7, 8, 9],\n \"E\": [1, 1, 1, 1, 1],\n \"s\": [1, 2, 2, 1, 1],\n }\n )\n\n cph.fit(df, \"T\", \"E\", strata=[\"s\"])\n\n results = cph.compute_residuals(df, \"schoenfeld\")\n expected = pd.DataFrame(\n [5.898252711e-02, -2.074325854e-02, 0.0, -3.823926885e-02, 0.0], columns=[\"var1\"], index=[0, 3, 4, 1, 2]\n )\n assert_frame_equal(results, expected, check_less_precise=3)\n\n def test_schoenfeld_residuals_with_first_subjects_censored(self, rossi, cph):\n rossi.loc[rossi[\"week\"] == 1, \"arrest\"] = 0\n\n cph.fit(rossi, \"week\", \"arrest\")\n cph.compute_residuals(rossi, \"schoenfeld\")\n\n def test_scaled_schoenfeld_residuals_against_R(self, regression_dataset, cph):\n \"\"\"\n NOTE: lifelines does not add the coefficients to the final results, but R does when you call residuals(c, \"scaledsch\")\n \"\"\"\n\n cph.fit(regression_dataset, \"T\", \"E\")\n\n results = cph.compute_residuals(regression_dataset, \"scaled_schoenfeld\") - cph.params_.values\n npt.assert_allclose(results.iloc[0].values, [0.785518935413, 0.862926592959, 2.479586809860], rtol=5)\n npt.assert_allclose(results.iloc[1].values, [-0.888580165064, -1.037904485796, -0.915334612372], rtol=5)\n npt.assert_allclose(\n results.iloc[results.shape[0] - 1].values, [0.222207366875, 0.050957334886, 0.218314242931], rtol=5\n )\n\n def test_original_index_is_respected_in_all_residual_tests(self, cph):\n\n df = pd.DataFrame(\n {\n \"var1\": [-0.71163379, -0.87481227, 0.99557251, -0.83649751, 1.42737105],\n \"T\": [6, 6, 7, 8, 9],\n \"s\": [1, 2, 2, 1, 1],\n }\n )\n df.index = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n\n cph.fit(df, \"T\")\n\n for kind in {\"martingale\", \"schoenfeld\", \"score\", \"delta_beta\", \"deviance\"}:\n resids = cph.compute_residuals(df, kind)\n assert resids.sort_index().index.tolist() == [\"A\", \"B\", \"C\", \"D\", \"E\"]\n\n def test_original_index_is_respected_in_all_residual_tests_with_strata(self, cph):\n\n df = pd.DataFrame(\n {\n \"var1\": [-0.71163379, -0.87481227, 0.99557251, -0.83649751, 
1.42737105],\n \"T\": [6, 6, 7, 8, 9],\n \"s\": [1, 2, 2, 1, 1],\n }\n )\n df.index = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n\n cph.fit(df, \"T\", strata=[\"s\"])\n\n for kind in {\"martingale\", \"schoenfeld\", \"score\", \"delta_beta\", \"deviance\", \"scaled_schoenfeld\"}:\n resids = cph.compute_residuals(df, kind)\n assert resids.sort_index().index.tolist() == [\"A\", \"B\", \"C\", \"D\", \"E\"]\n\n def test_martingale_residuals(self, regression_dataset, cph):\n\n cph.fit(regression_dataset, \"T\", \"E\")\n\n results = cph.compute_residuals(regression_dataset, \"martingale\")\n npt.assert_allclose(results.loc[0, \"martingale\"], -2.315035744901, rtol=1e-05)\n npt.assert_allclose(results.loc[1, \"martingale\"], 0.774216356429, rtol=1e-05)\n npt.assert_allclose(results.loc[199, \"martingale\"], 0.868510420157, rtol=1e-05)\n\n def test_strata_will_work_with_matched_pairs(self, rossi, cph):\n rossi[\"matched_pairs\"] = np.floor(rossi.index / 2.0).astype(int)\n cph.fit(rossi, duration_col=\"week\", event_col=\"arrest\", strata=[\"matched_pairs\"], show_progress=True)\n assert cph.baseline_cumulative_hazard_.shape[1] == 216\n\n def test_print_summary_with_decimals(self, rossi, cph):\n import sys\n\n saved_stdout = sys.stdout\n try:\n\n out = StringIO()\n sys.stdout = out\n\n cph = CoxPHFitter()\n cph.fit(rossi, duration_col=\"week\", event_col=\"arrest\", batch_mode=True)\n cph._time_fit_was_called = \"2018-10-23 02:40:45 UTC\"\n cph.print_summary(decimals=1)\n output_dec_1 = out.getvalue().strip().split()\n\n cph.print_summary(decimals=3)\n output_dec_3 = out.getvalue().strip().split()\n\n assert output_dec_1 != output_dec_3\n finally:\n sys.stdout = saved_stdout\n cph.fit(rossi, duration_col=\"week\", event_col=\"arrest\", batch_mode=False)\n\n def test_print_summary(self, rossi, cph):\n\n import sys\n\n saved_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n\n cph.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n cph._time_fit_was_called = \"2018-10-23 02:40:45 UTC\"\n cph.print_summary()\n output = out.getvalue().strip().split()\n expected = (\n (\n repr(cph)\n + \"\\n\"\n + \"\"\"\n duration col = week\n event col = arrest\nnumber of subjects = 432\n number of events = 114\n log-likelihood = -658.748\n time fit was run = 2018-10-23 02:40:45 UTC\n\n---\n coef exp(coef) se(coef) z p coef lower 95% coef upper 95%\nfin -0.3794 0.6843 0.1914 -1.9826 0.0474 -0.7545 -0.0043\nage -0.0574 0.9442 0.0220 -2.6109 0.0090 -0.1006 -0.0143\nrace 0.3139 1.3688 0.3080 1.0192 0.3081 -0.2898 0.9176\nwexp -0.1498 0.8609 0.2122 -0.7058 0.4803 -0.5657 0.2662\nmar -0.4337 0.6481 0.3819 -1.1358 0.2561 -1.1821 0.3147\nparo -0.0849 0.9186 0.1958 -0.4336 0.6646 -0.4685 0.2988\nprio 0.0915 1.0958 0.0286 3.1939 0.0014 0.0353 0.1476\n---\n\nConcordance = 0.640\nLog-likelihood ratio test = 33.27 on 7 df, -log2(p)=15.37\n\"\"\"\n )\n .strip()\n .split()\n )\n for i in [0, 1, 2, 3, -2, -1, -3, -4, -5]:\n assert output[i] == expected[i]\n finally:\n sys.stdout = saved_stdout\n\n def test_print_summary_with_styles(self, rossi, cph):\n cph.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n cph.print_summary(style=\"html\")\n cph.print_summary(style=\"latex\")\n cph.print_summary(style=\"ascii\")\n\n def test_log_likelihood(self, data_nus, cph):\n cph.fit(data_nus, duration_col=\"t\", event_col=\"E\")\n assert abs(cph.log_likelihood_ - -12.7601409152) < 0.001\n\n def test_single_efron_computed_by_hand_examples(self, data_nus, cph):\n\n X = data_nus[\"x\"][:, None]\n T = data_nus[\"t\"]\n E = 
data_nus[\"E\"]\n weights = np.ones_like(T)\n\n # Enforce numpy arrays\n X = np.array(X)\n T = np.array(T)\n E = np.array(E)\n\n # Want as bools\n E = E.astype(bool)\n\n # tests from http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes3.pdf\n beta = np.array([0])\n\n l, u, _ = cph._get_efron_values_single(X, T, E, weights, beta)\n l = -l\n\n assert np.abs(u[0] - -2.51) < 0.05\n assert np.abs(l[0][0] - 77.13) < 0.05\n beta = beta + u / l[0]\n assert np.abs(beta - -0.0326) < 0.05\n\n l, u, _ = cph._get_efron_values_single(X, T, E, weights, beta)\n l = -l\n\n assert np.abs(l[0][0] - 72.83) < 0.05\n assert np.abs(u[0] - -0.069) < 0.05\n beta = beta + u / l[0]\n assert np.abs(beta - -0.0325) < 0.01\n\n l, u, _ = cph._get_efron_values_single(X, T, E, weights, beta)\n l = -l\n\n assert np.abs(l[0][0] - 72.70) < 0.01\n assert np.abs(u[0] - -0.000061) < 0.01\n beta = beta + u / l[0]\n assert np.abs(beta - -0.0335) < 0.01\n\n def test_batch_efron_computed_by_hand_examples(self, data_nus, cph):\n\n X = data_nus[\"x\"][:, None]\n T = data_nus[\"t\"]\n E = data_nus[\"E\"]\n weights = np.ones_like(T)\n\n # Enforce numpy arrays\n X = np.array(X)\n T = np.array(T)\n E = np.array(E)\n\n # Want as bools\n E = E.astype(bool)\n\n # tests from http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes3.pdf\n beta = np.array([0])\n\n l, u, _ = cph._get_efron_values_batch(X, T, E, weights, beta)\n l = -l\n\n assert np.abs(u[0] - -2.51) < 0.05\n assert np.abs(l[0][0] - 77.13) < 0.05\n beta = beta + u / l[0]\n assert np.abs(beta - -0.0326) < 0.05\n\n l, u, _ = cph._get_efron_values_batch(X, T, E, weights, beta)\n l = -l\n\n assert np.abs(l[0][0] - 72.83) < 0.05\n assert np.abs(u[0] - -0.069) < 0.05\n beta = beta + u / l[0]\n assert np.abs(beta - -0.0325) < 0.01\n\n l, u, _ = cph._get_efron_values_batch(X, T, E, weights, beta)\n l = -l\n\n assert np.abs(l[0][0] - 72.70) < 0.01\n assert np.abs(u[0] - -0.000061) < 0.01\n beta = beta + u / l[0]\n assert np.abs(beta - -0.0335) < 0.01\n\n def test_efron_newtons_method(self, data_nus, cph):\n cph._batch_mode = False\n newton = cph._newton_rhapson_for_efron_model\n X, T, E, W = (data_nus[[\"x\"]], data_nus[\"t\"], data_nus[\"E\"], pd.Series(np.ones_like(data_nus[\"t\"])))\n\n assert np.abs(newton(X, T, E, W)[0] - -0.0335) < 0.0001\n\n def test_fit_method(self, data_nus, cph):\n cph.fit(data_nus, duration_col=\"t\", event_col=\"E\")\n assert np.abs(cph.params_.iloc[0] - -0.0335) < 0.0001\n\n def test_using_dataframes_vs_numpy_arrays(self, data_pred2, cph):\n cph.fit(data_pred2, \"t\", \"E\")\n\n X = data_pred2[data_pred2.columns.difference([\"t\", \"E\"])]\n assert_series_equal(cph.predict_partial_hazard(np.array(X)), cph.predict_partial_hazard(X))\n\n def test_prediction_methods_will_accept_a_times_arg_to_reindex_the_predictions(self, data_pred2, cph):\n cph.fit(data_pred2, duration_col=\"t\", event_col=\"E\")\n times_of_interest = np.arange(0, 10, 0.5)\n\n actual_index = cph.predict_survival_function(data_pred2.drop([\"t\", \"E\"], axis=1), times=times_of_interest).index\n np.testing.assert_allclose(actual_index.values, times_of_interest)\n\n actual_index = cph.predict_cumulative_hazard(data_pred2.drop([\"t\", \"E\"], axis=1), times=times_of_interest).index\n np.testing.assert_allclose(actual_index.values, times_of_interest)\n\n def test_data_normalization(self, data_pred2, cph):\n # During fit, CoxPH copies the training data and normalizes it.\n # Future calls should be normalized in the same way and\n\n cph.fit(data_pred2, 
duration_col=\"t\", event_col=\"E\")\n\n # Internal training set\n ci_trn = cph.concordance_index_\n # New data should normalize in the exact same way\n ci_org = concordance_index(\n data_pred2[\"t\"], -cph.predict_partial_hazard(data_pred2[[\"x1\", \"x2\"]]).values, data_pred2[\"E\"]\n )\n\n assert ci_org == ci_trn\n\n def test_cox_ph_prediction_with_series(self, rossi, cph):\n cph.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n rossi_mean = rossi.mean()\n result = cph.predict_survival_function(rossi_mean)\n assert_series_equal(cph.baseline_survival_[\"baseline survival\"], result[0], check_names=False)\n\n def test_cox_ph_prediction_with_series_of_longer_length(self, rossi, cph):\n rossi = rossi[[\"week\", \"arrest\", \"age\"]]\n cph.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n\n X = pd.Series([1, 2, 3, 4, 5])\n result = cph.predict_survival_function(X)\n\n def test_cox_ph_prediction_monotonicity(self, data_pred2):\n # Concordance wise, all prediction methods should be monotonic versions\n # of one-another, unless numerical factors screw it up.\n t = data_pred2[\"t\"]\n e = data_pred2[\"E\"]\n X = data_pred2[[\"x1\", \"x2\"]]\n\n cf = CoxPHFitter()\n cf.fit(data_pred2, duration_col=\"t\", event_col=\"E\")\n\n # Base comparison is partial_hazards\n ci_ph = concordance_index(t, -cf.predict_partial_hazard(X).values, e)\n\n ci_med = concordance_index(t, cf.predict_median(X).squeeze(), e)\n # pretty close.\n assert abs(ci_ph - ci_med) < 0.001\n\n ci_exp = concordance_index(t, cf.predict_expectation(X).squeeze(), e)\n assert ci_ph == ci_exp\n\n def test_crossval_for_cox_ph_with_normalizing_times(self, data_pred2, data_pred1):\n cf = CoxPHFitter()\n\n for data_pred in [data_pred1, data_pred2]:\n\n # why does this\n data_norm = data_pred.copy()\n times = data_norm[\"t\"]\n # Normalize to mean = 0 and standard deviation = 1\n times -= np.mean(times)\n times /= np.std(times)\n data_norm[\"t\"] = times\n\n scores = k_fold_cross_validation(\n cf, data_norm, duration_col=\"t\", event_col=\"E\", k=3, scoring_method=\"concordance_index\"\n )\n\n mean_score = np.mean(scores)\n\n expected = 0.9\n msg = \"Expected min-mean c-index {:.2f} < {:.2f}\"\n assert mean_score > expected, msg.format(expected, mean_score)\n\n def test_crossval_for_cox_ph(self, data_pred2, data_pred1):\n cf = CoxPHFitter()\n\n for data_pred in [data_pred1, data_pred2]:\n scores = k_fold_cross_validation(\n cf, data_pred, duration_col=\"t\", event_col=\"E\", k=3, scoring_method=\"concordance_index\"\n )\n\n mean_score = np.mean(scores)\n\n expected = 0.9\n msg = \"Expected min-mean c-index {:.2f} < {:.2f}\"\n assert mean_score > expected, msg.format(expected, mean_score)\n\n def test_crossval_for_cox_ph_normalized(self, data_pred2, data_pred1):\n cf = CoxPHFitter()\n for data_pred in [data_pred1, data_pred2]:\n data_norm = data_pred.copy()\n\n times = data_norm[\"t\"]\n # Normalize to mean = 0 and standard deviation = 1\n times -= np.mean(times)\n times /= np.std(times)\n data_norm[\"t\"] = times\n\n x1 = data_norm[\"x1\"]\n x1 -= np.mean(x1)\n x1 /= np.std(x1)\n data_norm[\"x1\"] = x1\n\n if \"x2\" in data_norm.columns:\n x2 = data_norm[\"x2\"]\n x2 -= np.mean(x2)\n x2 /= np.std(x2)\n data_norm[\"x2\"] = x2\n\n scores = k_fold_cross_validation(\n cf, data_norm, duration_col=\"t\", event_col=\"E\", k=3, scoring_method=\"concordance_index\"\n )\n\n mean_score = np.mean(scores)\n expected = 0.9\n msg = \"Expected min-mean c-index {:.2f} < {:.2f}\"\n assert mean_score > expected, msg.format(expected, 
mean_score)\n\n def test_coef_output_against_R_super_accurate(self, rossi):\n \"\"\"\n from http://cran.r-project.org/doc/contrib/Fox-Companion/appendix-cox-regression.pdf\n Link is now broken, but this is the code:\n\n library(survival)\n rossi <- read.csv('.../lifelines/datasets/rossi.csv')\n r <- coxph(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio,\n data=rossi)\n cat(round(r$coefficients, 8), sep=\", \")\n \"\"\"\n expected = np.array([-0.3794222, -0.0574377, 0.3138998, -0.1497957, -0.4337039, -0.0848711, 0.0914971])\n cf = CoxPHFitter()\n cf.fit(rossi, duration_col=\"week\", event_col=\"arrest\", show_progress=True, batch_mode=True)\n npt.assert_array_almost_equal(cf.params_.values, expected, decimal=6)\n\n cf.fit(rossi, duration_col=\"week\", event_col=\"arrest\", show_progress=True, batch_mode=False)\n npt.assert_array_almost_equal(cf.params_.values, expected, decimal=6)\n\n def test_coef_output_against_R_with_strata_super_accurate(self, rossi):\n \"\"\"\n from http://cran.r-project.org/doc/contrib/Fox-Companion/appendix-cox-regression.pdf\n Link is now broken, but this is the code:\n\n library(survival)\n rossi <- read.csv('.../lifelines/datasets/rossi.csv')\n r <- coxph(Surv(week, arrest) ~ fin + age + strata(race) + wexp + mar + paro + prio,\n data=rossi)\n cat(round(r$coefficients, 4), sep=\", \")\n \"\"\"\n expected = np.array([-0.3788, -0.0576, -0.1427, -0.4388, -0.0858, 0.0922])\n cf = CoxPHFitter()\n cf.fit(rossi, duration_col=\"week\", event_col=\"arrest\", strata=[\"race\"], show_progress=True, batch_mode=True)\n npt.assert_array_almost_equal(cf.params_.values, expected, decimal=4)\n\n def test_coef_output_against_R_using_non_trivial_but_integer_weights(self, rossi):\n rossi_ = rossi.copy()\n rossi_[\"weights\"] = 1.0\n rossi_ = rossi_.groupby(rossi.columns.tolist())[\"weights\"].sum().reset_index()\n\n expected = np.array([-0.3794, -0.0574, 0.3139, -0.1498, -0.4337, -0.0849, 0.0915])\n cf = CoxPHFitter()\n cf.fit(rossi_, duration_col=\"week\", event_col=\"arrest\", weights_col=\"weights\")\n npt.assert_array_almost_equal(cf.params_.values, expected, decimal=4)\n\n def test_robust_errors_with_trivial_weights_is_the_same_than_R(self):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666)\n )\n df['E'] = 1\n df['var3'] = 0.75\n r = coxph(formula=Surv(T, E) ~ var1 + var2, data=df, weights=var3, robust=TRUE)\n r$var\n r$naive.var\n \"\"\"\n\n w = 0.75\n df = pd.DataFrame(\n {\n \"var1\": [0.209325, 0.693919, 0.443804, 0.065636, 0.386294],\n \"var2\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"T\": [7.335846, 5.269797, 11.684092, 12.678458, 6.601666],\n }\n )\n df[\"E\"] = 1\n df[\"var3\"] = w\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", robust=True, weights_col=\"var3\", show_progress=True)\n expected = pd.Series({\"var1\": 7.680, \"var2\": -0.915})\n assert_series_equal(cph.params_, expected, check_less_precise=2, check_names=False)\n\n expected_cov = np.array([[33.079106, -5.964652], [-5.964652, 2.040642]])\n npt.assert_array_almost_equal(w * cph.variance_matrix_, expected_cov, decimal=1)\n\n expected = pd.Series({\"var1\": 2.097, \"var2\": 0.827})\n assert_series_equal(cph.summary[\"se(coef)\"], expected, check_less_precise=2, check_names=False)\n\n def test_delta_betas_are_the_same_as_in_R(self):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 
0.443804, 0.065636, 0.386294),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666)\n )\n df['E'] = 1\n r = coxph(formula=Surv(T, E) ~ var1, data=df, robust=TRUE)\n residuals(r, 'dfbeta')\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [0.209325, 0.693919, 0.443804, 0.065636, 0.386294],\n \"T\": [5.269797, 6.601666, 7.335846, 11.684092, 12.678458],\n }\n )\n df[\"E\"] = True\n df[\"weights\"] = 1\n df = df.sort_values(by=\"T\")\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", show_progress=True, weights_col=\"weights\")\n\n X = normalize(df.drop([\"T\", \"E\", \"weights\"], axis=1), cph._norm_mean, cph._norm_std)\n\n expected = np.array([[-1.1099688, 0.6620063, 0.4630473, 0.5807250, -0.5958099]]).T\n actual = cph._compute_delta_beta(X, df[\"T\"], df[\"E\"], df[\"weights\"])\n npt.assert_allclose(expected, actual, rtol=0.001)\n\n def test_delta_betas_with_strata_are_the_same_as_in_R(self):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"T\" = c(5.269797, 6.601666, 7.335846, 11.684092, 12.678458),\n \"strata\" = c(1, 1, 1, 2, 2),\n )\n df['E'] = 1\n r = coxph(formula=Surv(T, E) ~ var1 + strata(strata), data=df, robust=TRUE)\n residuals(r, 'dfbeta')\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [0.209325, 0.693919, 0.443804, 0.065636, 0.386294],\n \"T\": [5.269797, 6.601666, 7.335846, 11.684092, 12.678458],\n \"strata\": [1, 1, 1, 2, 2],\n }\n )\n df[\"E\"] = True\n df[\"weights\"] = 1\n df = df.sort_values(by=\"T\")\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", show_progress=True, weights_col=\"weights\", strata=[\"strata\"])\n\n df = df.set_index(\"strata\")\n X = normalize(df.drop([\"T\", \"E\", \"weights\"], axis=1), 0, cph._norm_std)\n\n expected = np.array([[-0.6960789, 1.6729761, 0.3094744, -0.2895864, -0.9967852]]).T\n actual = cph._compute_delta_beta(X, df[\"T\"], df[\"E\"], df[\"weights\"])\n npt.assert_allclose(expected, actual, rtol=0.001)\n\n def test_delta_betas_with_weights_are_the_same_as_in_R(self):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"T\" = c(5.269797, 6.601666, 7.335846, 11.684092, 12.678458),\n \"w\" = c(1, 0.5, 2, 1, 1)\n )\n df['E'] = 1\n r = coxph(formula=Surv(T, E) ~ var1 + strata(strata), data=df, weights=w)\n residuals(r, 'dfbeta')\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [0.209325, 0.693919, 0.443804, 0.065636, 0.386294],\n \"T\": [5.269797, 6.601666, 7.335846, 11.684092, 12.678458],\n \"weights\": [1, 0.5, 2, 1, 1],\n }\n )\n df[\"E\"] = True\n df = df.sort_values(by=\"T\")\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", show_progress=True, weights_col=\"weights\", robust=True)\n\n X = normalize(df.drop([\"T\", \"E\", \"weights\"], axis=1), 0, cph._norm_std)\n\n expected = np.array([[-1.1156470, 0.7698781, 0.3923246, 0.8040079, -0.8505637]]).T\n actual = cph._compute_delta_beta(X, df[\"T\"], df[\"E\"], df[\"weights\"])\n npt.assert_allclose(expected, actual, rtol=0.001)\n\n def test_cluster_option(self):\n \"\"\"\n library(survival)\n df <- data.frame(\n \"var1\" = c(1, 1, 2, 2, 2),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"id\" = c(1, 1, 2, 3, 4),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666)\n )\n df['E'] = 1\n\n c = coxph(formula=Surv(T, E) ~ var1 + var2 + cluster(id), data=df)\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [1, 1, 2, 2, 2],\n \"var2\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"T\": [7.335846, 5.269797, 11.684092, 12.678458, 6.601666],\n 
\"id\": [1, 1, 2, 3, 4],\n }\n )\n df[\"E\"] = 1\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", cluster_col=\"id\", show_progress=True)\n expected = pd.Series({\"var1\": 5.9752, \"var2\": 4.0683})\n assert_series_equal(cph.summary[\"se(coef)\"], expected, check_less_precise=2, check_names=False)\n\n def test_cluster_option_with_strata(self, regression_dataset):\n \"\"\"\n library(survival)\n df <- data.frame(\n \"var\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"id\" = c(1, 1, 2, 3, 4),\n \"strata\" = c(1, 1, 2, 2, 2),\n \"T\" = c( 5.269797, 6.601666, 7.335846, 11.684092, 12.678458)\n )\n df['E'] = 1\n\n c = coxph(formula=Surv(T, E) ~ strata(strata) + var + cluster(id), data=df)\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"id\": [1, 1, 2, 3, 4],\n \"strata\": [1, 1, 2, 2, 2],\n \"T\": [5.269797, 6.601666, 7.335846, 11.684092, 12.678458],\n }\n )\n df[\"E\"] = 1\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", cluster_col=\"id\", strata=[\"strata\"], show_progress=True)\n expected = pd.Series({\"var\": 0.643})\n assert_series_equal(cph.summary[\"se(coef)\"], expected, check_less_precise=2, check_names=False)\n\n def test_robust_errors_with_less_trival_weights_is_the_same_as_R(self, regression_dataset):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"T\" = c(1, 2, 3, 4, 5)\n )\n df['E'] = 1\n df['var3'] = 2\n df[4, 'var3'] = 1\n r = coxph(formula=Surv(T, E) ~ var1 + var2, data=df, weights=var3, robust=TRUE)\n r$var\n r$naive.var\n residuals(r, type='dfbeta')\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [0.209325, 0.693919, 0.443804, 0.065636, 0.386294],\n \"var2\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"T\": [1, 2, 3, 4, 5],\n \"var3\": [2, 2, 2, 1, 2],\n }\n )\n df[\"E\"] = 1\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", robust=True, weights_col=\"var3\", show_progress=True)\n expected = pd.Series({\"var1\": 1.431, \"var2\": -1.277})\n assert_series_equal(cph.params_, expected, check_less_precise=2, check_names=False)\n\n expected_cov = np.array([[3.5439245, -0.3549099], [-0.3549099, 0.4499553]])\n npt.assert_array_almost_equal(\n cph.variance_matrix_, expected_cov, decimal=1\n ) # not as precise because matrix inversion will accumulate estimation errors.\n\n expected = pd.Series({\"var1\": 2.094, \"var2\": 0.452})\n assert_series_equal(cph.summary[\"se(coef)\"], expected, check_less_precise=2, check_names=False)\n\n def test_robust_errors_with_non_trivial_weights_is_the_same_as_R(self, regression_dataset):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"var3\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666)\n )\n df['E'] = 1\n r = coxph(formula=Surv(T, E) ~ var1 + var2, data=df, weights=var3, robust=TRUE)\n r$var\n r$naive.var\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [0.209325, 0.693919, 0.443804, 0.065636, 0.386294],\n \"var2\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"var3\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"T\": [7.335846, 5.269797, 11.684092, 12.678458, 6.601666],\n }\n )\n df[\"E\"] = 1\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", robust=True, weights_col=\"var3\", show_progress=True)\n expected = pd.Series({\"var1\": -5.16231, 
\"var2\": 1.71924})\n assert_series_equal(cph.params_, expected, check_less_precise=1, check_names=False)\n\n expected = pd.Series({\"var1\": 9.97730, \"var2\": 2.45648})\n assert_series_equal(cph.summary[\"se(coef)\"], expected, check_less_precise=2, check_names=False)\n\n def test_robust_errors_with_non_trivial_weights_with_censorship_is_the_same_as_R(self, regression_dataset):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"var3\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666),\n \"E\" = c(1, 1, 0, 1, 1)\n )\n r = coxph(formula=Surv(T, E) ~ var1 + var2, data=df, weights=var3, robust=TRUE)\n r$var\n r$naive.var\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [0.209325, 0.693919, 0.443804, 0.065636, 0.386294],\n \"var2\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"var3\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"T\": [7.335846, 5.269797, 11.684092, 12.678458, 6.601666],\n \"E\": [1, 1, 0, 1, 1],\n }\n )\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", robust=True, weights_col=\"var3\", show_progress=True)\n expected = pd.Series({\"var1\": -8.360533, \"var2\": 1.781126})\n assert_series_equal(cph.params_, expected, check_less_precise=3, check_names=False)\n\n expected = pd.Series({\"var1\": 12.303338, \"var2\": 2.395670})\n assert_series_equal(cph.summary[\"se(coef)\"], expected, check_less_precise=3, check_names=False)\n\n def test_robust_errors_is_the_same_as_R(self, regression_dataset):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666)\n )\n df['E'] = 1\n\n coxph(formula=Surv(T, E) ~ var1 + var2, data=df, robust=TRUE)\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [0.209325, 0.693919, 0.443804, 0.065636, 0.386294],\n \"var2\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"T\": [7.335846, 5.269797, 11.684092, 12.678458, 6.601666],\n }\n )\n df[\"E\"] = 1\n\n cph = CoxPHFitter()\n cph.fit(df, \"T\", \"E\", robust=True, show_progress=True)\n expected = pd.Series({\"var1\": 7.680, \"var2\": -0.915})\n assert_series_equal(cph.params_, expected, check_less_precise=2, check_names=False)\n\n expected = pd.Series({\"var1\": 2.097, \"var2\": 0.827})\n assert_series_equal(cph.summary[\"se(coef)\"], expected, check_less_precise=2, check_names=False)\n\n def test_compute_likelihood_ratio_test_is_different_if_weights_are_provided(self, regression_dataset):\n cph = CoxPHFitter()\n cph.fit(regression_dataset, \"T\", \"E\")\n\n without_weights = cph.log_likelihood_ratio_test()\n\n regression_dataset[\"weights\"] = 0.5\n cph = CoxPHFitter()\n\n with pytest.warns(StatisticalWarning, match=\"weights are not integers\"):\n\n cph.fit(regression_dataset, \"T\", \"E\", weights_col=\"weights\")\n\n with_weights = cph.log_likelihood_ratio_test()\n assert with_weights.test_statistic != without_weights.test_statistic\n\n def test_log_likelihood_test_against_R_with_weights(self, rossi):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"T\" = c(5.269797, 6.601666, 7.335846, 11.684092, 12.678458),\n \"w\" = c(1, 0.5, 2, 1, 1)\n )\n df['E'] = 1\n r = coxph(formula=Surv(T, E) ~ var1, data=df, weights=w)\n summary(r)\n \"\"\"\n df = pd.DataFrame(\n {\n \"var1\": 
[0.209325, 0.693919, 0.443804, 0.065636, 0.386294],\n \"T\": [5.269797, 6.601666, 7.335846, 11.684092, 12.678458],\n \"w\": [1, 0.5, 2, 1, 1],\n }\n )\n df[\"E\"] = True\n\n cph = CoxPHFitter()\n with pytest.warns(StatisticalWarning, match=\"weights are not integers\"):\n cph.fit(df, \"T\", \"E\", show_progress=True, weights_col=\"w\")\n expected = 0.05\n assert abs(cph.log_likelihood_ratio_test().test_statistic - expected) < 0.01\n\n def test_trival_float_weights_with_no_ties_is_the_same_as_R(self, regression_dataset):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666)\n )\n df['E'] = 1\n df['var3'] = 0.75\n\n coxph(formula=Surv(T, E) ~ var1 + var2, data=df, weights=var3)\n \"\"\"\n df = regression_dataset\n ix = df[\"var3\"] < 1.0\n df = df.loc[ix].head()\n df[\"var3\"] = [0.75] * 5\n\n cph = CoxPHFitter()\n with pytest.warns(StatisticalWarning, match=\"weights are not integers\"):\n\n cph.fit(df, \"T\", \"E\", weights_col=\"var3\", show_progress=True)\n\n expected_coef = pd.Series({\"var1\": 7.680, \"var2\": -0.915})\n assert_series_equal(cph.params_, expected_coef, check_less_precise=2, check_names=False)\n\n expected_std = pd.Series({\"var1\": 6.641, \"var2\": 1.650})\n assert_series_equal(cph.summary[\"se(coef)\"], expected_std, check_less_precise=2, check_names=False)\n\n expected_ll = -1.142397\n assert abs(cph.log_likelihood_ - expected_ll) < 0.001\n\n def test_less_trival_float_weights_with_no_ties_is_the_same_as_R(self, regression_dataset):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(0.209325, 0.693919, 0.443804, 0.065636, 0.386294),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666)\n )\n df['E'] = 1\n df['var3'] = 0.75\n df[1, 'var3'] = 1.75\n\n coxph(formula=Surv(T, E) ~ var1 + var2, data=df, weights=var3)\n \"\"\"\n df = regression_dataset\n ix = df[\"var3\"] < 1.0\n df = df.loc[ix].head()\n df[\"var3\"] = [1.75] + [0.75] * 4\n\n cph = CoxPHFitter()\n with pytest.warns(StatisticalWarning, match=\"weights are not integers\"):\n\n cph.fit(df, \"T\", \"E\", weights_col=\"var3\", show_progress=True)\n expected = pd.Series({\"var1\": 7.995, \"var2\": -1.154})\n assert_series_equal(cph.params_, expected, check_less_precise=2, check_names=False)\n\n expected = pd.Series({\"var1\": 6.690, \"var2\": 1.614})\n assert_series_equal(cph.summary[\"se(coef)\"], expected, check_less_precise=2, check_names=False)\n\n def test_non_trival_float_weights_with_no_ties_is_the_same_as_R(self, regression_dataset):\n \"\"\"\n df <- read.csv('.../lifelines/datasets/regression.csv')\n coxph(formula=Surv(T, E) ~ var1 + var2, data=df, weights=var3)\n \"\"\"\n df = regression_dataset\n\n cph = CoxPHFitter()\n with pytest.warns(StatisticalWarning, match=\"weights are not integers\"):\n\n cph.fit(df, \"T\", \"E\", weights_col=\"var3\", show_progress=True)\n expected = pd.Series({\"var1\": 0.3268, \"var2\": 0.0775})\n assert_series_equal(cph.params_, expected, check_less_precise=2, check_names=False)\n\n expected = pd.Series({\"var1\": 0.0697, \"var2\": 0.0861})\n assert_series_equal(cph.summary[\"se(coef)\"], expected, check_less_precise=2, check_names=False)\n\n def test_summary_output_using_non_trivial_but_integer_weights(self, rossi):\n\n rossi_weights = rossi.copy()\n rossi_weights[\"weights\"] = 1.0\n rossi_weights = 
rossi_weights.groupby(rossi.columns.tolist())[\"weights\"].sum().reset_index()\n\n cf1 = CoxPHFitter()\n cf1.fit(rossi_weights, duration_col=\"week\", event_col=\"arrest\", weights_col=\"weights\")\n\n cf2 = CoxPHFitter()\n cf2.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n\n # strictly speaking, the variances, etc. don't need to be the same, only the coefs.\n assert_frame_equal(cf1.summary, cf2.summary, check_like=True)\n\n def test_doubling_the_weights_halves_the_variance(self, rossi):\n\n w = 2.0\n rossi_weights = rossi.copy()\n rossi_weights[\"weights\"] = 2\n\n cf1 = CoxPHFitter()\n cf1.fit(rossi_weights, duration_col=\"week\", event_col=\"arrest\", weights_col=\"weights\")\n\n cf2 = CoxPHFitter()\n cf2.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n\n assert_series_equal(cf2.standard_errors_ ** 2, w * cf1.standard_errors_ ** 2)\n\n def test_adding_non_integer_weights_is_fine_if_robust_is_on(self, rossi):\n rossi[\"weights\"] = np.random.exponential(1, rossi.shape[0])\n\n cox = CoxPHFitter()\n\n with pytest.warns(None) as w:\n cox.fit(rossi, \"week\", \"arrest\", weights_col=\"weights\", robust=True)\n assert len(w) == 0\n\n def test_standard_error_coef_output_against_R(self, rossi):\n \"\"\"\n from http://cran.r-project.org/doc/contrib/Fox-Companion/appendix-cox-regression.pdf\n Link is now broken, but this is the code:\n\n library(survival)\n rossi <- read.csv('.../lifelines/datasets/rossi.csv')\n mod.allison <- coxph(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio,\n data=rossi)\n summary(mod.allison)\n \"\"\"\n expected = np.array([0.19138, 0.02200, 0.30799, 0.21222, 0.38187, 0.19576, 0.02865])\n cf = CoxPHFitter()\n cf.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n npt.assert_array_almost_equal(cf.summary[\"se(coef)\"].values, expected, decimal=4)\n\n def test_z_value_output_against_R_to_3_decimal_places(self, rossi):\n \"\"\"\n from http://cran.r-project.org/doc/contrib/Fox-Companion/appendix-cox-regression.pdf\n Link is now broken, but this is the code:\n\n library(survival)\n rossi <- read.csv('.../lifelines/datasets/rossi.csv')\n mod.allison <- coxph(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio,\n data=rossi)\n summary(mod.allison)\n \"\"\"\n expected = np.array([-1.983, -2.611, 1.019, -0.706, -1.136, -0.434, 3.194])\n cf = CoxPHFitter()\n cf.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n npt.assert_array_almost_equal(cf.summary[\"z\"].values, expected, decimal=3)\n\n def test_log_likelihood_test_against_R(self, rossi):\n \"\"\"\n from http://cran.r-project.org/doc/contrib/Fox-Companion/appendix-cox-regression.pdf\n Link is now broken, but this is the code:\n\n library(survival)\n rossi <- read.csv('.../lifelines/datasets/rossi.csv')\n mod.allison <- coxph(Surv(week, arrest) ~ fin + age + race + wexp + mar + paro + prio,\n data=rossi)\n summary(mod.allison)\n \"\"\"\n expected = 33.27\n cf = CoxPHFitter()\n cf.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n assert (cf.log_likelihood_ratio_test().test_statistic - expected) < 0.001\n\n def test_output_with_strata_against_R(self, rossi):\n \"\"\"\n rossi <- read.csv('.../lifelines/datasets/rossi.csv')\n r = coxph(formula = Surv(week, arrest) ~ fin + age + strata(race,\n paro, mar, wexp) + prio, data = rossi)\n \"\"\"\n expected = np.array([-0.3355, -0.0590, 0.1002])\n cf = CoxPHFitter()\n cf.fit(\n rossi, duration_col=\"week\", event_col=\"arrest\", strata=[\"race\", \"paro\", \"mar\", \"wexp\"], show_progress=True\n )\n 
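# race, paro, mar and wexp are stratified here, so coefficients are estimated only for fin, age and prio; compare against the R fit quoted in the docstring\n 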
npt.assert_array_almost_equal(cf.params_.values, expected, decimal=4)\n\n def test_penalized_output_against_R(self, rossi):\n # R code:\n #\n # rossi <- read.csv('.../lifelines/datasets/rossi.csv')\n # mod.allison <- coxph(Surv(week, arrest) ~ ridge(fin, age, race, wexp, mar, paro, prio,\n # theta=1.0, scale=TRUE), data=rossi)\n # cat(round(mod.allison$coefficients, 4), sep=\", \")\n expected = np.array([-0.3761, -0.0565, 0.3099, -0.1532, -0.4295, -0.0837, 0.0909])\n cf = CoxPHFitter(penalizer=1.0 / rossi.shape[0])\n cf.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n npt.assert_array_almost_equal(cf.params_.values, expected, decimal=2)\n\n def test_coef_output_against_Survival_Analysis_by_John_Klein_and_Melvin_Moeschberger(self):\n # see example 8.3 in Survival Analysis by John P. Klein and Melvin L. Moeschberger, Second Edition\n df = load_kidney_transplant(usecols=[\"time\", \"death\", \"black_male\", \"white_male\", \"black_female\"])\n cf = CoxPHFitter()\n cf.fit(df, duration_col=\"time\", event_col=\"death\")\n\n # coefs\n actual_coefs = cf.params_.values\n expected_coefs = np.array([0.1596, 0.2484, 0.6567])\n npt.assert_array_almost_equal(actual_coefs, expected_coefs, decimal=3)\n\n def test_se_against_Survival_Analysis_by_John_Klein_and_Melvin_Moeschberger(self):\n # see table 8.1 in Survival Analysis by John P. Klein and Melvin L. Moeschberger, Second Edition\n df = load_larynx()\n cf = CoxPHFitter()\n cf.fit(df, duration_col=\"time\", event_col=\"death\")\n\n # standard errors\n actual_se = cf._compute_standard_errors(None, None, None, None).values\n expected_se = np.array([0.0143, 0.4623, 0.3561, 0.4222])\n npt.assert_array_almost_equal(actual_se, expected_se, decimal=3)\n\n def test_p_value_against_Survival_Analysis_by_John_Klein_and_Melvin_Moeschberger(self):\n # see table 8.1 in Survival Analysis by John P. Klein and Melvin L. 
Moeschberger, Second Edition\n df = load_larynx()\n cf = CoxPHFitter()\n cf.fit(df, duration_col=\"time\", event_col=\"death\")\n\n # p-values\n actual_p = cf._compute_p_values()\n expected_p = np.array([0.1847, 0.7644, 0.0730, 0.00])\n npt.assert_array_almost_equal(actual_p, expected_p, decimal=2)\n\n def test_input_column_order_is_equal_to_output_hazards_order(self, rossi):\n cp = CoxPHFitter()\n expected = [\"fin\", \"age\", \"race\", \"wexp\", \"mar\", \"paro\", \"prio\"]\n cp.fit(rossi, event_col=\"week\", duration_col=\"arrest\")\n assert list(cp.params_.index.tolist()) == expected\n\n def test_strata_removes_variable_from_summary_output(self, rossi):\n cp = CoxPHFitter()\n cp.fit(rossi, \"week\", \"arrest\", strata=[\"race\"])\n assert \"race\" not in cp.summary.index\n\n def test_strata_works_if_only_a_single_element_is_in_the_strata(self):\n df = load_holly_molly_polly()\n del df[\"Start(days)\"]\n del df[\"Stop(days)\"]\n del df[\"ID\"]\n cp = CoxPHFitter()\n cp.fit(df, \"T\", \"Status\", strata=[\"Stratum\"])\n assert True\n\n def test_coxph_throws_a_explainable_error_when_predict_sees_a_strata_it_hasnt_seen(self):\n training_df = pd.DataFrame.from_records(\n [\n {\"t\": 1, \"e\": 1, \"s1\": 0, \"s2\": 0, \"v\": 1.0},\n {\"t\": 2, \"e\": 1, \"s1\": 0, \"s2\": 0, \"v\": 1.5},\n {\"t\": 3, \"e\": 1, \"s1\": 0, \"s2\": 0, \"v\": 2.5},\n {\"t\": 3, \"e\": 1, \"s1\": 0, \"s2\": 1, \"v\": 2.5},\n {\"t\": 4, \"e\": 1, \"s1\": 0, \"s2\": 1, \"v\": 2.5},\n {\"t\": 3, \"e\": 1, \"s1\": 0, \"s2\": 1, \"v\": 4.5},\n ]\n )\n\n cp = CoxPHFitter()\n cp.fit(training_df, \"t\", \"e\", strata=[\"s1\", \"s2\"])\n\n testing_df = pd.DataFrame.from_records(\n [\n {\"t\": 1, \"e\": 1, \"s1\": 1, \"s2\": 0, \"v\": 0.0},\n {\"t\": 2, \"e\": 1, \"s1\": 1, \"s2\": 0, \"v\": 0.5},\n {\"t\": 3, \"e\": 1, \"s1\": 1, \"s2\": 0, \"v\": -0.5},\n ]\n )\n\n with pytest.raises(StatError):\n cp.predict_median(testing_df)\n\n def test_strata_against_R_output(self, rossi):\n \"\"\"\n > library(survival)\n > rossi = read.csv('.../lifelines/datasets/rossi.csv')\n > r = coxph(formula = Surv(week, arrest) ~ fin + age + strata(race,\n paro, mar, wexp) + prio, data = rossi)\n > r$loglik\n \"\"\"\n\n cp = CoxPHFitter()\n cp.fit(rossi, \"week\", \"arrest\", strata=[\"race\", \"paro\", \"mar\", \"wexp\"])\n\n npt.assert_almost_equal(cp.summary[\"coef\"].values, [-0.335, -0.059, 0.100], decimal=3)\n assert abs(cp.log_likelihood_ - -436.9339) / 436.9339 < 0.01\n\n def test_baseline_hazard_works_with_strata_against_R_output(self, rossi):\n \"\"\"\n > library(survival)\n > rossi = read.csv('.../lifelines/datasets/rossi.csv')\n > r = coxph(formula = Surv(week, arrest) ~ fin + age + strata(race,\n paro, mar, wexp) + prio, data = rossi)\n > basehaz(r, centered=TRUE)\n \"\"\"\n cp = CoxPHFitter()\n cp.fit(rossi, \"week\", \"arrest\", strata=[\"race\", \"paro\", \"mar\", \"wexp\"])\n npt.assert_almost_equal(\n cp.baseline_cumulative_hazard_[(0, 0, 0, 0)].loc[[14, 35, 37, 43, 52]].values,\n [0.076600555, 0.169748261, 0.272088807, 0.396562717, 0.396562717],\n decimal=4,\n )\n npt.assert_almost_equal(\n cp.baseline_cumulative_hazard_[(0, 0, 0, 1)].loc[[27, 43, 48, 52]].values,\n [0.095499001, 0.204196905, 0.338393113, 0.338393113],\n decimal=4,\n )\n\n def test_baseline_hazard_works_with_weights_against_R_output(self, rossi):\n \"\"\"\n library(survival)\n\n fit<-coxph(Surv(week, arrest)~fin, data=rossi, weight=age)\n H0 <- basehaz(fit, centered=TRUE)\n \"\"\"\n\n rossi = rossi[[\"week\", \"arrest\", \"fin\", \"age\"]]\n cp = 
CoxPHFitter()\n cp.fit(rossi, \"week\", \"arrest\", weights_col=\"age\")\n npt.assert_almost_equal(\n cp.baseline_cumulative_hazard_[\"baseline cumulative hazard\"].loc[1.0], 0.00183466, decimal=4\n )\n npt.assert_almost_equal(\n cp.baseline_cumulative_hazard_[\"baseline cumulative hazard\"].loc[2.0], 0.005880265, decimal=4\n )\n npt.assert_almost_equal(\n cp.baseline_cumulative_hazard_[\"baseline cumulative hazard\"].loc[10.0], 0.035425868, decimal=4\n )\n npt.assert_almost_equal(\n cp.baseline_cumulative_hazard_[\"baseline cumulative hazard\"].loc[52.0], 0.274341397, decimal=3\n )\n\n def test_strata_from_init_is_used_in_fit_later(self, rossi):\n strata = [\"race\", \"paro\", \"mar\"]\n cp_with_strata_in_init = CoxPHFitter(strata=strata)\n cp_with_strata_in_init.fit(rossi, \"week\", \"arrest\")\n assert cp_with_strata_in_init.strata == strata\n\n cp_with_strata_in_fit = CoxPHFitter()\n cp_with_strata_in_fit.fit(rossi, \"week\", \"arrest\", strata=strata)\n assert cp_with_strata_in_fit.strata == strata\n\n assert cp_with_strata_in_init.log_likelihood_ == cp_with_strata_in_fit.log_likelihood_\n\n def test_baseline_survival_is_the_same_indp_of_location(self, regression_dataset):\n df = regression_dataset.copy()\n cp1 = CoxPHFitter()\n cp1.fit(df, event_col=\"E\", duration_col=\"T\")\n\n df_demeaned = regression_dataset.copy()\n df_demeaned[[\"var1\", \"var2\", \"var3\"]] = (\n df_demeaned[[\"var1\", \"var2\", \"var3\"]] - df_demeaned[[\"var1\", \"var2\", \"var3\"]].mean()\n )\n cp2 = CoxPHFitter()\n cp2.fit(df_demeaned, event_col=\"E\", duration_col=\"T\")\n assert_frame_equal(cp2.baseline_survival_, cp1.baseline_survival_)\n\n def test_baseline_cumulative_hazard_is_the_same_indp_of_location(self, regression_dataset):\n df = regression_dataset.copy()\n cp1 = CoxPHFitter()\n cp1.fit(df, event_col=\"E\", duration_col=\"T\")\n\n df_demeaned = regression_dataset.copy()\n df_demeaned[[\"var1\", \"var2\", \"var3\"]] = (\n df_demeaned[[\"var1\", \"var2\", \"var3\"]] - df_demeaned[[\"var1\", \"var2\", \"var3\"]].mean()\n )\n cp2 = CoxPHFitter()\n cp2.fit(df_demeaned, event_col=\"E\", duration_col=\"T\")\n assert_frame_equal(cp2.baseline_cumulative_hazard_, cp1.baseline_cumulative_hazard_)\n\n def test_survival_prediction_is_the_same_indp_of_location(self, regression_dataset):\n df = regression_dataset.copy()\n\n df_demeaned = regression_dataset.copy()\n mean = df_demeaned[[\"var1\", \"var2\", \"var3\"]].mean()\n df_demeaned[[\"var1\", \"var2\", \"var3\"]] = df_demeaned[[\"var1\", \"var2\", \"var3\"]] - mean\n\n cp1 = CoxPHFitter()\n cp1.fit(df, event_col=\"E\", duration_col=\"T\")\n\n cp2 = CoxPHFitter()\n cp2.fit(df_demeaned, event_col=\"E\", duration_col=\"T\")\n\n assert_frame_equal(\n cp1.predict_survival_function(df.iloc[[0]][[\"var1\", \"var2\", \"var3\"]]),\n cp2.predict_survival_function(df_demeaned.iloc[[0]][[\"var1\", \"var2\", \"var3\"]]),\n )\n\n def test_baseline_survival_is_the_same_indp_of_scale(self, regression_dataset):\n df = regression_dataset.copy()\n cp1 = CoxPHFitter()\n cp1.fit(df, event_col=\"E\", duration_col=\"T\")\n\n df_descaled = regression_dataset.copy()\n df_descaled[[\"var1\", \"var2\", \"var3\"]] = (\n df_descaled[[\"var1\", \"var2\", \"var3\"]] / df_descaled[[\"var1\", \"var2\", \"var3\"]].std()\n )\n cp2 = CoxPHFitter()\n cp2.fit(df_descaled, event_col=\"E\", duration_col=\"T\")\n assert_frame_equal(cp2.baseline_survival_, cp1.baseline_survival_)\n\n def test_error_thrown_weights_are_nonpositive(self, regression_dataset):\n regression_dataset[\"weights\"] = -1\n 
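# a negative weight is invalid input, so this fit is expected to raise a ValueError\n 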
cph = CoxPHFitter()\n with pytest.raises(ValueError):\n cph.fit(regression_dataset, event_col=\"E\", duration_col=\"T\", weights_col=\"weights\")\n\n def test_survival_prediction_is_the_same_indp_of_scale(self, regression_dataset):\n df = regression_dataset.copy()\n\n df_scaled = regression_dataset.copy()\n df_scaled[[\"var1\", \"var2\", \"var3\"]] = df_scaled[[\"var1\", \"var2\", \"var3\"]] * 10.0\n\n cp1 = CoxPHFitter()\n cp1.fit(df, event_col=\"E\", duration_col=\"T\")\n\n cp2 = CoxPHFitter()\n cp2.fit(df_scaled, event_col=\"E\", duration_col=\"T\")\n\n assert_frame_equal(\n cp1.predict_survival_function(df.iloc[[0]][[\"var1\", \"var2\", \"var3\"]]),\n cp2.predict_survival_function(df_scaled.iloc[[0]][[\"var1\", \"var2\", \"var3\"]]),\n )\n\n def test_warning_is_raised_if_df_has_a_near_constant_column(self, rossi):\n cox = CoxPHFitter()\n rossi[\"constant\"] = 1.0\n\n with pytest.warns(ConvergenceWarning, match=\"variance\") as w:\n with pytest.raises(ConvergenceError):\n cox.fit(rossi, \"week\", \"arrest\")\n\n def test_warning_is_raised_if_df_has_a_near_constant_column_in_one_separation(self, rossi):\n # check for a warning if we have complete separation\n cox = CoxPHFitter()\n ix = rossi[\"arrest\"] == 1\n rossi.loc[ix, \"paro\"] = 1\n rossi.loc[~ix, \"paro\"] = 0\n\n with pytest.warns(ConvergenceWarning) as w:\n cox.fit(rossi, \"week\", \"arrest\")\n assert \"complete separation\" in str(w[0].message)\n assert \"non-unique\" in str(w[1].message)\n\n def test_warning_is_raised_if_complete_separation_is_present(self, cph):\n # check for a warning if we have complete separation\n\n df = pd.DataFrame.from_records(zip(np.arange(-5, 5), np.arange(1, 10)), columns=[\"x\", \"T\"])\n with pytest.warns(ConvergenceWarning, match=\"complete separation\") as w:\n cph.fit(df, \"T\")\n\n df = pd.DataFrame.from_records(zip(np.arange(1, 10), np.arange(1, 10)), columns=[\"x\", \"T\"])\n with pytest.warns(ConvergenceWarning, match=\"complete separation\") as w:\n cph.fit(df, \"T\")\n\n df = pd.DataFrame.from_records(zip(np.arange(0, 100), np.arange(0, 100)), columns=[\"x\", \"T\"])\n df[\"x\"] += 0.01 * np.random.randn(100)\n with pytest.warns(ConvergenceWarning, match=\"complete separation\") as w:\n cph.fit(df, \"T\")\n\n def test_what_happens_when_column_is_constant_for_all_non_deaths(self, rossi):\n # this is known as complete separation: See https://stats.stackexchange.com/questions/11109/how-to-deal-with-perfect-separation-in-logistic-regression\n cp = CoxPHFitter()\n ix = rossi[\"arrest\"] == 1\n rossi.loc[ix, \"paro\"] = 1\n\n with pytest.warns(ConvergenceWarning) as w:\n cp.fit(rossi, \"week\", \"arrest\", show_progress=True)\n\n assert cp.summary.loc[\"paro\", \"exp(coef)\"] > 100\n\n assert \"paro have very low variance\" in w[0].message.args[0]\n assert \"norm(delta)\" in w[1].message.args[0]\n\n def test_what_happens_with_colinear_inputs(self, rossi, cph):\n with pytest.raises(ConvergenceError):\n rossi[\"duped\"] = rossi[\"paro\"] + rossi[\"prio\"]\n cph.fit(rossi, \"week\", \"arrest\", show_progress=True)\n\n def test_durations_of_zero_are_okay(self, rossi, cph):\n rossi.loc[range(10), \"week\"] = 0\n cph.fit(rossi, \"week\", \"arrest\")\n\n def test_all_okay_with_non_trivial_index_in_dataframe(self, rossi):\n n = rossi.shape[0]\n\n cp1 = CoxPHFitter()\n cp1.fit(rossi, \"week\", event_col=\"arrest\")\n\n cp2 = CoxPHFitter()\n rossi_new_index = rossi.set_index(np.random.randint(n, size=n))\n cp2.fit(rossi_new_index, \"week\", event_col=\"arrest\")\n\n assert_frame_equal(cp2.summary, 
cp1.summary)\n\n def test_robust_errors_against_R_no_ties(self, regression_dataset, cph):\n df = regression_dataset\n cph.fit(df, \"T\", \"E\", robust=True)\n expected = pd.Series({\"var1\": 0.0879, \"var2\": 0.0847, \"var3\": 0.0655})\n assert_series_equal(cph.standard_errors_, expected, check_less_precise=2, check_names=False)\n\n def test_robust_errors_with_strata_against_R(self, rossi, cph):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(1, 1, 2, 2, 2, 1),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092, 0.5),\n \"var3\" = c(1, 2, 3, 2, 1, 2),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666, 8.)\n )\n df['E'] = 1\n\n coxph(formula=Surv(T, E) ~ strata(var1) + var2 + var3, data=df, robust=TRUE)\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [1, 1, 2, 2, 2, 1],\n \"var2\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092, 0.5],\n \"var3\": [1, 2, 3, 2, 1, 2],\n \"T\": [7.335846, 5.269797, 11.684092, 12.678458, 6.601666, 8.0],\n }\n )\n df[\"E\"] = 1\n\n cph.fit(df, duration_col=\"T\", event_col=\"E\", strata=[\"var1\"], robust=True)\n npt.assert_allclose(cph.summary[\"se(coef)\"].values, np.array([1.076, 0.680]), rtol=1e-2)\n\n @pytest.mark.xfail\n def test_robust_errors_with_strata_against_R_super_accurate(self, rossi, cph):\n \"\"\"\n df <- data.frame(\n \"var1\" = c(1, 1, 2, 2, 2),\n \"var2\" = c(0.184677, 0.071893, 1.364646, 0.098375, 1.663092),\n \"T\" = c( 7.335846, 5.269797, 11.684092, 12.678458, 6.601666)\n )\n df['E'] = 1\n\n coxph(formula=Surv(T, E) ~ strata(var1) + var2, data=df, robust=TRUE)\n \"\"\"\n\n df = pd.DataFrame(\n {\n \"var1\": [1, 1, 2, 2, 2],\n \"var2\": [0.184677, 0.071893, 1.364646, 0.098375, 1.663092],\n \"T\": [7.335846, 5.269797, 11.684092, 12.678458, 6.601666],\n }\n )\n df[\"E\"] = 1\n\n cph.fit(df, duration_col=\"T\", event_col=\"E\", strata=[\"var1\"], robust=True)\n npt.assert_allclose(cph.summary[\"se(coef)\"].values, 2.78649, rtol=1e-4)\n\n def test_what_happens_to_nans(self, rossi, cph):\n rossi[\"var4\"] = np.nan\n with pytest.raises(TypeError):\n cph.fit(rossi, duration_col=\"week\", event_col=\"arrest\")\n\n def test_check_assumptions_fails_for_nonunique_index(self, cph, rossi):\n\n cph.fit(rossi, \"week\", \"arrest\")\n\n rossi.index = np.ones(rossi.shape[0])\n with pytest.raises(IndexError):\n cph.check_assumptions(rossi)\n\n\nclass TestAalenAdditiveFitter:\n @pytest.fixture()\n def aaf(self):\n return AalenAdditiveFitter()\n\n def test_slope_tests_against_R(self, aaf, regression_dataset):\n \"\"\"\n df['E'] = 1\n a = aareg(formula=Surv(T, E) ~ var1 + var2 + var3, data=df)\n plot(a)\n summary(a, test='nrisk')\n \"\"\"\n regression_dataset[\"E\"] = 1\n aaf.fit(regression_dataset, \"T\", \"E\")\n npt.assert_allclose(aaf.summary[\"slope(coef)\"], [0.05141401, 0.01059746, 0.03923360, 0.07753566])\n\n def test_penalizer_reduces_norm_of_hazards(self, rossi):\n from numpy.linalg import norm\n\n aaf_without_penalizer = AalenAdditiveFitter(coef_penalizer=0.0, smoothing_penalizer=0.0)\n assert aaf_without_penalizer.coef_penalizer == aaf_without_penalizer.smoothing_penalizer == 0.0\n aaf_without_penalizer.fit(rossi, event_col=\"arrest\", duration_col=\"week\")\n\n aaf_with_penalizer = AalenAdditiveFitter(coef_penalizer=10.0, smoothing_penalizer=10.0)\n aaf_with_penalizer.fit(rossi, event_col=\"arrest\", duration_col=\"week\")\n assert norm(aaf_with_penalizer.cumulative_hazards_) <= norm(aaf_without_penalizer.cumulative_hazards_)\n\n def test_input_column_order_is_equal_to_output_hazards_order(self, rossi):\n aaf = 
AalenAdditiveFitter()\n expected = [\"fin\", \"age\", \"race\", \"wexp\", \"mar\", \"paro\", \"prio\"]\n aaf.fit(rossi, event_col=\"arrest\", duration_col=\"week\")\n assert list(aaf.cumulative_hazards_.columns.drop(\"_intercept\")) == expected\n\n aaf = AalenAdditiveFitter(fit_intercept=False)\n expected = [\"fin\", \"age\", \"race\", \"wexp\", \"mar\", \"paro\", \"prio\"]\n aaf.fit(rossi, event_col=\"arrest\", duration_col=\"week\")\n assert list(aaf.cumulative_hazards_.columns) == expected\n\n def test_swapping_order_of_columns_in_a_df_is_okay(self, rossi):\n aaf = AalenAdditiveFitter()\n aaf.fit(rossi, event_col=\"arrest\", duration_col=\"week\")\n\n misorder = [\"age\", \"race\", \"wexp\", \"mar\", \"paro\", \"prio\", \"fin\"]\n natural_order = rossi.columns.drop([\"week\", \"arrest\"])\n deleted_order = rossi.columns.difference([\"week\", \"arrest\"])\n assert_series_equal(aaf.predict_median(rossi[natural_order]), aaf.predict_median(rossi[misorder]))\n assert_series_equal(aaf.predict_median(rossi[natural_order]), aaf.predict_median(rossi[deleted_order]))\n\n aaf = AalenAdditiveFitter(fit_intercept=False)\n aaf.fit(rossi, event_col=\"arrest\", duration_col=\"week\")\n assert_series_equal(aaf.predict_median(rossi[natural_order]), aaf.predict_median(rossi[misorder]))\n assert_series_equal(aaf.predict_median(rossi[natural_order]), aaf.predict_median(rossi[deleted_order]))\n\n def test_large_dimensions_for_recursion_error(self):\n n = 500\n d = 50\n X = pd.DataFrame(np.random.randn(n, d))\n T = np.random.exponential(size=n)\n X[\"T\"] = T\n aaf = AalenAdditiveFitter(coef_penalizer=0.01)\n aaf.fit(X, duration_col=\"T\")\n\n def test_aalen_additive_median_predictions_split_data(self):\n # This tests to make sure that my median predictions statisfy\n # the prediction are greater than the actual 1/2 the time.\n # generate some hazard rates and a survival data set\n n = 2500\n d = 5\n timeline = np.linspace(0, 70, 5000)\n hz, coef, X = generate_hazard_rates(n, d, timeline)\n T = generate_random_lifetimes(hz, timeline)\n\n X[\"T\"] = T\n X = X.replace([np.inf, -np.inf], 10.0)\n # del X[5]\n\n # fit it to Aalen's model\n aaf = AalenAdditiveFitter(coef_penalizer=0.5, fit_intercept=False)\n aaf.fit(X, \"T\")\n\n # predictions\n T_pred = aaf.predict_median(X[list(range(6))])\n assert abs((T_pred.values > T).mean() - 0.5) < 0.05\n\n def test_dataframe_input_with_nonstandard_index(self):\n aaf = AalenAdditiveFitter(coef_penalizer=5.0)\n df = pd.DataFrame(\n [(16, True, True), (1, True, True), (4, False, True)],\n columns=[\"duration\", \"done_feeding\", \"white\"],\n index=[\"a\", \"b\", \"c\"],\n )\n aaf.fit(df, duration_col=\"duration\", event_col=\"done_feeding\")\n\n def test_crossval_for_aalen_add_concordance_index(self, data_pred2, data_pred1):\n aaf = AalenAdditiveFitter(coef_penalizer=0.1)\n for data_pred in [data_pred1, data_pred2]:\n mean_scores = []\n for repeat in range(20):\n scores = k_fold_cross_validation(\n aaf, data_pred, duration_col=\"t\", event_col=\"E\", k=3, scoring_method=\"concordance_index\"\n )\n mean_scores.append(np.mean(scores))\n\n expected = 0.90\n msg = \"Expected min-mean c-index {:.2f} < {:.2f}\"\n assert np.mean(mean_scores) > expected, msg.format(expected, np.mean(scores))\n\n @pytest.mark.xfail\n def test_crossval_for_aalen_add(self, data_pred2, data_pred1):\n aaf = AalenAdditiveFitter(coef_penalizer=0.1)\n for data_pred in [data_pred1, data_pred2]:\n mean_scores = []\n for repeat in range(20):\n scores = k_fold_cross_validation(\n aaf, data_pred, 
duration_col=\"t\", event_col=\"E\", k=3, scoring_method=\"log_likelihood\"\n )\n mean_scores.append(np.mean(scores))\n\n expected = 0.90\n msg = \"Expected min-mean c-index {:.2f} < {:.2f}\"\n assert np.mean(mean_scores) > expected, msg.format(expected, np.mean(scores))\n\n def test_predict_cumulative_hazard_inputs(self, data_pred1):\n aaf = AalenAdditiveFitter(coef_penalizer=0.001)\n aaf.fit(data_pred1, duration_col=\"t\", event_col=\"E\")\n x = data_pred1.iloc[:5].drop([\"t\", \"E\"], axis=1)\n y_df = aaf.predict_cumulative_hazard(x)\n y_np = aaf.predict_cumulative_hazard(x.values)\n assert_frame_equal(y_df, y_np)\n\n def test_aalen_additive_fitter_versus_R(self, aaf, rossi):\n \"\"\"\n a = aareg(formula=Surv(week, arrest) ~ fin + age + race+ wexp + mar + paro + prio, data=head(rossi, 432))\n \"\"\"\n aaf.fit(rossi, \"week\", \"arrest\")\n actual = aaf.hazards_\n npt.assert_allclose(actual.loc[:2, \"fin\"].tolist(), [-0.004628582, -0.005842295], rtol=1e-06)\n npt.assert_allclose(actual.loc[:2, \"prio\"].tolist(), [-1.268344e-03, 1.119377e-04], rtol=1e-06)\n npt.assert_allclose(actual.loc[:2, \"_intercept\"].tolist(), [1.913901e-02, -3.297233e-02], rtol=1e-06)\n\n def test_aalen_additive_fitter_versus_R_with_weights(self, aaf, regression_dataset):\n \"\"\"\n df['E'] = 1\n a = aareg(formula=Surv(T, E) ~ var1 + var2, data=df, weights=var3)\n a$coefficient\n \"\"\"\n regression_dataset[\"E\"] = 1\n with pytest.warns(StatisticalWarning, match=\"weights are not integers\"):\n aaf.fit(regression_dataset, \"T\", \"E\", weights_col=\"var3\")\n actual = aaf.hazards_\n npt.assert_allclose(actual.iloc[:3][\"var1\"].tolist(), [1.301523e-02, -4.925302e-04, 2.304792e-02], rtol=1e-06)\n npt.assert_allclose(\n actual.iloc[:3][\"_intercept\"].tolist(), [-9.672957e-03, 1.439187e-03, 1.838915e-03], rtol=1e-06\n )\n\n def test_cumulative_hazards_versus_R(self, aaf, regression_dataset):\n \"\"\"\n df['E'] = 1\n a = aareg(formula=Surv(T, E) ~ var1 + var2 + var3, data=df)\n c = a$coefficient\n apply(c, 2, cumsum)\n \"\"\"\n regression_dataset[\"E\"] = 1\n\n aaf.fit(regression_dataset, \"T\", \"E\")\n actual = aaf.cumulative_hazards_.iloc[-1]\n npt.assert_allclose(actual[\"_intercept\"], 2.1675130235, rtol=1e-06)\n npt.assert_allclose(actual[\"var1\"], 0.6820086125, rtol=1e-06)\n npt.assert_allclose(actual[\"var2\"], -0.0776583514, rtol=1e-06)\n npt.assert_allclose(actual[\"var3\"], 0.5515174017, rtol=1e-06)\n\n\nclass TestCoxTimeVaryingFitter:\n @pytest.fixture()\n def ctv(self):\n return CoxTimeVaryingFitter()\n\n @pytest.fixture()\n def dfcv(self):\n from lifelines.datasets import load_dfcv\n\n return load_dfcv()\n\n @pytest.fixture()\n def heart(self):\n return load_stanford_heart_transplants()\n\n def test_inference_against_known_R_output(self, ctv, dfcv):\n \"\"\"\n from http://www.math.ucsd.edu/~rxu/math284/slect7.pdf\n\n > coxph(formula = Surv(time = start, time2 = stop, event) ~ group + z, data = dfcv)\n\n \"\"\"\n ctv.fit(dfcv, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n npt.assert_almost_equal(ctv.summary[\"coef\"].values, [1.826757, 0.705963], decimal=4)\n npt.assert_almost_equal(ctv.summary[\"se(coef)\"].values, [1.229, 1.206], decimal=3)\n npt.assert_almost_equal(ctv.summary[\"p\"].values, [0.14, 0.56], decimal=2)\n\n def test_that_id_col_is_optional(self, dfcv):\n\n ctv_with_id = CoxTimeVaryingFitter().fit(\n dfcv, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\"\n )\n ctv_without_id = CoxTimeVaryingFitter().fit(\n dfcv.drop(\"id\", 
axis=1), start_col=\"start\", stop_col=\"stop\", event_col=\"event\"\n )\n\n assert_frame_equal(ctv_without_id.summary, ctv_with_id.summary)\n\n def test_what_happens_to_nans(self, ctv, dfcv):\n \"\"\"\n from http://www.math.ucsd.edu/~rxu/math284/slect7.pdf\n\n > coxph(formula = Surv(time = start, time2 = stop, event) ~ group + z, data = dfcv)\n\n \"\"\"\n dfcv[\"var4\"] = np.nan\n with pytest.raises(TypeError):\n ctv.fit(dfcv, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n\n def test_inference_against_known_R_output_with_weights(self, ctv, dfcv):\n \"\"\"\n > dfcv['weights'] = [0.46009262, 0.04643257, 0.38150793, 0.11903676, 0.51965860, 0.96173133, 0.32435527, 0.16708398, 0.85464418, 0.15146481, 0.24713429, 0.55198318, 0.16948366, 0.19246483]\n > coxph(formula = Surv(time = start, time2 = stop, event) ~ group + z, data = dfcv)\n\n \"\"\"\n dfcv[\"weights\"] = [\n 0.4600926178338619,\n 0.046432574620396294,\n 0.38150793079960477,\n 0.11903675541025949,\n 0.5196585971574837,\n 0.9617313298681641,\n 0.3243552664091651,\n 0.16708398114269085,\n 0.8546441798716636,\n 0.15146480991643507,\n 0.24713429350878657,\n 0.5519831777187729,\n 0.16948366380884838,\n 0.19246482703103884,\n ]\n ctv.fit(dfcv, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\", weights_col=\"weights\")\n npt.assert_almost_equal(ctv.summary[\"coef\"].values, [0.313, 0.423], decimal=3)\n npt.assert_almost_equal(ctv.summary[\"se(coef)\"].values, [1.542, 1.997], decimal=3)\n\n def test_fitter_will_raise_an_error_if_immediate_death_present(self, ctv):\n df = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"start\": 0, \"stop\": 0, \"var\": 1.0, \"event\": 1},\n {\"id\": 1, \"start\": 0, \"stop\": 10, \"var\": 2.0, \"event\": 1},\n {\"id\": 2, \"start\": 0, \"stop\": 10, \"var\": 3.0, \"event\": 1},\n ]\n )\n\n with pytest.raises(ValueError):\n ctv.fit(df, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n\n def test_fitter_will_raise_a_warning_if_instaneous_observation_present(self, ctv):\n df = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"start\": 0, \"stop\": 0, \"var\": 1.0, \"event\": 0}, # note that start = stop here.\n {\"id\": 1, \"start\": 0, \"stop\": 10, \"var\": 1.0, \"event\": 1},\n {\"id\": 2, \"start\": 0, \"stop\": 10, \"var\": 2.0, \"event\": 1},\n ]\n )\n\n with pytest.warns(RuntimeWarning, match=\"safely dropped\") as w:\n ctv.fit(df, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n\n df = df.loc[~((df[\"start\"] == df[\"stop\"]) & (df[\"start\"] == 0))]\n\n with pytest.warns(None) as w:\n ctv.fit(df, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n assert len(w) == 0\n\n def test_fitter_will_error_if_degenerate_time(self, ctv):\n df = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"start\": 0, \"stop\": 0, \"event\": 1}, # note the degenerate times\n {\"id\": 2, \"start\": 0, \"stop\": 5, \"event\": 1},\n {\"id\": 3, \"start\": 0, \"stop\": 5, \"event\": 1},\n {\"id\": 4, \"start\": 0, \"stop\": 4, \"event\": 1},\n ]\n )\n with pytest.raises(ValueError):\n ctv.fit(df, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n\n df.loc[(df[\"start\"] == df[\"stop\"]) & (df[\"start\"] == 0) & df[\"event\"], \"stop\"] = 0.5\n ctv.fit(df, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n assert True\n\n def test_ctv_fitter_will_handle_trivial_weight_col(self, ctv, dfcv):\n ctv.fit(dfcv, id_col=\"id\", start_col=\"start\", 
stop_col=\"stop\", event_col=\"event\")\n coefs_no_weights = ctv.summary[\"coef\"].values\n\n dfcv[\"weight\"] = 1.0\n ctv.fit(dfcv, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\", weights_col=\"weight\")\n coefs_trivial_weights = ctv.summary[\"coef\"].values\n\n npt.assert_almost_equal(coefs_no_weights, coefs_trivial_weights, decimal=3)\n\n def test_doubling_the_weights_halves_the_variance(self, ctv, dfcv):\n ctv.fit(dfcv, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n coefs_no_weights = ctv.summary[\"coef\"].values\n variance_no_weights = ctv.summary[\"se(coef)\"].values ** 2\n\n dfcv[\"weight\"] = 2.0\n ctv.fit(dfcv, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\", weights_col=\"weight\")\n coefs_double_weights = ctv.summary[\"coef\"].values\n variance_double_weights = ctv.summary[\"se(coef)\"].values ** 2\n\n npt.assert_almost_equal(coefs_no_weights, coefs_double_weights, decimal=3)\n npt.assert_almost_equal(variance_no_weights, 2 * variance_double_weights, decimal=3)\n\n def test_ctv_fitter_will_give_the_same_results_as_static_cox_model(self, ctv, rossi):\n\n cph = CoxPHFitter()\n cph.fit(rossi, \"week\", \"arrest\")\n expected = cph.params_.values\n\n rossi_ctv = rossi.reset_index()\n rossi_ctv = to_long_format(rossi_ctv, \"week\")\n\n ctv.fit(rossi_ctv, start_col=\"start\", stop_col=\"stop\", event_col=\"arrest\", id_col=\"index\")\n npt.assert_array_almost_equal(ctv.params_.values, expected, decimal=4)\n\n def test_ctv_fitter_will_handle_integer_weight_as_static_model(self, ctv, rossi):\n # deleting some columns to create more duplicates\n del rossi[\"age\"]\n del rossi[\"paro\"]\n del rossi[\"mar\"]\n del rossi[\"prio\"]\n\n rossi_ = rossi.copy()\n rossi_[\"weights\"] = 1.0\n rossi_ = rossi_.groupby(rossi.columns.tolist())[\"weights\"].sum().reset_index()\n\n cph = CoxPHFitter()\n cph.fit(rossi, \"week\", \"arrest\")\n expected = cph.params_.values\n\n # create the id column this way.\n rossi_ = rossi_.reset_index()\n rossi_ = to_long_format(rossi_, \"week\")\n\n ctv.fit(rossi_, start_col=\"start\", stop_col=\"stop\", event_col=\"arrest\", id_col=\"index\", weights_col=\"weights\")\n npt.assert_array_almost_equal(ctv.params_.values, expected, decimal=3)\n\n def test_fitter_accept_boolean_columns(self, ctv):\n df = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"start\": 0, \"stop\": 5, \"var\": -1.2, \"bool\": True, \"event\": 1},\n {\"id\": 2, \"start\": 0, \"stop\": 5, \"var\": 1.3, \"bool\": False, \"event\": 1},\n {\"id\": 3, \"start\": 0, \"stop\": 5, \"var\": -1.3, \"bool\": False, \"event\": 1},\n ]\n )\n\n ctv.fit(df, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n assert True\n\n def test_warning_is_raised_if_df_has_a_near_constant_column(self, ctv, dfcv):\n dfcv[\"constant\"] = 1.0\n\n with pytest.warns(ConvergenceWarning, match=\"variance\") as w:\n with pytest.raises(ConvergenceError):\n ctv.fit(dfcv, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n\n def test_warning_is_raised_if_df_has_a_near_constant_column_in_one_separation(self, ctv, dfcv):\n # check for a warning if we have complete separation\n ix = dfcv[\"event\"]\n dfcv.loc[ix, \"var3\"] = 1\n dfcv.loc[~ix, \"var3\"] = 0\n\n with pytest.warns(ConvergenceWarning, match=\"complete separation\") as w:\n ctv.fit(dfcv, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n\n def test_warning_is_raised_if_df_has_start_eq_stop_at_event_time(self, 
ctv):\n df = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"start\": 0, \"stop\": 5, \"event\": 0},\n {\"id\": 1, \"start\": 5, \"stop\": 5, \"event\": 1},\n {\"id\": 2, \"start\": 0, \"stop\": 2, \"event\": 0},\n {\"id\": 2, \"start\": 2, \"stop\": 5, \"event\": 1},\n {\"id\": 3, \"start\": 0, \"stop\": 5, \"event\": 0},\n {\"id\": 3, \"start\": 6, \"stop\": 6, \"event\": 1},\n ]\n )\n\n with pytest.warns(ConvergenceWarning, match=\"with start and stop equal and a death event\") as w:\n ctv.fit(df, id_col=\"id\", start_col=\"start\", stop_col=\"stop\", event_col=\"event\")\n\n def test_summary_output_versus_Rs_against_standford_heart_transplant(self, ctv, heart):\n \"\"\"\n library(survival)\n data(heart)\n coxph(Surv(start, stop, event) ~ age + transplant + surgery + year, data= heart)\n \"\"\"\n ctv.fit(heart, id_col=\"id\", event_col=\"event\")\n npt.assert_almost_equal(ctv.summary[\"coef\"].values, [0.0272, -0.1463, -0.6372, -0.0103], decimal=3)\n npt.assert_almost_equal(ctv.summary[\"se(coef)\"].values, [0.0137, 0.0705, 0.3672, 0.3138], decimal=3)\n npt.assert_almost_equal(ctv.summary[\"p\"].values, [0.048, 0.038, 0.083, 0.974], decimal=3)\n\n def test_error_is_raised_if_using_non_numeric_data(self, ctv):\n ctv = CoxTimeVaryingFitter(penalizer=1.0)\n df = pd.DataFrame.from_dict(\n {\n \"id\": [1, 2, 3],\n \"start\": [0.0, 0.0, 0.0],\n \"end\": [1.0, 2.0, 3.0],\n \"e\": [1, 1, 1],\n \"bool_\": [True, True, False],\n \"int_\": [1, -1, 0],\n \"uint8_\": pd.Series([1, 3, 0], dtype=\"uint8\"),\n \"string_\": [\"test\", \"a\", \"2.5\"],\n \"float_\": [1.2, -0.5, 0.0],\n \"categorya_\": pd.Series([1, 2, 3], dtype=\"category\"),\n \"categoryb_\": pd.Series([\"a\", \"b\", \"a\"], dtype=\"category\"),\n }\n )\n\n for subset in [[\"start\", \"end\", \"e\", \"id\", \"categoryb_\"], [\"start\", \"end\", \"e\", \"id\", \"string_\"]]:\n with pytest.raises(ValueError):\n ctv.fit(df[subset], id_col=\"id\", event_col=\"e\", stop_col=\"end\")\n\n for subset in [\n [\"start\", \"end\", \"e\", \"id\", \"categorya_\"],\n [\"start\", \"end\", \"e\", \"id\", \"bool_\"],\n [\"start\", \"end\", \"e\", \"id\", \"int_\"],\n [\"start\", \"end\", \"e\", \"id\", \"float_\"],\n [\"start\", \"end\", \"e\", \"id\", \"uint8_\"],\n ]:\n ctv.fit(df[subset], id_col=\"id\", event_col=\"e\", stop_col=\"end\")\n\n def test_ctv_prediction_methods(self, ctv, heart):\n ctv.fit(heart, id_col=\"id\", event_col=\"event\")\n assert ctv.predict_log_partial_hazard(heart).shape[0] == heart.shape[0]\n assert ctv.predict_partial_hazard(heart).shape[0] == heart.shape[0]\n\n def test_ctv_baseline_cumulative_hazard_against_R(self, ctv, heart):\n \"\"\"\n library(survival)\n data(heart)\n r = coxph(Surv(start, stop, event) ~ age + transplant + surgery + year, data=heart)\n\n sest = survfit(r, se.fit = F)\n sest$cumhaz\n \"\"\"\n expected = [\n 0.008576073,\n 0.034766771,\n 0.061749725,\n 0.080302426,\n 0.09929016,\n 0.109040953,\n 0.118986351,\n 0.129150022,\n 0.160562122,\n 0.171388794,\n 0.182287871,\n 0.204408269,\n 0.215630422,\n 0.227109569,\n 0.238852428,\n 0.250765502,\n 0.26291466,\n 0.275185886,\n 0.287814114,\n 0.313833224,\n 0.327131062,\n 0.340816277,\n 0.354672739,\n 0.368767829,\n 0.383148661,\n 0.397832317,\n 0.412847777,\n 0.428152773,\n 0.459970612,\n 0.476275941,\n 0.50977267,\n 0.52716976,\n 0.545297536,\n 0.563803467,\n 0.582672943,\n 0.602305488,\n 0.622619844,\n 0.643438746,\n 0.664737826,\n 0.686688715,\n 0.7093598,\n 0.732698614,\n 0.756553038,\n 0.781435099,\n 0.806850698,\n 0.832604447,\n 0.859118436,\n 
0.886325942,\n 0.914877455,\n 0.975077858,\n 1.006355139,\n 1.039447234,\n 1.073414895,\n 1.109428518,\n 1.155787187,\n 1.209776781,\n 1.26991066,\n 1.3421101,\n 1.431890995,\n 1.526763781,\n 1.627902989,\n 1.763620039,\n ]\n ctv.fit(heart, id_col=\"id\", event_col=\"event\")\n npt.assert_array_almost_equal(ctv.baseline_cumulative_hazard_.values[0:3, 0], expected[0:3], decimal=3)\n npt.assert_array_almost_equal(\n ctv.baseline_cumulative_hazard_.values[:, 0], expected, decimal=2\n ) # errors accumulate fast =(\n\n def test_repr_with_fitter(self, ctv, heart):\n ctv.fit(heart, id_col=\"id\", event_col=\"event\")\n uniques = heart[\"id\"].unique().shape[0]\n assert ctv.__repr__() == \"<lifelines.CoxTimeVaryingFitter: fitted with %d periods, %d subjects, %d events>\" % (\n heart.shape[0],\n uniques,\n heart[\"event\"].sum(),\n )\n\n def test_all_okay_with_non_trivial_index_in_dataframe(self, ctv, heart):\n n = heart.shape[0]\n\n ctv1 = CoxTimeVaryingFitter()\n ctv1.fit(heart, id_col=\"id\", event_col=\"event\")\n\n ctv2 = CoxTimeVaryingFitter()\n heart_new_index = heart.set_index(np.random.randint(n, size=n))\n ctv2.fit(heart_new_index, id_col=\"id\", event_col=\"event\")\n\n assert_frame_equal(ctv2.summary, ctv1.summary)\n\n def test_penalizer(self, heart):\n ctv = CoxTimeVaryingFitter(penalizer=1.0)\n ctv.fit(heart, id_col=\"id\", event_col=\"event\")\n assert True\n\n def test_likelihood_ratio_test_against_R(self, ctv, heart):\n ctv.fit(heart, id_col=\"id\", event_col=\"event\")\n sr = ctv.log_likelihood_ratio_test()\n test_stat, deg_of_freedom, p_value = sr.test_statistic, sr.degrees_freedom, sr.p_value\n assert abs(test_stat - 15.1) < 0.1\n assert abs(p_value - 0.00448) < 0.001\n assert deg_of_freedom == 4\n\n def test_error_thrown_weights_are_nonpositive(self, ctv, heart):\n heart[\"weights\"] = -1\n with pytest.raises(ValueError):\n ctv.fit(heart, id_col=\"id\", event_col=\"event\", weights_col=\"weights\")\n\n def test_error_thrown_if_column_doesnt_exist(self, ctv, heart):\n with pytest.raises(KeyError):\n ctv.fit(heart, id_col=\"_id_\", event_col=\"event\")\n\n def test_print_summary(self, ctv, heart):\n ctv.fit(heart, id_col=\"id\", event_col=\"event\")\n\n import sys\n\n saved_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n\n ctv.fit(heart, id_col=\"id\", event_col=\"event\")\n ctv._time_fit_was_called = \"2018-10-23 02:41:45 UTC\"\n ctv.print_summary()\n output = out.getvalue().strip().split()\n expected = (\n (\n repr(ctv)\n + \"\\n\"\n + \"\"\"\n event col = event\nnumber of subjects = 103\n number of periods = 172\n number of events = 75\n log-likelihood = -290.566\n time fit was run = 2018-10-23 02:41:45 UTC\n\n---\n coef exp(coef) se(coef) z p coef lower 95% coef upper 95%\nage 0.0272 1.0275 0.0137 1.9809 0.0476 0.0003 0.0540\nyear -0.1463 0.8639 0.0705 -2.0768 0.0378 -0.2845 -0.0082\nsurgery -0.6372 0.5288 0.3672 -1.7352 0.0827 -1.3570 0.0825\ntransplant -0.0103 0.9898 0.3138 -0.0327 0.9739 -0.6252 0.6047\n---\n\nLikelihood ratio test = 15.11 on 4 df, -log2(p)=7.80\n\"\"\"\n )\n .strip()\n .split()\n )\n for i in [0, 1, 2, 3, -2, -1, -3, -4, -5]:\n assert output[i] == expected[i]\n finally:\n sys.stdout = saved_stdout\n\n def test_ctv_against_cph_for_static_datasets_but_one_is_long(self):\n rossi = load_rossi()\n long_rossi = to_episodic_format(rossi, \"week\", \"arrest\")\n assert rossi.shape[0] < long_rossi.shape[0]\n\n ctv = CoxTimeVaryingFitter()\n ctv.fit(long_rossi, id_col=\"id\", event_col=\"arrest\")\n\n cph = CoxPHFitter()\n cph.fit(rossi, 
\"week\", \"arrest\")\n\n assert_frame_equal(cph.summary, ctv.summary, check_like=True, check_less_precise=3)\n\n def test_ctv_with_strata_against_R(self, ctv, heart):\n \"\"\"\n library(survival)\n data(heart)\n r = coxph(Surv(start, stop, event) ~ age + strata(transplant) + surgery + year, data=heart)\n r\n logLik(r)\n \"\"\"\n ctv.fit(heart, id_col=\"id\", event_col=\"event\", strata=\"transplant\")\n summary = ctv.summary.sort_index()\n npt.assert_allclose(summary[\"coef\"].tolist(), [0.0293, -0.6176, -0.1527], atol=0.001)\n npt.assert_allclose(summary[\"se(coef)\"].tolist(), [0.0139, 0.3707, 0.0710], atol=0.001)\n npt.assert_allclose(summary[\"z\"].tolist(), [2.11, -1.67, -2.15], atol=0.01)\n npt.assert_allclose(ctv.log_likelihood_, -254.7144, atol=0.01)\n\n def test_ctv_with_multiple_strata(self, ctv, heart):\n ctv.fit(heart, id_col=\"id\", event_col=\"event\", strata=[\"transplant\", \"surgery\"])\n npt.assert_allclose(ctv.log_likelihood_, -230.6726, atol=0.01)\n\n def test_ctv_ratio_test_with_strata(self, ctv, heart):\n ctv.fit(heart, id_col=\"id\", event_col=\"event\", strata=[\"transplant\"])\n npt.assert_allclose(ctv.log_likelihood_ratio_test().test_statistic, 15.68, atol=0.01)\n\n def test_ctv_ratio_test_with_strata_and_initial_point(self, ctv, heart):\n ctv.fit(heart, id_col=\"id\", event_col=\"event\", strata=[\"transplant\"], initial_point=0.1 * np.ones(3))\n npt.assert_allclose(ctv.log_likelihood_ratio_test().test_statistic, 15.68, atol=0.01)\n\n\nclass TestAalenJohansenFitter:\n @pytest.fixture # pytest fixtures are functions that are \"executed\" before every test\n def duration(self):\n return [1, 2, 3, 4, 5, 6]\n\n @pytest.fixture\n def event_observed(self):\n return [0, 1, 1, 2, 2, 0]\n\n @pytest.fixture\n def fitter(self):\n return AalenJohansenFitter()\n\n @pytest.fixture\n def kmfitter(self):\n return KaplanMeierFitter()\n\n def test_jitter(self, fitter):\n d = pd.Series([1, 1, 1])\n e = fitter._jitter(durations=d, event=pd.Series([1, 1, 1]), jitter_level=0.01)\n\n npt.assert_equal(np.any(np.not_equal(d, e)), True)\n\n def test_tied_input_data(self, fitter):\n # Based on new setup of ties, this counts as a valid tie\n d = [1, 2, 2, 4, 5, 6]\n with pytest.warns(Warning, match=\"Tied event times\"):\n fitter.fit(durations=d, event_observed=[0, 1, 2, 1, 2, 0], event_of_interest=2)\n npt.assert_equal(np.any(np.not_equal([0] + d, fitter.event_table.index)), True)\n\n def test_updated_input_ties(self, fitter):\n # Based on the new setup of ties, should not detect any ties as existing\n d = [1, 2, 2, 4, 5, 6]\n fitter.fit(durations=d, event_observed=[0, 1, 1, 1, 2, 0], event_of_interest=1)\n npt.assert_equal(np.asarray([0, 1, 2, 4, 5, 6]), np.asarray(fitter.event_table.index))\n\n def test_updated_censor_ties(self, fitter):\n # Based on the new setup of ties, should not detect any ties as existing\n d = [1, 2, 2, 4, 5, 6]\n fitter.fit(durations=d, event_observed=[0, 0, 1, 1, 2, 0], event_of_interest=1)\n npt.assert_equal(np.asarray([0, 1, 2, 4, 5, 6]), np.asarray(fitter.event_table.index))\n\n def test_event_table_is_correct(self, fitter, duration, event_observed):\n fitter.fit(duration, event_observed, event_of_interest=2)\n\n expected_event_table = pd.DataFrame.from_records(\n [\n {\n \"event_at\": 0,\n \"removed\": 0,\n \"observed\": 0,\n \"observed_2\": 0,\n \"censored\": 0,\n \"entrance\": 6,\n \"at_risk\": 6,\n },\n {\n \"event_at\": 1,\n \"removed\": 1,\n \"observed\": 0,\n \"observed_2\": 0,\n \"censored\": 1,\n \"entrance\": 0,\n \"at_risk\": 6,\n },\n {\n 
\"event_at\": 2,\n \"removed\": 1,\n \"observed\": 1,\n \"observed_2\": 0,\n \"censored\": 0,\n \"entrance\": 0,\n \"at_risk\": 5,\n },\n {\n \"event_at\": 3,\n \"removed\": 1,\n \"observed\": 1,\n \"observed_2\": 0,\n \"censored\": 0,\n \"entrance\": 0,\n \"at_risk\": 4,\n },\n {\n \"event_at\": 4,\n \"removed\": 1,\n \"observed\": 1,\n \"observed_2\": 1,\n \"censored\": 0,\n \"entrance\": 0,\n \"at_risk\": 3,\n },\n {\n \"event_at\": 5,\n \"removed\": 1,\n \"observed\": 1,\n \"observed_2\": 1,\n \"censored\": 0,\n \"entrance\": 0,\n \"at_risk\": 2,\n },\n {\n \"event_at\": 6,\n \"removed\": 1,\n \"observed\": 0,\n \"observed_2\": 0,\n \"censored\": 1,\n \"entrance\": 0,\n \"at_risk\": 1,\n },\n ]\n ).set_index(\"event_at\")[[\"removed\", \"observed\", \"observed_2\", \"censored\", \"entrance\", \"at_risk\"]]\n # pandas util for checking if two dataframes are equal\n assert_frame_equal(\n fitter.event_table, expected_event_table, check_dtype=False, check_like=True\n ) # Ignores dtype to avoid int32 vs int64 difference\n\n def test_aj_less_than_km(self, fitter, kmfitter, duration, event_observed):\n # In presence of competing risk, CIF_{AJ} >= CIF_{KM}\n fitter.fit(duration, event_observed, event_of_interest=2) # Aalen-Johansen\n kmfitter.fit(duration, event_observed)\n\n x = np.all(\n np.where(np.array(1 - kmfitter.survival_function_) >= np.array(fitter.cumulative_density_), True, False)\n )\n assert x\n\n def test_no_competing_risk(self, fitter, kmfitter, duration):\n # In presence of no competing risk, CIF_{AJ} == CIF_{KM}\n same_events = [0, 2, 2, 2, 2, 0]\n fitter.fit(duration, same_events, event_of_interest=2) # Aalen-Johansen\n kmfitter.fit(duration, same_events) # Kaplan-Meier\n npt.assert_allclose(np.array(1 - kmfitter.survival_function_), np.array(fitter.cumulative_density_))\n\n def test_variance_calculation_against_sas(self, fitter, duration, event_observed):\n variance_from_sas = np.array([0.0, 0.0, 0.0, 0.0, 0.032, 0.048, 0.048])\n\n fitter.fit(duration, event_observed, event_of_interest=2)\n npt.assert_allclose(variance_from_sas, np.array(fitter.variance_))\n\n def test_ci_calculation_against_sas(self, fitter, duration, event_observed):\n ci_from_sas = np.array(\n [\n [np.nan, np.nan],\n [np.nan, np.nan],\n [np.nan, np.nan],\n [np.nan, np.nan],\n [0.00836904, 0.58185303],\n [0.05197575, 0.75281579],\n [0.05197575, 0.75281579],\n ]\n )\n\n fitter.fit(duration, event_observed, event_of_interest=2)\n npt.assert_allclose(ci_from_sas, np.array(fitter.confidence_interval_))\n\n\nclass TestMixtureCureFitter:\n def test_exponential_data_produces_correct_inference_for_both_cure_and_non_cure_fractions(self):\n N = 1000000\n scale = 5\n T = np.random.exponential(scale, size=N)\n observed = np.ones(N, dtype=bool)\n\n # Censor the data at time = 8\n last_observation_time = 8.0\n mask = T > last_observation_time\n T[mask] = last_observation_time\n observed[mask] = False\n\n # Add in some 'cured' samples, to make it 20% cured\n C = int(N / 4)\n T = np.concatenate([T, last_observation_time * np.ones(C)])\n observed = np.concatenate([observed, np.zeros(C, dtype=bool)])\n\n fitter = MixtureCureFitter(base_fitter=ExponentialFitter())\n fitter.fit(T, event_observed=observed)\n assert abs(fitter.cured_fraction_ - 0.2) < 0.01\n assert abs(fitter.lambda_ / scale - 1) < 0.01\n assert abs(fitter._survival_function([0.2, 1], 1) - 0.49430) < 0.01\n assert abs(fitter.percentile(0.6) - scale * np.log(2)) < 0.01\n\n assert fitter.percentile(0.19) is np.inf\n\n def 
test_should_raise_exception_if_cure_parameter_is_already_in_list_of_parameter_names(self):\n with pytest.raises(\n NameError,\n match=\"'cured_fraction_' in _fitted_parameter_names is a lifelines reserved word.\"\n \" Try something else instead.\",\n ):\n MixtureCureFitter(MixtureCureFitter(base_fitter=ExponentialFitter()))\n"
] | [
[
"scipy.stats.invgamma",
"pandas.to_datetime",
"numpy.minimum",
"pandas.testing.assert_series_equal",
"numpy.linspace",
"pandas.Series",
"numpy.random.weibull",
"numpy.asarray",
"numpy.linalg.norm.cdf",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.dtype",
"numpy.linalg.norm.rvs",
"numpy.concatenate",
"pandas.testing.assert_frame_equal",
"numpy.random.randn",
"numpy.mean",
"pandas.DataFrame.from_records",
"numpy.exp",
"numpy.where",
"numpy.random.randint",
"pandas.read_csv",
"numpy.ones_like",
"numpy.arange",
"scipy.stats.weibull_min.rvs",
"numpy.testing.assert_almost_equal",
"numpy.std",
"numpy.argmax",
"numpy.insert",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"scipy.stats.invweibull",
"numpy.log",
"scipy.stats.logistic.rvs",
"numpy.min",
"numpy.isnan",
"numpy.median",
"numpy.cumprod",
"numpy.random.rand",
"scipy.stats.fisk.rvs",
"numpy.testing.assert_allclose",
"numpy.random.binomial",
"numpy.floor",
"numpy.not_equal",
"numpy.array",
"numpy.maximum",
"numpy.abs",
"numpy.random.seed",
"numpy.random.random",
"pandas.isnull",
"numpy.random.exponential",
"numpy.linalg.norm",
"numpy.percentile",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.random.normal",
"numpy.random.gamma",
"numpy.random.uniform",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
wiseodd/rgpr | [
"263923dd160d40d64894e85190a7341aaa1e4080"
] | [
"aggregate_OOD.py"
] | [
"import numpy as np\nimport pickle\nimport os, sys, argparse\nfrom util.tables import *\nfrom collections import defaultdict, namedtuple\nimport pandas as pd\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mean_only', default=False, action='store_true')\nparser.add_argument('--metrics', default='mmc_fpr95', choices=['mmc_fpr95', 'aur_aupr', 'acc_cal'])\nparser.add_argument('--ood_dset', default='smooth', choices=['smooth', 'imagenet'])\nargs = parser.parse_args()\n\n\npath = f'./results/hyperopt{\"/imagenet\" if args.ood_dset == \"imagenet\" else \"\"}'\n_, _, filenames = next(os.walk(path))\n\nif args.ood_dset != 'imagenet':\n method_types = ['MAP', 'Temp', 'DE', 'BNO', 'LLL', 'LLL-RGPR-loglik', 'LLL-RGPR-ood']\nelse:\n method_types = ['LLL-RGPR-ood']\n\nmethod2str = {'MAP': 'MAP', 'Temp': 'Temp. Scaling', 'DE': 'Deep Ens.', 'BNO': 'GP-DSCS',\n 'DKL': 'SVDKL', 'DKL-RGPR': 'SVDKL-RGPR',\n 'LLL': 'LLL', 'LLL-RGPR-ood': 'LLL-RGPR-OOD', 'LLL-RGPR-loglik': 'LLL-RGPR-LL'\n}\nmetric2str = {'fpr95': 'FPR@95', 'mmc': 'MMC', 'aur': 'AUROC', 'aupr': 'AUPRC'}\ndatasets = ['MNIST', 'CIFAR10', 'SVHN', 'CIFAR100']\n\nTEXTBF = '\\\\textbf'\n\n\ndef get_dfs(dset, type='mmc', return_dicts=False):\n def cond(fname, str):\n return f'_{dset.lower()}_' in fname and str in fname\n\n temps = []\n\n fnames = [fname for fname in filenames if cond(fname, f'_{type}_')]\n\n for fname in fnames:\n with open(f'{path}/{fname}', 'rb') as f:\n d = pickle.load(f)\n\n for k in list(d.keys()):\n if not d[k]: # d[k] is an empty dict\n del d[k]\n\n # print(fname)\n # print(d);input()\n\n if return_dicts:\n temps.append(d)\n else:\n temps.append(pd.DataFrame(d))\n\n if return_dicts:\n return temps\n\n df = pd.concat(temps, ignore_index=False)\n df = df[(m for m in method_types)]\n df_mean = df.groupby(df.index).mean() * 100\n df_std = df.groupby(df.index).sem() * 100\n\n return df_mean, df_std\n\n\ndef get_str(test_dset, method_type, df_mean, df_std, bold=True):\n try:\n mean = df_mean[method_type][test_dset]\n std = df_std[method_type][test_dset]\n except KeyError:\n mean, std = np.NaN, np.NaN\n\n mean = round(mean, 1)\n\n if not np.isnan(mean):\n mean_str = f'\\\\textbf{{{mean:.1f}}}' if bold else f'{mean:.1f}'\n str = f'{mean_str}'\n\n if method_type not in ['MAP', 'DE']:\n str += f'$\\\\pm${std:.1f}'\n else:\n str = '-'\n\n return str\n\n\nif args.metrics != 'acc_cal':\n if args.mean_only:\n metrics = args.metrics.split('_')\n vals = {m: {metrics[0]: [], metrics[1]: []} for m in method_types}\n\n for dset in datasets:\n for metric in metrics:\n df, _ = get_dfs(dset, type=metric)\n\n if metric == 'mmc':\n df = df.drop(index=dset)\n\n for method in method_types:\n vals[method][metric].append(f'{df[method].mean():.1f}')\n\n print()\n for i, metric in enumerate(metrics):\n print(f'\\\\textbf{{{metric2str[metric]}}} $\\\\downarrow$ \\\\\\\\')\n\n for method in method_types:\n if method == 'LLL-RGPR':\n print('\\\\midrule')\n print(f'{method2str[method]} & {\" & \".join(vals[method][metric])} \\\\\\\\')\n\n if i < len(metrics)-1:\n print('\\n\\\\midrule\\n\\\\midrule\\n')\n else:\n values = {dset: defaultdict(list) for dset in datasets}\n\n for dset in datasets:\n metric1, metric2 = args.metrics.split('_')\n\n df1_mean, df1_std = get_dfs(dset, type=metric1)\n df2_mean, df2_std = get_dfs(dset, type=metric2)\n\n for test_dset in df1_mean.index:\n str = []\n\n vals1 = df1_mean.loc[test_dset].round(1).to_numpy()\n\n try:\n vals2 = df2_mean.loc[test_dset].round(1).to_numpy()\n except KeyError:\n vals2 = 
np.array([np.NaN]*len(vals1))\n\n best1 = vals1.min() if metric1 == 'mmc' else vals1.max()\n idx_best1 = vals1.argmin() if metric1 == 'mmc' else vals1.argmax()\n\n best2 = vals2.min() if metric2 == 'fpr95' else vals2.max()\n idx_best2 = vals2.argmin() if metric2 == 'fpr95' else vals2.argmax()\n\n # With error bars to test significance --- for bolding values\n best1_bar = df1_std.loc[test_dset][idx_best1].round(1)\n\n try:\n best2_bar = df2_std.loc[test_dset][idx_best2].round(1)\n except KeyError:\n best2_bar = np.array([np.NaN]*len(vals1))\n\n # print(max_aur, max_aur_bar)\n\n for method_type in method_types:\n if metric1 == 'mmc':\n # * is not significant if against o if: ---(---o-*-)---\n bold = df1_mean[method_type][test_dset].round(1) <= round(best1 + best1_bar, 1)\n else:\n # * is not significant if against o if: ---(-*-o---)---\n bold = df1_mean[method_type][test_dset].round(1) >= round(best1 - best1_bar, 1)\n str1 = get_str(test_dset, method_type, df1_mean, df1_std, bold=False if test_dset == dset else bold)\n\n try:\n if metric2 == 'fpr95':\n # * is not significant if against o if: ---(---o-*-)---\n bold = df2_mean[method_type][test_dset].round(1) <= round(best2 + best2_bar, 1)\n else:\n # * is not significant if against o if: ---(-*-o---)---\n bold = df2_mean[method_type][test_dset].round(1) >= round(best2 - best2_bar, 1)\n except KeyError:\n bold = [False]*len(vals1)\n\n str2 = get_str(test_dset, method_type, df2_mean, df2_std, bold=False if test_dset == dset else bold)\n\n str.append(str1)\n str.append(str2)\n\n values[dset][test_dset] = str\n\n print()\n\n\n ood_noise_names = ['UniformNoise']\n # ood_noise_names = []\n ood_test_names = {\n 'MNIST': ['EMNIST', 'KMNIST', 'FMNIST', 'GrayCIFAR10'],\n 'CIFAR10': ['SVHN', 'LSUN', 'CIFAR100', 'FMNIST3D'],\n 'SVHN': ['CIFAR10', 'LSUN', 'CIFAR100', 'FMNIST3D'],\n 'CIFAR100': ['SVHN', 'LSUN', 'CIFAR10', 'FMNIST3D'],\n }\n\n\n for i, dset in enumerate(datasets):\n print(f'\\\\textbf{{{dset}}} & {\" & \".join(values[dset][dset])} \\\\\\\\')\n\n for ood_dset in ood_test_names[dset] + ood_noise_names:\n print(f'{ood_dset} & {\" & \".join(values[dset][ood_dset])} \\\\\\\\')\n\n if i < len(datasets)-1:\n print()\n print('\\\\midrule')\n print()\n\n print()\n\nelse:\n # Accuracy & Calibration\n # ----------------------\n\n for method_type in method_types:\n if method_type == 'LLL-RGPR-loglik':\n print(r'\\midrule')\n\n str = method2str[method_type] + ' '\n\n for dset in datasets:\n df_acc = pd.DataFrame(get_dfs(dset, 'acc', return_dicts=True)).mean()\n str += f'& {df_acc[method_type]*100:.1f} '\n\n print(str + r'\\\\')\n\n print()\n print(r'\\midrule')\n print(r'\\midrule')\n print()\n\n for method_type in method_types:\n if method_type == 'LLL-RGPR-loglik':\n print(r'\\midrule')\n\n str = method2str[method_type] + ' '\n\n for dset in datasets:\n df_acc = pd.DataFrame(get_dfs(dset, 'cal', return_dicts=True)).mean()\n str += f'& {df_acc[method_type]:.1f} '\n\n print(str + r'\\\\')\n"
] | [
[
"numpy.isnan",
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
asthajn/robotics | [
"727ab3ee5450054687c5637d399c7b5aabd48672"
] | [
"Assignment/graph.py"
] | [
"from matplotlib import pyplot\n\ndef plotGraph(x, y):\n\tpyplot.figure(1)\n\tpyplot.plot(x,y,'ro-',label='Robot motion path')\n\tpyplot.grid(axis='both')\n\tpyplot.xlabel('x coordinate')\n\tpyplot.ylabel('y coordinate')\n\tpyplot.axis([0, 1, 0, 1])\n\tpyplot.title('Plot of Robot Motion')\n\tpyplot.legend(loc=2)\n\tpyplot.show()\n\t#save(\"1\", ext=\"png\", close=False, verbose=True)\n\t\ndef main():\n\tx = []\n\ty = []\n\twith open(\"input.txt\") as f:\n\t\tfor line in f:\n\t\t\tcoord = line.split(\" \")\n\t\t\tx.append(float(coord[0]))\n\t\t\ty.append(float(coord[1]))\n\tplotGraph(x, y)\n\nif __name__ == '__main__':\n\tmain()\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MarcosSalib/Cocktail_MOOC | [
"46279c2ec642554537c639702ed8e540ea49afdf"
] | [
"Preliminaries/Mathematics For ML - ICL/1. Linear Algebra/readonly/bearNecessities.py"
] | [
"import matplotlib\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpy.linalg as la\n\nbear_black = (0.141, 0.11, 0.11)\nbear_white = (0.89, 0.856, 0.856)\nmagenta = (0xfc / 255, 0x75 / 255, 0xdb / 255) # Brighter magenta\norange = (218 / 255, 171 / 255, 115 / 255)\ngreen = (175 / 255, 219 / 255, 133 / 255)\nwhite = (240 / 255, 245 / 255, 250 / 255)\nblue1 = (70 / 255, 101 / 255, 137 / 255)\nblue2 = (122 / 255, 174 / 255, 215 / 255)\n\n\ndef gsBasis(A):\n B = np.array(A, dtype=np.float_)\n B[:, 0] = B[:, 0] / la.norm(B[:, 0])\n B[:, 1] = B[:, 1] - B[:, 1] @ B[:, 0] * B[:, 0]\n if la.norm(B[:, 1]) > 1e-14:\n B[:, 1] = B[:, 1] / la.norm(B[:, 1])\n else:\n B[:, 1] = np.zeros_like(B[:, 1])\n return B\n\n\ndef draw_mirror(bearVectors):\n fig, ax = plt.subplots(figsize=(12, 12), dpi=80)\n ax.set_xlim([-3.50, 3.50])\n ax.set_ylim([-3.50, 3.50])\n ax.set_aspect(1)\n # ax.set_axis_bgcolor(blue1)\n ax.set_facecolor(blue1)\n\n gs = gsBasis(bearVectors)\n ax.plot([gs[0, 0] * -5, gs[0, 0] * 5], [gs[1, 0] * -5, gs[1, 0] * 5], lw=2, color=green, zorder=4)\n ax.fill([\n -5 * gs[0, 0], -5 * gs[0, 0] - 5 * gs[0, 1], 5 * gs[0, 0] - 5 * gs[0, 1], 5 * gs[0, 0]\n ], [\n -5 * gs[1, 0], -5 * gs[1, 0] - 5 * gs[1, 1], 5 * gs[1, 0] - 5 * gs[1, 1], 5 * gs[1, 0]\n ], color=blue2, zorder=0)\n ax.arrow(0, 0, bearVectors[0, 0], bearVectors[1, 0], lw=3, color=orange, zorder=5, head_width=0.1)\n ax.arrow(0, 0, bearVectors[0, 1], bearVectors[1, 1], lw=3, color=orange, zorder=5, head_width=0.1)\n ax.arrow(0, 0, gs[0, 0], gs[1, 0], lw=3, color=magenta, zorder=6, head_width=0.1)\n ax.arrow(0, 0, gs[0, 1], gs[1, 1], lw=3, color=magenta, zorder=6, head_width=0.1)\n return ax\n\n\nbear_black_fur = np.array(\n [[2.0030351, 2.229253, 2.1639012, 2.0809546, 1.9728726,\n 1.8974666, 1.8924396, 2.0030351, np.nan, 2.7017972,\n 2.8500957, 2.9707453, 3.0159889, 2.94561, 2.8299874,\n 2.7017972, np.nan, 2.1639012, 2.2317666, 2.3147132,\n 2.299632, 2.2493613, 2.1890365, 2.1211711, 2.1337387,\n 2.1639012, np.nan, 2.4982011, 2.5610936, 2.6213642,\n 2.633986, 2.5536071, 2.5057417, 2.4982011, np.nan,\n 2.2468478, 2.3247673, 2.4429034, 2.4303357, 2.3448755,\n 2.2820372, 2.2468478, np.nan, 2.1966706, 2.2722074,\n 2.4055076, 2.481933, 2.449941, 2.4001756, 2.3237501,\n 2.222442, 2.1984479, 2.1966706, np.nan, 1.847196,\n 1.7818441, 1.7290599, 1.6310321, 1.4575984, 1.3369488,\n 1.2791375, 1.3671112, 1.8044659, 1.9577914, 2.2367936,\n 2.5962289, 2.7520679, 2.9028799, 3.4005595, 3.3150993,\n 3.0511783, 2.9531506, 2.8676905, 2.7746897, 2.4052003,\n 2.2795237, 2.1639012, 1.847196, np.nan, 2.0491517,\n 2.5112591, 2.3175294, 2.1326865, 2.0491517],\n [-1.3186252, -1.0902537, -0.99238015, -0.96477475, -0.99488975,\n -1.1153494, -1.2408283, -1.3186252, np.nan, -1.1881273,\n -1.0852346, -1.1454645, -1.3286636, -1.4666904, -1.4641808,\n -1.1881273, np.nan, -1.5545256, -1.5219011, -1.4014413,\n -1.3512497, -1.3412115, -1.3989317, -1.4917862, -1.5419777,\n -1.5545256, np.nan, -1.4265371, -1.3964222, -1.4968054,\n -1.6097363, -1.64738, -1.5545256, -1.4265371, np.nan,\n -1.6423608, -1.6699662, -1.677495, -1.7176483, -1.7477632,\n -1.7176483, -1.6423608, np.nan, -1.7223509, -1.7622781,\n -1.7764744, -1.7613908, -1.8767359, -1.9805465, -1.9991791,\n -1.9672374, -1.913114, -1.7223509, np.nan, -1.5043341,\n -1.5444873, -1.486767, -1.1504836, -1.0626484, -1.11284,\n -1.2558858, -1.7452537, -2.3902152, -2.4378972, -2.3575907,\n -2.1467861, -2.2446597, -2.5527822, -2.5527822, -2.1919586,\n -1.7828973, -1.6850238, -1.677495, 
-1.8431272, -2.028836,\n -2.0363647, -1.9485295, -1.5043341, np.nan, -2.5527822,\n -2.5527822, -2.4570104, -2.4463632, -2.5527822]])\n\nbear_white_fur = np.array(\n [[2.229253, 2.4680387, 2.7017972, 2.8299874, 2.8676905,\n 2.7746897, 2.4052003, 2.2795237, 2.1639012, 1.847196,\n 2.0030351, 2.229253, np.nan, 1.8044659, 1.8974666,\n 2.0491517, 2.1326865, 2.3175294, 2.5112591, 2.9028799,\n 2.7520679, 2.5962289, 2.2367936, 1.9577914, 1.8044659],\n [-1.0902537, -1.0601388, -1.1881273, -1.4641809, -1.677495,\n -1.8431272, -2.028836, -2.0363647, -1.9485295, -1.5043341,\n -1.3186252, -1.0902537, np.nan, -2.3902152, -2.5527822,\n -2.5527822, -2.4463632, -2.4570104, -2.5527822, -2.5527822,\n -2.2446597, -2.1467861, -2.3575907, -2.4378972, -2.3902152]])\n\nbear_face = np.array(\n [[2.2419927, 2.2526567, 2.3015334, 2.3477442, 2.441943,\n np.nan, 2.5258499, 2.5113971, 2.5327621, 2.5632387,\n 2.5780058, 2.5726645, 2.5475292, 2.5258499, np.nan,\n 2.2858075, 2.2704121, 2.2402497, 2.2283105, 2.2484187,\n 2.273554, 2.2858075],\n [-1.7605035, -1.9432811, -1.9707865, -1.9654629, -1.781798,\n np.nan, -1.4688862, -1.4942957, -1.5099806, -1.5112354,\n -1.4877081, -1.466063, -1.4588479, -1.4688862, np.nan,\n -1.4346933, -1.4506918, -1.4463002, -1.418381, -1.4055194,\n -1.4083427, -1.4346933]])\n"
] | [
[
"matplotlib.use",
"numpy.linalg.norm",
"matplotlib.pyplot.subplots",
"numpy.zeros_like",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
schnur/oss-repo-template | [
"d9e3ea7cae43dd1dd1ff7acef8b1249f3a95a848"
] | [
"labs/lab-06/example1.py"
] | [
"import networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nG = nx.gnp_random_graph(100, 0.02, seed=10374196)\n\ndegree_sequence = sorted((d for n, d in G.degree()), reverse=True)\ndmax = max(degree_sequence)\n\nfig = plt.figure(\"Degree of a random graph\", figsize=(8, 8))\n# Create a gridspec for adding subplots of different sizes\naxgrid = fig.add_gridspec(5, 4)\n\nax0 = fig.add_subplot(axgrid[0:3, :])\nGcc = G.subgraph(sorted(nx.connected_components(G), key=len, reverse=True)[0])\npos = nx.spring_layout(Gcc, seed=10396953)\nnx.draw_networkx_nodes(Gcc, pos, ax=ax0, node_size=20)\nnx.draw_networkx_edges(Gcc, pos, ax=ax0, alpha=0.4)\nax0.set_title(\"Connected components of G\")\nax0.set_axis_off()\n\nax1 = fig.add_subplot(axgrid[3:, :2])\nax1.plot(degree_sequence, \"b-\", marker=\"o\")\nax1.set_title(\"Degree Rank Plot\")\nax1.set_ylabel(\"Degree\")\nax1.set_xlabel(\"Rank\")\n\nax2 = fig.add_subplot(axgrid[3:, 2:])\nax2.bar(*np.unique(degree_sequence, return_counts=True))\nax2.set_title(\"Degree histogram\")\nax2.set_xlabel(\"Degree\")\nax2.set_ylabel(\"# of Nodes\")\n\nfig.tight_layout()\nplt.show()"
] | [
[
"matplotlib.pyplot.show",
"numpy.unique",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SimonBoothroyd/surrogates | [
"a53bee444b4587d0290e8dcacd8be3ff6011fd02"
] | [
"studies/mcmc/gaussian/run_3d.py"
] | [
"import numpy\nfrom matplotlib import pyplot\n\nfrom surrogates.kernels import MCMCSimulation\nfrom surrogates.kernels.samplers.hmc import Hamiltonian\nfrom surrogates.models.simple import UnconditionedModel\nfrom surrogates.utils.distributions import Normal\nfrom surrogates.utils.file import change_directory\nfrom surrogates.utils.plotting import plot_corner, plot_log_p, plot_trace\n\n\ndef main():\n\n std_devs = {\"a\": 0.05, \"b\": 50.0, \"c\": 5000.0}\n\n priors = {\n \"a\": Normal(numpy.array([0.0]), numpy.array([std_devs[\"a\"]])),\n \"b\": Normal(numpy.array([100.0]), numpy.array([std_devs[\"b\"]])),\n \"c\": Normal(numpy.array([0.0]), numpy.array([std_devs[\"c\"]])),\n }\n model = UnconditionedModel(priors)\n\n # Construct and run the simulation object.\n initial_parameters = {\n \"a\": numpy.array([0.0]),\n \"b\": numpy.array([0.0]),\n \"c\": numpy.array([0.0]),\n }\n\n # Setup the sampler\n sampler = Hamiltonian(\n model,\n momentum_scales={\n \"a\": numpy.array([1.0 / std_devs[\"a\"]]),\n \"b\": numpy.array([1.0 / std_devs[\"b\"]]),\n \"c\": numpy.array([1.0 / std_devs[\"c\"]]),\n },\n step_size=1.0,\n n_steps=10,\n )\n\n # Run the simulation.\n with change_directory(\"3d_univariate\"):\n\n simulation = MCMCSimulation(\n model, initial_parameters, sampler=sampler, random_seed=42\n )\n simulation.run(2000, 20000)\n\n # Plot the output.\n trace_figure = plot_trace(simulation.trace, show=False)\n trace_figure.savefig(\"trace.png\")\n pyplot.close(trace_figure)\n\n corner_figure = plot_corner(\n simulation.trace, model.trainable_parameters, show=False\n )\n corner_figure.savefig(\"corner.png\")\n pyplot.close(corner_figure)\n\n log_p_figure = plot_log_p(simulation.log_p_trace, show=False)\n log_p_figure.savefig(\"log_p.png\")\n pyplot.close(log_p_figure)\n\n for label in std_devs:\n\n print(\n f\"{label}: std estimated={numpy.std(simulation.trace[label])} \"\n f\"real={std_devs[label]}\"\n )\n print(\n f\"{label}: mean estimated={numpy.mean(simulation.trace[label])} real={0.0}\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.std",
"numpy.array",
"numpy.mean",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
terryli710/SIIM-ACR-Pneumothorax-Classification | [
"8b278a9885b71c919d7064b2df42863b53f7adf3"
] | [
"transfer_learning_discussion.py"
] | [
"# Adapted from .ipynb file\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\nfrom imblearn.under_sampling import RandomUnderSampler\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.metrics import confusion_matrix\nfrom tensorflow.keras.applications import VGG16, InceptionV3\nfrom tensorflow.keras import layers\nfrom keras.losses import binary_crossentropy\nimport matplotlib.pyplot as plt\nfrom util import getXY, dicom2df, flattenimg, lossCurve\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\nfrom tensorflow.python.client import device_lib\n\n#%%\n\ntf.config.experimental.list_physical_devices('GPU')\n\n# Loading data\nrle_df = pd.read_csv('train-rle.csv')\nrle_df.columns = ['ImageId', 'EncodedPixels']\n\n#%%\n\ntrain_file_list = sorted(glob('dicom-images-train/*/*/*.dcm'))\nmetadata_df = dicom2df(train_file_list, rle_df)\n\n#%%\n\n# x, y = getXY(metadata_df, verbose=True)\n\n#%%\n\n# A smaller data set?\nX, Y = getXY(metadata_df, verbose=True)\n# x_test, y_test = getXY(metadata_df.iloc[5000:5500], verbose=True)\nx_train, x_test, y_train, y_test = train_test_split(X, Y, stratify=Y, test_size=0.1, random_state=9001)\n\n#%%\n# Preprocessing input\nX_processed = preprocess_input(x_train)\nx_test_processed = preprocess_input(x_test)\n\n\n#%%\n#balanced dataset\nindex = np.arange(0, x_train.shape[0], 1)\nrus = RandomUnderSampler(random_state=9001)\ni_rus, y_rus = rus.fit_resample(np.expand_dims(index, axis=-1), y_train)\nx_rus = x_train[i_rus[:,0]]\nprint('Balanced data set: positive cases {}; negative cases {}'.format(np.sum(y_rus==1), np.sum(y_rus==0)))\n#%%\n\ny_train = OneHotEncoder().fit_transform(y_rus.reshape(-1,1)).toarray()\ny_test = OneHotEncoder().fit_transform(y_test.reshape(-1,1)).toarray()\n\n#%%\nkr = keras.regularizers.l1_l2(0.01, 0.01)\nbase_model = VGG16(include_top=False, weights='imagenet',\n input_shape=(224, 224, 3))\nbase_model.trainable = False\ninputs = tf.keras.Input(shape=(224, 224, 3))\nx = base_model(inputs)\nx = tf.keras.layers.Flatten()(x)\nx = tf.keras.layers.Dropout(0.2)(x)\nx = tf.keras.layers.Dense(256, activation='relu', kernel_regularizer=kr)(x)\nx = tf.keras.layers.Dropout(0.2)(x)\nx = tf.keras.layers.Dense(64, activation='relu', kernel_regularizer=kr)(x)\noutputs = tf.keras.layers.Dense(2, activation='softmax')(x)\nmodel = keras.Model(inputs, outputs)\n\n#%%\n\nfor l in model.layers: print(l.name, l.trainable)\nmodel.summary()\n\n#%%\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),loss='categorical_crossentropy', metrics=[\"accuracy\", \"AUC\"])\n\n#%%\n\nhistory = model.fit(x_rus, y_train, batch_size=32, epochs=50, shuffle=True, validation_split=0.1)\n#%%\n\nlossCurve(history)\ny_pred = model.predict(x_test)\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(score)\ncm = confusion_matrix(y_test[:,0]==1, y_pred[:,0]>0.5)\ncm\nplt.matshow(cm)\n\n\n#%%\nbase_model = InceptionV3(include_top=False, weights=\"imagenet\", input_shape=(224, 224,3))\n\nbase_model.trainable = False\n#for l in base_model.layers:\n# l.trainable = False\n#base_model.layers[-2].trainable = True\n#base_model.layers[-3].trainable = True\n#base_model.layers[-4].trainable = True\n\n\ninputs = tf.keras.Input(shape=(224, 224, 3))\nx = base_model(inputs)\nx = tf.keras.layers.Flatten()(x)\n#x = tf.keras.layers.Dense(64, activation='relu')(x)\n#x = tf.keras.layers.Dropout(0.2)(x)\n#x = 
tf.keras.layers.Dense(32, activation='relu')(x)\n#x = tf.keras.layers.Dropout(0.2)(x)\noutputs = tf.keras.layers.Dense(2, activation=tf.nn.sigmoid)(x)\nmodel = keras.Model(inputs, outputs)\n\nmodel.summary()\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),loss='categorical_crossentropy',metrics=[\"accuracy\", \"AUC\"])\n\nhistory_2 = model.fit(x_rus, y_train, batch_size=32, epochs=20, shuffle=True, validation_split=0.15)\n\n#%%\n\nlossCurve(history_2)\ny_pred = model.predict(x_rus)\nscore = model.evaluate(x_test, y_test, verbose=0)\n# print(score[0],score[1])\ncm = confusion_matrix(y_train[:,0]==1, y_pred[:,0]>0.5)\ncm\nplt.matshow(cm)"
] | [
[
"numpy.expand_dims",
"tensorflow.keras.applications.InceptionV3",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.matshow",
"tensorflow.keras.regularizers.l1_l2",
"pandas.read_csv",
"tensorflow.keras.Input",
"numpy.arange",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"tensorflow.config.experimental.list_physical_devices",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.Model",
"tensorflow.keras.applications.VGG16",
"numpy.sum",
"sklearn.preprocessing.OneHotEncoder",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.applications.vgg16.preprocess_input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
amichalski2/WBC-SHAP | [
"b69a4a8746aaf7a8dfacfdb4dbd85b4868d73ad0"
] | [
"scripts/data_extract.py"
] | [
"import os\nimport cv2\nimport random\nimport numpy as np\nfrom tensorflow.keras.utils import to_categorical\nfrom scripts.consts import class_dict\n\n\ndef get_data(path, split=0.2):\n\n X, y = [], []\n\n for directory in os.listdir(path):\n\n dirpath = os.path.join(path, directory)\n print(directory, len(os.listdir(dirpath)))\n\n for file in os.listdir(dirpath):\n\n filepath = os.path.join(dirpath, file)\n img = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)\n \n if img.shape != (360, 363, 3):\n img = cv2.resize(img, (360, 363), cv2.INTER_CUBIC)\n\n X.append(img)\n y.append(class_dict[directory])\n\n data = list(zip(X, y))\n\n random.shuffle(data)\n\n X, y = zip(*data)\n\n num_train = int((1.0 - split) * len(y))\n\n X_train, X_valid = np.array(X[:num_train]).astype(\n 'float32'), np.array(X[num_train:]).astype('float32')\n y_train, y_valid = np.array(\n y[:num_train]).reshape(-1, 1), np.array(y[num_train:]).reshape((-1, 1))\n\n X_train = X_train / 255.0\n X_valid = X_valid / 255.0\n\n y_train, y_valid = to_categorical(y_train), to_categorical(y_valid)\n\n print(X_train.shape, y_train.shape)\n print(X_valid.shape, y_valid.shape)\n\n return X_train, y_train, X_valid, y_valid\n"
] | [
[
"numpy.array",
"tensorflow.keras.utils.to_categorical"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
Virinas-code/GobyChess | [
"dc6129a4d5a5e061714714402d9cd472efc599f8"
] | [
"gobychess/train.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"\nTry to train evaluation in supervised fashion with engineered loss function\n\"\"\"\n\nimport sys\n\nimport chess\nimport h5py\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.math import log, sigmoid, pow\n\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(100, activation=tf.nn.relu, input_shape=(768,)), # input shape required\n tf.keras.layers.Dense(50, activation=tf.nn.relu),\n tf.keras.layers.Dense(1)\n])\n\nmodel.summary()\n\nf_data = h5py.File('data/data.h5', 'r')\ndset_data = f_data['features']\n\nf_meta = h5py.File('data/meta.h5', 'r')\ndset_meta = f_meta['features']\n\nf_val_data = h5py.File('data/test_data.h5', 'r')\ndset_val_data = f_val_data['features']\n\nf_val_eval = h5py.File('data/test_eval.h5', 'r')\ndset_val_eval = f_val_eval['features']\n\nbatch_size = 32\nnum_batches = dset_data.shape[0] // batch_size\ntraining_samples = num_batches * batch_size\nkappa = 10\n\ndef loss(pmodel, pposition, pnext_position, prandom_position, last, result, to_move, training):\n y_position = pmodel(pposition, training=training)\n y_next_position = pmodel(pnext_position, training=training)\n y_random_position = pmodel(prandom_position, training=training)\n last = tf.cast(last, dtype=bool)\n\n y_next_position = tf.where(tf.reshape(last, [32,1]),\n tf.reshape(result, [32,1]),\n tf.reshape(y_next_position, [32, 1]))\n\n return -(tf.reduce_mean(log(sigmoid(tf.cast(tf.reshape(tf.math.pow(-1, to_move), [32, 1]), dtype=tf.float32) * (y_random_position - y_next_position)))\n + kappa * log(sigmoid(- y_position + y_next_position))\n + kappa * log(sigmoid(y_position - y_next_position))))\n\n\ndef grad(pmodel, pposition, pnext_position, prandom_position, last, result, to_move):\n with tf.GradientTape() as tape:\n loss_val = loss(pmodel, pposition, pnext_position, prandom_position, last, result, to_move, training=True)\n return loss_val, tape.gradient(loss_val, pmodel.trainable_variables)\n\n\noptimizer = tf.keras.optimizers.SGD(learning_rate=0.01)\n\n\n# # Keep results for plotting\ntrain_loss_results = []\ntrain_accuracy_results = []\n\nnum_epochs = 30\n\nfval = h5py.File('data/test.h5', 'r')\ndset_val = fval['features']\n\nfor epoch in range(num_epochs):\n epoch_loss_avg = tf.keras.metrics.Mean()\n\n for i in range(num_batches):\n\n print(f\"Batch: {i}\", end=\"\\r\")\n position = np.reshape(dset_data[i:i+batch_size, 0, :, :], (batch_size, 768))\n next_position = np.reshape(dset_data[i:i+batch_size, 1, :, :], (batch_size, 768))\n random_position = np.reshape(dset_data[i:i+batch_size, 2, :, :], (batch_size, 768))\n last = dset_meta[i:i+batch_size, 0]\n result = dset_meta[i:i+batch_size, 1]\n to_move = dset_meta[i:i+batch_size, 2]\n\n loss_value, grads = grad(model, position, next_position, random_position, last, result, to_move)\n\n\n epoch_loss_avg.update_state(loss_value) # Add current batch loss\n\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n #print(f\"Trained {num_game} games\", end=\"\\r\")\n\n # End epoch\n train_loss_results.append(epoch_loss_avg.result())\n\n if epoch % 1 == 0:\n test_pos_0 = model(np.reshape(dset_val_data[1], (1, 768)))\n test_pos_1 = model(np.reshape(dset_val_data[8], (1, 768)))\n test_pos_2 = model(np.reshape(dset_val_data[10], (1, 768)))\n mse = tf.reduce_mean(tf.math.pow(model(np.reshape(dset_val_data, (dset_val_data[:].shape[0], 768))) - dset_val_eval[:], 2))\n print(\"Epoch {:03d}: Loss: {:.3f}: mse: {}, Test Pos. 0: {}, Test Pos. -1: {}, Test Pos. 
+1: {}\".format(epoch, epoch_loss_avg.result(), mse,\n test_pos_0, test_pos_1, test_pos_2))\n\n\n"
] | [
[
"numpy.reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.math.sigmoid",
"tensorflow.GradientTape",
"tensorflow.math.pow",
"tensorflow.keras.metrics.Mean",
"tensorflow.keras.optimizers.SGD"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
xu-hong-/scipy | [
"f737001cf0a75654efe09a1de5cdf5d1895bda59"
] | [
"scipy/spatial/tests/test_spherical_voronoi.py"
] | [
"from __future__ import print_function\nimport numpy as np\nfrom numpy.testing import (TestCase,\n assert_almost_equal,\n assert_array_equal,\n assert_array_almost_equal)\nfrom scipy.spatial import SphericalVoronoi, distance\nfrom scipy.spatial import _spherical_voronoi as spherical_voronoi\n\n\nclass TestCircumcenters(TestCase):\n\n def test_circumcenters(self):\n tetrahedrons = np.array([\n [[1, 2, 3],\n [-1.1, -2.1, -3.1],\n [-1.2, 2.2, 3.2],\n [-1.3, -2.3, 3.3]],\n [[10, 20, 30],\n [-10.1, -20.1, -30.1],\n [-10.2, 20.2, 30.2],\n [-10.3, -20.3, 30.3]]\n ])\n\n result = spherical_voronoi.calc_circumcenters(tetrahedrons)\n\n expected = [\n [-0.5680861153262529, -0.133279590288315, 0.1843323216995444],\n [-0.5965330784014926, -0.1480377040397778, 0.1981967854886021]\n ]\n\n assert_array_almost_equal(result, expected)\n\n\nclass TestProjectToSphere(TestCase):\n\n def test_unit_sphere(self):\n points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n center = np.array([0, 0, 0])\n radius = 1\n projected = spherical_voronoi.project_to_sphere(points, center, radius)\n assert_array_almost_equal(points, projected)\n\n def test_scaled_points(self):\n points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n center = np.array([0, 0, 0])\n radius = 1\n scaled = points * 2\n projected = spherical_voronoi.project_to_sphere(scaled, center, radius)\n assert_array_almost_equal(points, projected)\n\n def test_translated_sphere(self):\n points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n center = np.array([1, 2, 3])\n translated = points + center\n radius = 1\n projected = spherical_voronoi.project_to_sphere(translated, center,\n radius)\n assert_array_almost_equal(translated, projected)\n\n\nclass TestSphericalVoronoi(TestCase):\n\n def setUp(self):\n self.points = np.array([\n [-0.78928481, -0.16341094, 0.59188373],\n [-0.66839141, 0.73309634, 0.12578818],\n [0.32535778, -0.92476944, -0.19734181],\n [-0.90177102, -0.03785291, -0.43055335],\n [0.71781344, 0.68428936, 0.12842096],\n [-0.96064876, 0.23492353, -0.14820556],\n [0.73181537, -0.22025898, -0.6449281],\n [0.79979205, 0.54555747, 0.25039913]]\n )\n\n def test_constructor(self):\n center = np.array([1, 2, 3])\n radius = 2\n s1 = SphericalVoronoi(self.points)\n s2 = SphericalVoronoi(self.points, radius)\n s3 = SphericalVoronoi(self.points, None, center)\n s4 = SphericalVoronoi(self.points, radius, center)\n assert_array_equal(s1.center, np.array([0, 0, 0]))\n self.assertEqual(s1.radius, 1)\n assert_array_equal(s2.center, np.array([0, 0, 0]))\n self.assertEqual(s2.radius, 2)\n assert_array_equal(s3.center, center)\n self.assertEqual(s3.radius, 1)\n assert_array_equal(s4.center, center)\n self.assertEqual(s4.radius, radius)\n\n def test_vertices_regions_translation_invariance(self):\n sv_origin = SphericalVoronoi(self.points)\n center = np.array([1, 1, 1])\n sv_translated = SphericalVoronoi(self.points + center, None, center)\n assert_array_equal(sv_origin.regions, sv_translated.regions)\n assert_array_almost_equal(sv_origin.vertices + center,\n sv_translated.vertices)\n\n def test_vertices_regions_scaling_invariance(self):\n sv_unit = SphericalVoronoi(self.points)\n sv_scaled = SphericalVoronoi(self.points * 2, 2)\n assert_array_equal(sv_unit.regions, sv_scaled.regions)\n assert_array_almost_equal(sv_unit.vertices * 2,\n sv_scaled.vertices)\n\n def test_sort_vertices_of_regions(self):\n sv = SphericalVoronoi(self.points)\n unsorted_regions = sv.regions\n sv.sort_vertices_of_regions()\n assert_array_equal(sorted(sv.regions), 
sorted(unsorted_regions))\n\n def test_voronoi_circles(self):\n sv = spherical_voronoi.SphericalVoronoi(self.points)\n for vertex in sv.vertices:\n distances = distance.cdist(sv.points,np.array([vertex]))\n closest = np.array(sorted(distances)[0:3])\n assert_almost_equal(closest[0], closest[1], 7, str(vertex))\n assert_almost_equal(closest[0], closest[2], 7, str(vertex))\n"
] | [
[
"scipy.spatial._spherical_voronoi.project_to_sphere",
"numpy.testing.assert_array_equal",
"scipy.spatial._spherical_voronoi.SphericalVoronoi",
"scipy.spatial._spherical_voronoi.calc_circumcenters",
"numpy.array",
"scipy.spatial.SphericalVoronoi",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
WrathTitan/twentyone | [
"22665214b2e7699d9c785b22174bdab8992cbb7d"
] | [
"Files/timeseries.py"
] | [
"from pmdarima import auto_arima\n#from fbprophet import Prophet\nimport json\n#from fbprophet.serialize import model_to_json, model_from_json\nimport os\nimport yaml\nfrom yaml.loader import FullLoader\nimport plotly\nimport pandas as pd\nimport plotly.express as ex\nimport shutil\n#import kaleido\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport pickle\n\nfrom Files.metrics import Metrics as met\nclass timeseries:\n def createprophet(self,dataconfig):\n with open(dataconfig) as f:\n dataconfigfile= yaml.load(f,Loader=FullLoader)\n data=dataconfig[\"data\"]\n location=dataconfig[\"location\"]\n model=Prophet()\n testsize=int(len(data)*0.2)\n train=data.iloc[:-testsize]\n test=data.iloc[-testsize:]\n model.fit(train)\n pred=model.predict(test)\n pred=pred.yhat\n actual=test.y\n\n metrics=met.calculate_metrics(\"fbprophet\",\"Regression\",pred,actual)\n metricsLocation=os.path.join(dataconfigfile[\"location\"],\"metrics.csv\")\n metrics.to_csv(metricsLocation, index=True)\n\n compare=pd.DataFrame(pred.values,columns=['predictions'])\n compare['actual']=actual.values\n print(compare)\n fig=compare.plot(legend=True)\n plotly.offline.plot(fig,filename=os.path.join(location,\"fbtestvspred.html\"))\n\n modelfinal=Prophet()\n modelfinal.fit(data)\n location=\"serialized_model.json\"\n location=os.path.join(dataconfigfile['location'],str(dataconfigfile['projectname'])+str('fb'))\n with open(location, 'w') as fout: #save the model\n json.dump(model_to_json(modelfinal), fout)\n return location\n\n def fbinference(self,location,number):\n with open(location, 'r') as fin:\n model = model_from_json(json.load(fin))\n future=model.make_future_dataframe(periods=number)\n pred=model.predict(future)\n return pred\n\n def createarima(self,dataconfig):\n with open(dataconfig) as f:\n dataconfigfile= yaml.load(f,Loader=FullLoader)\n metrics=pd.DataFrame(columns=['modelname','mean_absolute_error','mean_squared_error','r2_score','mean_squared_log_error'])\n \n data=pd.read_csv(dataconfigfile[\"clean_data_address\"])\n location=dataconfigfile[\"location\"]\n testsize=int(len(data)*0.2)\n train=data.iloc[:-testsize]\n test=data.iloc[-testsize:]\n model = auto_arima(train['y'],trace=True, seasonal=True) \n testpred=model.predict(testsize)\n testactual=test.y\n\n metrics_new_row=met.calculate_metrics(\"arima\",\"Regression\",testpred,testactual)\n metricsLocation=os.path.join(dataconfigfile[\"location\"],\"metrics.csv\")\n metrics.loc[len(metrics.index)]=metrics_new_row\n metrics.to_csv(metricsLocation, index=True)\n compare=pd.DataFrame(testpred,columns=['predictions'])\n compare['actual']=testactual.values\n\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=compare.index,y=compare.actual,name=\"actual\"))\n\n fig.add_trace(go.Scatter(x=compare.index,y=compare.predictions,name=\"predictions\"))\n \n \n plotlocation=dataconfigfile['location']\n fig.write_html(os.path.join(plotlocation,\"plot.html\"))\n plotlocation=os.path.join(plotlocation,\"plot.html\")\n\n modelfinal=auto_arima(data['y'], trace=True,suppress_warnings=True, seasonal=True)\n location=os.path.join(dataconfigfile[\"location\"],str(dataconfigfile[\"id\"])+\"_model\")\n os.makedirs(location)\n name=str(dataconfigfile[\"experimentname\"])+str(dataconfigfile[\"id\"])+\"_model\"\n # modelfinal.save(name)\n with open(name, 'wb') as pkl:\n pickle.dump(modelfinal, pkl)\n\n shutil.move(name,location)\n\n pickleFilePath =os.path.join(location,name)\n \n return {\"Successful\": True, \"cleanDataPath\": 
dataconfigfile[\"clean_data_address\"], \"metricsLocation\":metricsLocation, \"pickleFolderPath\":location, \"pickleFilePath\":pickleFilePath, \"plotLocation\":plotlocation}\n "
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
1170300521/RCCF | [
"561b567350c6d172402166b67fe1891f8ec127c8"
] | [
"lib/datasets/sample/refdet.py"
] | [
"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport torch.utils.data as data\r\nimport numpy as np\r\nimport torch\r\nimport json\r\nimport cv2\r\nimport os\r\nfrom utils.image import flip, color_aug\r\nfrom utils.image import get_affine_transform, affine_transform\r\nfrom utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian\r\nfrom utils.image import draw_dense_reg\r\nimport math\r\n\r\nclass RefDetDataset(data.Dataset):\r\n def _coco_box_to_bbox(self, box):\r\n bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],\r\n dtype=np.float32)\r\n return bbox\r\n\r\n def _get_border(self, border, size):\r\n i = 1\r\n while size - border // i <= border // i:\r\n i *= 2\r\n return border // i\r\n\r\n def __getitem__(self, index):\r\n sent_id = self.sent_ids_split[index]\r\n #print(sent_id)\r\n sent_label = self.data_h5['labels'][sent_id]\r\n ref = self.sentToRef[sent_id]\r\n img_id = ref['image_id']\r\n file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']\r\n img_path = os.path.join(self.img_dir, file_name)\r\n ann_id = ref['ann_id']\r\n box = ref['box']\r\n img = cv2.imread(img_path)\r\n\r\n height, width = img.shape[0], img.shape[1]\r\n c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)\r\n if self.opt.keep_res:\r\n input_h = (height | self.opt.pad) + 1\r\n input_w = (width | self.opt.pad) + 1\r\n s = np.array([input_w, input_h], dtype=np.float32)\r\n else:\r\n s = max(img.shape[0], img.shape[1]) * 1.0\r\n input_h, input_w = self.opt.input_h, self.opt.input_w\r\n \r\n # flipped = False\r\n # if self.split == 'train':\r\n # if not self.opt.not_rand_crop:\r\n # s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))\r\n # w_border = self._get_border(128, img.shape[1])\r\n # h_border = self._get_border(128, img.shape[0])\r\n # c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)\r\n # c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)\r\n # else:\r\n # sf = self.opt.scale\r\n # cf = self.opt.shift\r\n # c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)\r\n # c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)\r\n # s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)\r\n \r\n # if np.random.random() < self.opt.flip:\r\n # flipped = True\r\n # img = img[:, ::-1, :]\r\n # c[0] = width - c[0] - 1\r\n \r\n\r\n trans_input = get_affine_transform(\r\n c, s, 0, [input_w, input_h])\r\n inp = cv2.warpAffine(img, trans_input, \r\n (input_w, input_h),\r\n flags=cv2.INTER_LINEAR)\r\n inp = (inp.astype(np.float32) / 255.)\r\n if self.split == 'train' and not self.opt.no_color_aug:\r\n color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)\r\n inp = (inp - self.mean) / self.std\r\n inp = inp.transpose(2, 0, 1)\r\n\r\n output_h = input_h // self.opt.down_ratio\r\n output_w = input_w // self.opt.down_ratio\r\n num_classes = 1 # RCCF center point of subject\r\n# num_classes = self.num_classes\r\n trans_output = get_affine_transform(c, s, 0, [output_w, output_h])\r\n\r\n hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)\r\n wh = np.zeros((self.max_objs, 2), dtype=np.float32)\r\n dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)\r\n reg = np.zeros((self.max_objs, 2), dtype=np.float32)\r\n ind = np.zeros((self.max_objs), dtype=np.int64)\r\n reg_mask = np.zeros((self.max_objs), dtype=np.uint8)\r\n # cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)\r\n # cat_spec_mask = 
np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)\r\n \r\n draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \\\r\n draw_umich_gaussian\r\n\r\n gt_det = []\r\n gt_bbox = box.copy()\r\n bbox = self._coco_box_to_bbox(box)\r\n # cls_id = int(self.cat_ids[ann['category_id']])\r\n # if flipped:\r\n #bbox[[0, 2]] = width - bbox[[2, 0]] - 1\r\n bbox[:2] = affine_transform(bbox[:2], trans_output)\r\n bbox[2:] = affine_transform(bbox[2:], trans_output)\r\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)\r\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)\r\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\r\n if h > 0 and w > 0:\r\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\r\n radius = max(0, int(radius))\r\n radius = self.opt.hm_gauss if self.opt.mse_loss else radius\r\n ct = np.array(\r\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\r\n ct_int = ct.astype(np.int32)\r\n draw_gaussian(hm[0], ct_int, radius)\r\n wh[0] = 1. * w, 1. * h\r\n ind[0] = ct_int[1] * output_w + ct_int[0]\r\n reg[0] = ct - ct_int\r\n reg_mask[0] = 1\r\n # cat_spec_wh[0, cls_id * 2: cls_id * 2 + 2] = wh[0]\r\n # cat_spec_mask[0, cls_id * 2: cls_id * 2 + 2] = 1\r\n if self.opt.dense_wh:\r\n draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[0], radius)\r\n gt_det.append([ct[0] - w / 2, ct[1] - h / 2, \r\n ct[0] + w / 2, ct[1] + h / 2, 1, 0])\r\n # for k in range(num_objs):\r\n # ann = anns[k]\r\n # bbox = self._coco_box_to_bbox(ann['bbox'])\r\n # cls_id = int(self.cat_ids[ann['category_id']])\r\n # if flipped:\r\n # bbox[[0, 2]] = width - bbox[[2, 0]] - 1\r\n # bbox[:2] = affine_transform(bbox[:2], trans_output)\r\n # bbox[2:] = affine_transform(bbox[2:], trans_output)\r\n # bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)\r\n # bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)\r\n # h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\r\n # if h > 0 and w > 0:\r\n # radius = gaussian_radius((math.ceil(h), math.ceil(w)))\r\n # radius = max(0, int(radius))\r\n # radius = self.opt.hm_gauss if self.opt.mse_loss else radius\r\n # ct = np.array(\r\n # [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\r\n # ct_int = ct.astype(np.int32)\r\n # draw_gaussian(hm[cls_id], ct_int, radius)\r\n # wh[k] = 1. * w, 1. 
* h\r\n # ind[k] = ct_int[1] * output_w + ct_int[0]\r\n # reg[k] = ct - ct_int\r\n # reg_mask[k] = 1\r\n # cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]\r\n # cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1\r\n # if self.opt.dense_wh:\r\n # draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)\r\n # gt_det.append([ct[0] - w / 2, ct[1] - h / 2, \r\n # ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])\r\n \r\n ret = {'input': inp, 'sentence': sent_label, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh}\r\n if self.opt.dense_wh:\r\n hm_a = hm.max(axis=0, keepdims=True)\r\n dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)\r\n ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})\r\n del ret['wh']\r\n # elif self.opt.cat_spec_wh:\r\n # ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})\r\n # del ret['wh']\r\n if self.opt.reg_offset:\r\n ret.update({'reg': reg})\r\n# if self.opt.debug > 0 or not self.split == 'train':\r\n if True:\r\n gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \\\r\n np.zeros((1, 6), dtype=np.float32)\r\n meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}\r\n ret['meta'] = meta\r\n if self.opt.use_aux:\r\n ret['objects'] = self.getitem_obj(img_id, sent_id)\r\n return ret\r\n\r\n\r\n def getitem_obj(self, img_id, sent_id):\r\n file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']\r\n img_path = os.path.join(self.img_dir, file_name)\r\n# ann_ids = self.coco.getAnnIds(imgIds=[img_id])\r\n ann_ids = self.imgToObj[str(sent_id)]\r\n anns = self.coco.loadAnns(ids=ann_ids)\r\n num_objs = min(len(anns), self.max_objs)\r\n\r\n img = cv2.imread(img_path)\r\n\r\n height, width = img.shape[0], img.shape[1]\r\n c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)\r\n if self.opt.keep_res:\r\n input_h = (height | self.opt.pad) + 1\r\n input_w = (width | self.opt.pad) + 1\r\n s = np.array([input_w, input_h], dtype=np.float32)\r\n else:\r\n s = max(img.shape[0], img.shape[1]) * 1.0\r\n input_h, input_w = self.opt.input_h, self.opt.input_w\r\n \r\n\r\n trans_input = get_affine_transform(\r\n c, s, 0, [input_w, input_h])\r\n inp = cv2.warpAffine(img, trans_input, \r\n (input_w, input_h),\r\n flags=cv2.INTER_LINEAR)\r\n inp = (inp.astype(np.float32) / 255.)\r\n if self.split == 'train' and not self.opt.no_color_aug:\r\n color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)\r\n inp = (inp - self.mean) / self.std\r\n inp = inp.transpose(2, 0, 1)\r\n\r\n output_h = input_h // self.opt.down_ratio\r\n output_w = input_w // self.opt.down_ratio\r\n num_classes = self.num_classes\r\n trans_output = get_affine_transform(c, s, 0, [output_w, output_h])\r\n\r\n hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)\r\n wh = np.zeros((self.max_objs, 2), dtype=np.float32)\r\n dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)\r\n reg = np.zeros((self.max_objs, 2), dtype=np.float32)\r\n ind = np.zeros((self.max_objs), dtype=np.int64)\r\n reg_mask = np.zeros((self.max_objs), dtype=np.uint8)\r\n cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)\r\n cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)\r\n \r\n draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \\\r\n draw_umich_gaussian\r\n\r\n gt_det = []\r\n for k in range(num_objs):\r\n ann = anns[k]\r\n bbox = self._coco_box_to_bbox(ann['bbox'])\r\n cls_id = int(self.cat_ids[ann['category_id']])\r\n# if flipped:\r\n# bbox[[0, 2]] = width - bbox[[2, 0]] - 1\r\n 
bbox[:2] = affine_transform(bbox[:2], trans_output)\r\n bbox[2:] = affine_transform(bbox[2:], trans_output)\r\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)\r\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)\r\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\r\n if h > 0 and w > 0:\r\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\r\n radius = max(0, int(radius))\r\n radius = self.opt.hm_gauss if self.opt.mse_loss else radius\r\n ct = np.array(\r\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\r\n ct_int = ct.astype(np.int32)\r\n draw_gaussian(hm[cls_id], ct_int, radius)\r\n wh[k] = 1. * w, 1. * h\r\n ind[k] = ct_int[1] * output_w + ct_int[0]\r\n reg[k] = ct - ct_int\r\n reg_mask[k] = 1\r\n cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]\r\n cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1\r\n if self.opt.dense_wh:\r\n draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)\r\n gt_det.append([ct[0] - w / 2, ct[1] - h / 2, \r\n ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])\r\n \r\n ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh}\r\n if self.opt.dense_wh:\r\n hm_a = hm.max(axis=0, keepdims=True)\r\n dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)\r\n ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})\r\n del ret['wh']\r\n elif self.opt.cat_spec_wh:\r\n ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})\r\n del ret['wh']\r\n if self.opt.reg_offset:\r\n ret.update({'reg': reg})\r\n if self.opt.debug > 0 or not self.split == 'train':\r\n gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \\\r\n np.zeros((1, 6), dtype=np.float32)\r\n meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}\r\n ret['meta'] = meta\r\n return ret\r\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mohanksriram/acme | [
"d6e102156dca77e0001f262d255ca01bceff10ea"
] | [
"examples/control/eval_lift_dmpo.py"
] | [
"# Include all the imports here\nfrom typing import Dict, Sequence\n\nfrom absl import app\nfrom absl import flags\nimport acme\nfrom acme import specs\nfrom acme import types\nfrom acme import wrappers\nfrom acme.agents.tf import dmpo\nfrom acme.tf import networks\nfrom acme.tf import utils as tf2_utils\nimport tensorflow as tf\n\nfrom datetime import datetime\nimport imageio\nimport numpy as np\nimport sonnet as snt\n\nfrom examples.offline import bc_robo_utils\n\nimport robosuite as suite\nfrom robosuite.wrappers import GymWrapper\nfrom robosuite.controllers import load_controller_config\n\nflags.DEFINE_integer('num_episodes', 100, 'Number of episodes to run for.')\nFLAGS = flags.FLAGS\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\ndemo_path = \"/home/mohan/research/experiments/bc/panda_lift/expert_demonstrations/1622106811_9832993/demo.hdf5\"\n\n\ndef make_environment(env_config, controller_config, keys):\n env_suite = suite.make(**env_config,\n has_renderer=False,\n has_offscreen_renderer=False,\n use_camera_obs=False,\n reward_shaping=True,\n controller_configs=controller_config,\n )\n env = GymWrapper(env_suite, keys=keys)\n env = wrappers.gym_wrapper.GymWrapper(env)\n env = wrappers.SinglePrecisionWrapper(env)\n \n spec = specs.make_environment_spec(env)\n \n return env, spec\n\n\n\n\n# Prepare the agent\n\ndef make_networks(\n action_spec: specs.BoundedArray,\n policy_layer_sizes: Sequence[int] = (256, 256, 256),\n critic_layer_sizes: Sequence[int] = (512, 512, 256),\n vmin: float = -500.,\n vmax: float = 500.,\n num_atoms: int = 51,\n) -> Dict[str, types.TensorTransformation]:\n \"\"\"Creates networks used by the agent.\"\"\"\n\n # Get total number of action dimensions from action spec.\n num_dimensions = np.prod(action_spec.shape, dtype=int)\n\n # Create the shared observation network; here simply a state-less operation.\n observation_network = tf2_utils.batch_concat\n\n # Create the policy network.\n policy_network = snt.Sequential([\n networks.LayerNormMLP(policy_layer_sizes),\n networks.MultivariateNormalDiagHead(num_dimensions)\n ])\n\n # The multiplexer transforms concatenates the observations/actions.\n multiplexer = networks.CriticMultiplexer(\n critic_network=networks.LayerNormMLP(critic_layer_sizes),\n action_network=networks.ClipToSpec(action_spec))\n\n # Create the critic network.\n critic_network = snt.Sequential([\n multiplexer,\n networks.DiscreteValuedHead(vmin, vmax, num_atoms),\n ])\n\n return {\n 'policy': policy_network,\n 'critic': critic_network,\n 'observation': observation_network,\n }\n\ndef main(_):\n # Prepare the environment\n env_config = {\n \"control_freq\": 20,\n \"env_name\": \"Lift\",\n \"hard_reset\": False,\n \"horizon\": 500,\n \"ignore_done\": False,\n \"reward_scale\": 1.0,\n \"camera_names\": \"frontview\",\n \"robots\": [\n \"Panda\"\n ]\n }\n controller_config = load_controller_config(default_controller=\"OSC_POSE\")\n\n keys = [\"object-state\"]\n for idx in range(1):\n keys.append(f\"robot{idx}_proprio-state\")\n\n env, spec = make_environment(env_config, controller_config, keys)\n\n agent_networks = make_networks(spec.actions)\n\n # construct the agent\n agent = dmpo.DistributionalMPO(\n environment_spec=spec,\n checkpoint=True,\n policy_network=agent_networks['policy'],\n critic_network=agent_networks['critic'],\n observation_network=agent_networks['observation'], # pytype: disable=wrong-arg-types\n )\n\n # agent._learner._checkpointer._time_delta_minutes = 5.\n\n robot_name = 'Panda'\n\n eval_env = suite.make(\n 
**env_config,\n has_renderer=False,\n has_offscreen_renderer=True,\n use_camera_obs=True,\n reward_shaping=True,\n camera_heights=512,\n camera_widths=512,\n controller_configs=controller_config\n )\n\n print(f\"model loaded successfully\")\n eval_steps = 500\n video_path = \"/home/mohan/research/experiments/dmpo/panda_lift/eval_rollouts/\"\n\n for run in range(FLAGS.num_episodes):\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n \n cur_path = video_path + f\"eval_run_{current_time}_{robot_name}_{run}.mp4\"\n # create a video writer with imageio\n writer = imageio.get_writer(cur_path, fps=20)\n\n full_obs = eval_env.reset()\n flat_obs = np.concatenate([full_obs[key] for key in keys])\n flat_obs = np.float32(flat_obs)\n print(f\"obs type is: {flat_obs.dtype}\")\n action = agent.select_action(flat_obs)\n print(f\"action dtype is: {action.dtype}\")\n total_reward = 0\n for i in range(eval_steps):\n # act and observe\n obs, reward, done, _ = eval_env.step(action)\n # eval_env.render()\n total_reward += reward\n # compute next action\n flat_obs = np.concatenate([obs[key] for key in keys])\n action = agent.select_action(np.float32(flat_obs))\n\n # dump a frame from every K frames\n if i % 1 == 0:\n frame = obs[\"frontview_image\"]\n frame = np.flip(frame, 0)\n writer.append_data(frame)\n if done:\n break\n print(f\"total eval reward: {total_reward}\")\n\n # Start the training process\n \n # loop = acme.EnvironmentLoop(env, agent)\n\n # num_episodes = FLAGS.num_episodes\n # loop.run(num_episodes=num_episodes)\n\nif __name__ == '__main__':\n app.run(main)\n\n\n\n"
] | [
[
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.concatenate",
"numpy.float32",
"numpy.prod",
"numpy.flip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
persianyagami90xs/darwin-py | [
"f8a0d8d806831b9e418800b7aae5c7b70c03580e"
] | [
"darwin/importer/formats/coco.py"
] | [
"import json\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport numpy as np\nfrom upolygon import find_contours\n\nimport darwin.datatypes as dt\n\n\ndef parse_file(path: Path) -> Optional[List[dt.AnnotationFile]]:\n if path.suffix != \".json\":\n return\n\n with path.open() as f:\n data = json.load(f)\n return list(parse_json(path, data))\n\n\ndef parse_json(path, data):\n annotations = data[\"annotations\"]\n image_lookup_table = {image[\"id\"]: image for image in data[\"images\"]}\n category_lookup_table = {category[\"id\"]: category for category in data[\"categories\"]}\n image_annotations = {}\n\n for annotation in annotations:\n image_id = annotation[\"image_id\"]\n annotation[\"category_id\"]\n annotation[\"segmentation\"]\n if image_id not in image_annotations:\n image_annotations[image_id] = []\n image_annotations[image_id].append(parse_annotation(annotation, category_lookup_table))\n\n for image_id in image_annotations.keys():\n image = image_lookup_table[image_id]\n annotations = list(filter(None, image_annotations[image_id]))\n annotation_classes = set([annotation.annotation_class for annotation in annotations])\n yield dt.AnnotationFile(path, image[\"file_name\"], annotation_classes, annotations)\n\n\ndef parse_annotation(annotation, category_lookup_table):\n category = category_lookup_table[annotation[\"category_id\"]]\n segmentation = annotation[\"segmentation\"]\n iscrowd = annotation.get(\"iscrowd\") == 1\n\n if iscrowd:\n print(\"Warning, unsupported RLE, skipping\")\n return None\n\n if len(segmentation) == 0 and len(annotation[\"bbox\"]) == 4:\n x, y, w, h = map(int, annotation[\"bbox\"])\n return dt.make_bounding_box(category[\"name\"], x, y, w, h)\n elif len(segmentation) > 1:\n print(\"warning, converting complex coco rle mask to polygon, could take some time\")\n mask = rle_decoding(segmentation[\"counts\"], segmentation[\"size\"])\n _labels, external, _internal = find_contours(mask)\n paths = []\n for external_path in external:\n path = []\n points = iter(external_path)\n while True:\n try:\n x, y = next(points), next(points)\n path.append({\"x\": x, \"y\": y})\n except StopIteration:\n break\n paths.append(path)\n return dt.make_complex_polygon(category[\"name\"], paths)\n elif len(segmentation) == 1:\n path = []\n points = iter(segmentation[0])\n while True:\n try:\n x, y = next(points), next(points)\n path.append({\"x\": x, \"y\": y})\n except StopIteration:\n break\n return dt.make_polygon(category[\"name\"], path)\n else:\n return None\n\n\ndef rle_decoding(counts, shape):\n img = np.zeros(shape[0] * shape[1], dtype=np.uint8)\n val = 1\n n = 0\n for pos in range(len(counts)):\n val = not val\n img[n : n + counts[pos]] = val\n n += counts[pos]\n return img.reshape(shape).T\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JizhiziLi/P3M | [
"d66b8a46ae017ffc64c04f05e551158db635a083"
] | [
"core/network/P3mNet.py"
] | [
"\"\"\"\nPrivacy-Preserving Portrait Matting [ACM MM-21]\nMain test file.\n\nCopyright (c) 2021, Jizhizi Li ([email protected]) and Sihan Ma ([email protected])\nLicensed under the MIT License (see LICENSE for details)\nGithub repo: https://github.com/JizhiziLi/P3M\nPaper link : https://dl.acm.org/doi/10.1145/3474085.3475512\n\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torchvision import models\nfrom config import *\nimport torch.nn.functional as F\nfrom util import *\nfrom network.resnet_mp import *\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\nclass TFI(nn.Module):\n expansion = 1\n def __init__(self, planes,stride=1):\n super(TFI, self).__init__()\n middle_planes = int(planes/2)\n self.transform = conv1x1(planes, middle_planes)\n self.conv1 = conv3x3(middle_planes*3, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.stride = stride\n def forward(self, input_s_guidance, input_m_decoder, input_m_encoder):\n input_s_guidance_transform = self.transform(input_s_guidance)\n input_m_decoder_transform = self.transform(input_m_decoder)\n input_m_encoder_transform = self.transform(input_m_encoder)\n x = torch.cat((input_s_guidance_transform,input_m_decoder_transform,input_m_encoder_transform),1)\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n return out\nclass SBFI(nn.Module):\n def __init__(self, planes,stride=1):\n super(SBFI, self).__init__()\n self.stride = stride\n self.transform1 = conv1x1(planes, int(planes/2))\n self.transform2 = conv1x1(64, int(planes/2))\n self.maxpool = nn.MaxPool2d(2, stride=stride)\n self.conv1 = conv3x3(planes, planes, 1)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n def forward(self, input_m_decoder,e0):\n input_m_decoder_transform = self.transform1(input_m_decoder)\n e0_maxpool = self.maxpool(e0)\n e0_transform = self.transform2(e0_maxpool)\n x = torch.cat((input_m_decoder_transform,e0_transform),1)\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = out+input_m_decoder\n return out\nclass DBFI(nn.Module):\n def __init__(self, planes,stride=1):\n super(DBFI, self).__init__()\n self.stride = stride\n self.transform1 = conv1x1(planes, int(planes/2))\n self.transform2 = conv1x1(512, int(planes/2))\n self.upsample = nn.Upsample(scale_factor=stride, mode='bilinear')\n self.conv1 = conv3x3(planes, planes, 1)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, 3, 1)\n self.upsample2 = nn.Upsample(scale_factor=int(32/stride), mode='bilinear')\n def forward(self, input_s_decoder,e4):\n input_s_decoder_transform = self.transform1(input_s_decoder)\n e4_transform = self.transform2(e4)\n e4_upsample = self.upsample(e4_transform)\n x = torch.cat((input_s_decoder_transform,e4_upsample),1)\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = out+input_s_decoder\n out_side = self.conv2(out)\n out_side = self.upsample2(out_side)\n return out, out_side\nclass P3mNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.resnet = resnet34_mp()\n ############################\n ### Encoder part - RESNETMP\n ############################\n self.encoder0 = nn.Sequential(\n self.resnet.conv1,\n self.resnet.bn1,\n self.resnet.relu,\n )\n self.mp0 = self.resnet.maxpool1\n self.encoder1 = nn.Sequential(\n self.resnet.layer1)\n self.mp1 = 
self.resnet.maxpool2\n self.encoder2 = self.resnet.layer2\n self.mp2 = self.resnet.maxpool3\n self.encoder3 = self.resnet.layer3\n self.mp3 = self.resnet.maxpool4\n self.encoder4 = self.resnet.layer4\n self.mp4 = self.resnet.maxpool5\n\n self.tfi_3 = TFI(256)\n self.tfi_2 = TFI(128)\n self.tfi_1 = TFI(64)\n self.tfi_0 = TFI(64)\n\n self.sbfi_2 = SBFI(128, 8)\n self.sbfi_1 = SBFI(64, 4)\n self.sbfi_0 = SBFI(64, 2)\n\n self.dbfi_2 = DBFI(128, 4)\n self.dbfi_1 = DBFI(64, 8)\n self.dbfi_0 = DBFI(64, 16)\n\n ##########################\n ### Decoder part - GLOBAL\n ##########################\n self.decoder4_g = nn.Sequential(\n nn.Conv2d(512,512,3,padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Conv2d(512,512,3,padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Conv2d(512,256,3,padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Upsample(scale_factor=2, mode='bilinear') )\n self.decoder3_g = nn.Sequential(\n nn.Conv2d(256,256,3,padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,256,3,padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,128,3,padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Upsample(scale_factor=2, mode='bilinear') )\n self.decoder2_g = nn.Sequential(\n nn.Conv2d(128,128,3,padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128,128,3,padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Upsample(scale_factor=2, mode='bilinear'))\n self.decoder1_g = nn.Sequential(\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Upsample(scale_factor=2, mode='bilinear'))\n self.decoder0_g = nn.Sequential(\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,3,3,padding=1),\n nn.Upsample(scale_factor=2, mode='bilinear'))\n\n ##########################\n ### Decoder part - LOCAL\n ##########################\n self.decoder4_l = nn.Sequential(\n nn.Conv2d(512,512,3,padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Conv2d(512,512,3,padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Conv2d(512,256,3,padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True))\n self.decoder3_l = nn.Sequential(\n nn.Conv2d(256,256,3,padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,256,3,padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,128,3,padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True))\n self.decoder2_l = nn.Sequential(\n nn.Conv2d(128,128,3,padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128,128,3,padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True))\n self.decoder1_l = nn.Sequential(\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True))\n self.decoder0_l = nn.Sequential(\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,padding=1),\n nn.BatchNorm2d(64),\n 
nn.ReLU(inplace=True))\n self.decoder_final_l = nn.Conv2d(64,1,3,padding=1)\n\n \n def forward(self, input):\n ##########################\n ### Encoder part - RESNET\n ##########################\n e0 = self.encoder0(input)\n e0p, id0 = self.mp0(e0)\n e1p, id1 = self.mp1(e0p)\n e1 = self.encoder1(e1p)\n e2p, id2 = self.mp2(e1)\n e2 = self.encoder2(e2p)\n e3p, id3 = self.mp3(e2)\n e3 = self.encoder3(e3p)\n e4p, id4 = self.mp4(e3)\n e4 = self.encoder4(e4p)\n ###########################\n ### Decoder part - Global\n ###########################\n d4_g = self.decoder4_g(e4)\n d3_g = self.decoder3_g(d4_g)\n d2_g, global_sigmoid_side2 = self.dbfi_2(d3_g, e4)\n d2_g = self.decoder2_g(d2_g)\n d1_g, global_sigmoid_side1 = self.dbfi_1(d2_g, e4)\n d1_g = self.decoder1_g(d1_g)\n d0_g, global_sigmoid_side0 = self.dbfi_0(d1_g, e4)\n d0_g = self.decoder0_g(d0_g)\n global_sigmoid = d0_g\n ###########################\n ### Decoder part - Local\n ###########################\n d4_l = self.decoder4_l(e4)\n d4_l = F.max_unpool2d(d4_l, id4, kernel_size=2, stride=2)\n d3_l = self.tfi_3(d4_g, d4_l, e3)\n d3_l = self.decoder3_l(d3_l)\n d3_l = F.max_unpool2d(d3_l, id3, kernel_size=2, stride=2)\n d2_l = self.tfi_2(d3_g, d3_l, e2)\n d2_l = self.sbfi_2(d2_l, e0)\n d2_l = self.decoder2_l(d2_l)\n d2_l = F.max_unpool2d(d2_l, id2, kernel_size=2, stride=2)\n d1_l = self.tfi_1(d2_g, d2_l, e1)\n d1_l = self.sbfi_1(d1_l, e0)\n d1_l = self.decoder1_l(d1_l)\n d1_l = F.max_unpool2d(d1_l, id1, kernel_size=2, stride=2)\n d0_l = self.tfi_0(d1_g, d1_l, e0p)\n d0_l = self.sbfi_0(d0_l, e0)\n d0_l = self.decoder0_l(d0_l)\n d0_l = F.max_unpool2d(d0_l, id0, kernel_size=2, stride=2)\n d0_l = self.decoder_final_l(d0_l)\n local_sigmoid = F.sigmoid(d0_l)\n ##########################\n ### Fusion net - G/L\n ##########################\n fusion_sigmoid = get_masked_local_from_global(global_sigmoid, local_sigmoid)\n return global_sigmoid, local_sigmoid, fusion_sigmoid, global_sigmoid_side2, global_sigmoid_side1, global_sigmoid_side0\n "
] | [
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.functional.sigmoid",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.functional.max_unpool2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
padam56/Data-Analysis-and-Visualization-Projects | [
"d11cf8f1df7ab7d560bbe12b50b4da76ccb84048"
] | [
"Neural Network Visualizer Web App with Python/ml_server.py"
] | [
"\nimport json\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport random\nimport string\n\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\nmodel = tf.keras.models.load_model('model.h5')\nfeature_model = tf.keras.models.Model( #Give us output of all layers\n model.inputs,\n [layer.output for layer in model.layers]\n)\n\n_, (x_test, _) = tf.keras.datasets.mnist.load_data()\nx_test = x_test / 255.\n\ndef get_prediction():\n index = np.random.choice(x_test.shape[0])\n image = x_test[index,:,:]\n image_arr = np.reshape(image, (1, 784))\n return feature_model.predict(image_arr), image\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n preds, image = get_prediction()\n final_preds = [p.tolist() for p in preds]\n return json.dumps({\n 'prediction': final_preds,\n 'image': image.tolist()\n })\n return 'Welcome to the ml server'\n\nif __name__ == '__main__':\n app.run()\n"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.random.choice",
"numpy.reshape",
"tensorflow.keras.models.Model",
"tensorflow.keras.datasets.mnist.load_data"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
Miro-Astore/mdanalysis_scripts | [
"faf59c7b3b63ab103a709941e5cc2e5d7c1d0b23"
] | [
"graph_occ.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt \n\n\n\nwt=np.load('_scratch_r16_ma2374_gmx_cftr_2nd_round_310K_wt_2_occ.npy')\nR352Q=np.load('_scratch_r16_ma2374_gmx_cftr_2nd_round_310K_R352Q_1_occ.npy')\ninds=range(int(wt[0][-1]))\nwidth=0.4\n\nprop_wt=wt[1][0:-1]/(wt[1][-1])\n#print(wt[1][0:-1][-1])\n#print(len(prop_wt))\n#print(len(inds))\nplt.figure(figsize=(20,5))\nprop_mut=R352Q[1][0:-1]/R352Q[1][-1]\ninds=np.array([float(x) for x in inds ])\nprint((inds))\nprint((prop_wt))\nplt.bar(inds-width*0.5,prop_wt,width,label='WT',color='tab:blue')\nplt.xlim([-0.5,inds[-1]+0.5])\nplt.bar(inds+width*0.5,prop_mut,width,label='R352Q',color='red')\nres_list=wt[0][0:-1]\nres_list=[int(x) for x in res_list]\nplt.xticks(inds,res_list,fontsize=14)\nplt.yticks(fontsize=14)\nplt.xlabel('Residue Number',fontsize=16)\nplt.ylabel('Chloride Coordination Number',fontsize=16)\nplt.ylim([0,1.2])\n#plt.bar(inds+width*0.5,prop_wt,width,label='WT',color='tab:blue')\n#plt.bar(inds+width*0.5,res_results[1,:],width,color='tab:blue',label='R352Q',color='red')\nplt.tight_layout()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.bar",
"numpy.load",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mathandy/handwritten_digit_recognition | [
"37ca5cadfaa0b2554873b2a1bf9fb7b729776eac"
] | [
"utils/diagnostic_tools.py"
] | [
"\"\"\"Some diagnostic tools.\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\nfrom collections import OrderedDict\nimport numpy as np\n\n\nfrom .display_tools import printmat\n\n\ndef getbinarydiagnostic(labels, predictions):\n \"\"\"returns a dictionary of various ratios used for diagnostic purposes.\"\"\"\n\n tp = sum(y == pred for y, pred in zip(labels, predictions) if pred)\n fp = sum(y != pred for y, pred in zip(labels, predictions) if pred)\n tn = sum(y == pred for y, pred in zip(labels, predictions) if not pred)\n fn = sum(y != pred for y, pred in zip(labels, predictions) if not pred)\n tp, fp, tn, fn = [np.float32(x) for x in (tp, fp, tn, fn)]\n old_seterr = np.seterr(divide=\"ignore\")\n diag = OrderedDict()\n diag.update({\n 'tp': tp,\n 'fp': fp,\n 'tn': tn,\n 'fn': fn,\n 'ppv': tp / (tp + fp), # positive predictive value (precision)\n 'tpr': tp / (tp + fn), # true positive rate (recall, sensitivity)\n 'tnr': tn / (tn + fp), # true negative rate (specificity)\n 'npv': tn / (tn + fn), # negative predictive value\n 'fpr': fp / (fp + tn), # false positive rate\n 'fnr': fn / (fn + tp) # false negative rate\n })\n diag.update({\n 'recall': diag['tpr'],\n 'precision': diag['ppv']\n })\n diag.update({\n 'fdr': 1 - diag['ppv'], # false discovery rate\n 'accuracy': (tp + tn) / (tp + tn + fp + fn),\n 'f1': 2 * diag['ppv'] * diag['tpr'] / (diag['ppv'] + diag['tpr']),\n # Mathews Correlation Coefficient\n 'mcc': (tp * tn - fp * fn) / np.sqrt(\n (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)),\n 'informedness': diag['tpr'] + diag['tnr'] - 1,\n 'markedness': diag['ppv'] + diag['npv'] - 1\n })\n np.seterr(**old_seterr)\n return diag\n\n\ndef getdiagnostics(labels, predictions):\n \"\"\"returns a dictionary of dictionaries (one for distinct label, unless\n binary) of various ratios used for diagnostic purposes.\"\"\"\n diags = dict()\n distinct_labels = set(labels)\n\n # In case of binary labels\n if len(distinct_labels) == 2:\n l0, l1 = distinct_labels\n if l0 and not l1:\n diags[l0] = getbinarydiagnostic(labels, predictions)\n return diags\n if l1 and not l0:\n diags[l1] = getbinarydiagnostic(labels, predictions)\n return diags\n\n # In case of more than two distinct labels\n for l in set(labels):\n ground_truth = [l == g for g in labels]\n bin_predictions = [l == p for p in predictions]\n diags[l] = getbinarydiagnostic(ground_truth, bin_predictions)\n return diags\n\n\nclass diagnostic(object):\n def __init__(self, labels):\n self.labels = labels\n self.distinct_labels = set(labels)\n self.diagnoses = dict() # dictionary of dictionaries of dictionaries\n\n def diagnose(self, predictions, method=None):\n if method is None:\n method = \"UnNamed: \" + str(len(self.diagnoses))\n self.diagnoses[method] = getdiagnostics(self.labels, predictions)\n\n def report(self, keys=['recall', 'precision', 'accuracy', 'f1']):\n methods = self.diagnoses.keys()\n # ratios = self.diagnoses.values()[0].values()[0].keys()\n for l in self.distinct_labels:\n printmat([[d[l][k] for k in keys] for d in self.diagnoses.values()],\n col_labels=keys,\n row_labels=methods)\n\n # Find winner\n winners = []\n for measure in keys:\n contest_results = []\n for m in methods:\n res = self.diagnoses[m][l][measure]\n if res is not np.NaN:\n contest_results.append((res, m))\n w = max(contest_results)\n winners.append(['Winner by ' + measure + ':',\n str(w[1]),\n str(w[0])])\n printmat(winners)\n"
] | [
[
"numpy.seterr",
"numpy.sqrt",
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
srivastavashobhit/Semantic-Segmentation-Identifying-Drivable-Path-for-Autonomous-Vehicles | [
"372904d5be7b00abb1de215515c86bd16fa411a4"
] | [
"src/utils/image_utils.py"
] | [
"import tensorflow as tf\n\n\ndef read_image(image_url):\n image = tf.io.read_file(image_url)\n image = tf.image.decode_png(image, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32) # this also set the value between 0 and 1\n\n return image\n\n\ndef read_mask(mask_url):\n mask = tf.io.read_file(mask_url)\n mask = tf.image.decode_png(mask, channels=3)\n mask = tf.math.reduce_max(mask, axis=-1, keepdims=True)\n return mask\n\n\ndef resize_image(image):\n shape = (96, 128)\n image = tf.image.resize(image, shape, method='nearest')\n\n return image\n\n\ndef resize_mask(mask):\n shape = (96, 128)\n mask = tf.image.resize(mask, shape, method='nearest')\n\n return mask\n\n\ndef read_image_mask(image_url, mask_url):\n return read_image(image_url), read_mask(mask_url)\n\n\ndef resize_image_mask(image, mask):\n return resize_image(image), resize_mask(mask)\n\n\ndef get_image_from_array(array):\n return tf.keras.preprocessing.image.array_to_img(array)\n"
] | [
[
"tensorflow.math.reduce_max",
"tensorflow.image.decode_png",
"tensorflow.image.resize",
"tensorflow.image.convert_image_dtype",
"tensorflow.io.read_file",
"tensorflow.keras.preprocessing.image.array_to_img"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Basvdbrink1998/Influencing-social-networks | [
"7b512edc4127680a37115c7e1434b06ebfa67e8a"
] | [
"Code/Simple_plots/Adjecency.py"
] | [
"import networkx as nx\nimport matplotlib.pyplot as plt\n\n\"\"\"\n Adjecency.py: Plots a house graph for Figure 2.1.\n\"\"\"\n\nnode_color = 'red'\nnode_border_color = 'black'\nnode_border_width = .6\nedge_color = 'black'\n\n\ndef draw(G, pos, ax):\n nodes1 = nx.draw_networkx_nodes(G, pos=pos, node_size=500,\n node_color='white', ax=ax)\n nodes1.set_edgecolor(node_border_color)\n nodes1.set_linewidth(node_border_width)\n nx.draw_networkx_edges(G, pos, edge_color=edge_color, alpha=.8,\n ax=ax)\n z = zip(G.nodes, list(range(0, len(G.nodes))))\n idx = dict(z)\n nx.draw_networkx_labels(G, pos, idx, font_size=9, ax=ax)\n ax.axis('off')\n\n\nfig, ax = plt.subplots()\nG = nx.house_graph()\npos = nx.spring_layout(G)\nidx = draw(G, pos, ax)\nprint(nx.adjacency_matrix(G).todense())\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jrosebr1/mxnet | [
"f0fe522cfd91b50ed1ef31d22ff412a1295e16a7"
] | [
"python/mxnet/test_utils.py"
] | [
"# coding: utf-8\n\"\"\"Tools for testing.\"\"\"\n# pylint: disable=invalid-name, no-member, too-many-arguments, too-many-locals, too-many-branches, too-many-statements, broad-except, line-too-long, unused-import\nfrom __future__ import absolute_import, print_function, division\nimport time\nimport traceback\nimport numbers\nimport numpy as np\nimport numpy.testing as npt\nimport mxnet as mx\n\nfrom .context import cpu, gpu, Context\nfrom .ndarray import array\nfrom .symbol import Symbol\n\n_rng = np.random.RandomState(1234)\n\ndef default_context():\n \"\"\"Get default context for regression test.\"\"\"\n # _TODO: get context from environment variable to support\n # testing with GPUs\n return Context.default_ctx\n\ndef set_default_context(ctx):\n \"\"\"Set default ctx\"\"\"\n Context.default_ctx = ctx\n\ndef default_dtype():\n \"\"\"Get default data type for regression test.\"\"\"\n # _TODO: get default dtype from environment variable\n return np.float32\n\n\ndef default_numerical_threshold():\n \"\"\"Get default numerical threshold for regression test.\"\"\"\n # _TODO: get from env variable, different threshold might\n # be needed for different device and dtype\n return 1e-6\n\n\ndef random_arrays(*shapes):\n \"\"\"Generate some random numpy arrays.\"\"\"\n arrays = [np.random.randn(*s).astype(default_dtype())\n for s in shapes]\n if len(arrays) == 1:\n return arrays[0]\n return arrays\n\n\ndef np_reduce(dat, axis, keepdims, numpy_reduce_func):\n \"\"\"Compatible reduce for old version numpy\n\n Parameters\n ----------\n dat : np.ndarray\n Same as Numpy\n\n axis : None or int or list-like\n Same as Numpy\n\n keepdims : bool\n Same as Numpy\n\n numpy_reduce_func : function\n Numpy reducing function like `np.sum` or `np.max`\n \"\"\"\n if isinstance(axis, int):\n axis = [axis]\n else:\n axis = list(axis) if axis is not None else range(len(dat.shape))\n ret = dat\n for i in reversed(sorted(axis)):\n ret = numpy_reduce_func(ret, axis=i)\n if keepdims:\n keepdims_shape = list(dat.shape)\n for i in axis:\n keepdims_shape[i] = 1\n ret = ret.reshape(tuple(keepdims_shape))\n return ret\n\ndef print_max_err_loc(a, b, rtol=1e-7, atol=0):\n \"\"\"print location of maximum violation\"\"\"\n diff = np.abs(a-b)\n tol = atol + rtol*np.abs(b)\n violation = diff/(tol+1e-20)\n loc = np.argmax(violation)\n idx = np.unravel_index(loc, violation.shape)\n print('Maximum err at ', idx, ':', a.flat[loc], ' vs ', b.flat[loc])\n return idx\n\ndef same(a, b):\n \"\"\"Test if two numpy arrays are the same\n\n Parameters\n ----------\n a : np.ndarray\n b : np.ndarray\n \"\"\"\n return np.array_equal(a, b)\n\ndef reldiff(a, b):\n \"\"\"Calculate the relative difference between two input arrays\n\n Calculated by :math:`\\\\frac{|a-b|_1}{|a|_1 + |b|_1}`\n\n Parameters\n ----------\n a : np.ndarray\n b : np.ndarray\n \"\"\"\n diff = np.sum(np.abs(a - b))\n norm = np.sum(np.abs(a)) + np.sum(np.abs(b))\n if diff == 0:\n return 0\n ret = diff / norm\n return ret\n\n\ndef almost_equal(a, b, threshold=None):\n \"\"\"Test if two numpy arrays are almost equal.\"\"\"\n threshold = threshold or default_numerical_threshold()\n rel = reldiff(a, b)\n return not np.isnan(rel) and rel <= threshold\n\n\ndef assert_almost_equal(a, b, threshold=None):\n \"\"\"Test that two numpy arrays are almost equal. Raise exception message if not.\n\n Parameters\n ----------\n a : np.ndarray\n b : np.ndarray\n threshold : None or float\n The checking threshold. 
Default threshold will be used if set to None\n \"\"\"\n threshold = threshold or default_numerical_threshold()\n rel = reldiff(a, b)\n if np.isnan(rel) or rel > threshold:\n np.set_printoptions(threshold=4, suppress=True)\n msg = npt.build_err_msg([a, b],\n err_msg=\"Rel Err=%f, Expected <=%f\" % (rel, threshold),\n names=[\"a\", \"b\"])\n raise Exception(msg)\n return rel\n\ndef almost_equal_ignore_nan(a, b, rtol=None, atol=None):\n \"\"\"Test that two numpy arrays are almost equal (ignoring NaN in either array).\n Combines a relative and absolute measure of approximate eqality.\n If either the relative or absolute check passes, the arrays are considered equal.\n Including an absolute check resolves issues with the relative check where all\n array values are close to zero.\n\n Parameters\n ----------\n a : np.ndarray\n b : np.ndarray\n rtol : None or float\n The relative threshold. Default threshold will be used if set to None\n atol : None or float\n The absolute threshold. Default threshold will be used if set to None\n \"\"\"\n a = np.copy(a)\n b = np.copy(b)\n nan_mask = np.logical_or(np.isnan(a), np.isnan(b))\n a[nan_mask] = 0\n b[nan_mask] = 0\n\n rtol = rtol or default_numerical_threshold()\n atol = atol or default_numerical_threshold()\n\n rel_approx_equal = reldiff(a, b) <= rtol\n abs_approx_equal = np.sum(np.abs(a - b) > atol) == 0\n\n return rel_approx_equal or abs_approx_equal\n\n\ndef simple_forward(sym, ctx=None, is_train=False, **inputs):\n \"\"\"A simple forward function for a symbol.\n\n Primarily used in doctest to conveniently test the function\n of a symbol. Takes numpy array as inputs and outputs are\n also converted to numpy arrays.\n\n Parameters\n ----------\n ctx : Context\n If None, will take the default context.\n inputs : keyword arguments\n Mapping each input name to a numpy array.\n\n Returns\n -------\n The result as a numpy array. 
Multiple results will\n be returned as a list of numpy arrays.\n \"\"\"\n ctx = ctx or default_context()\n inputs = {k: array(v) for k, v in inputs.iteritems()}\n exe = sym.bind(ctx, args=inputs)\n exe.forward(is_train=is_train)\n outputs = [x.asnumpy() for x in exe.outputs]\n if len(outputs) == 1:\n outputs = outputs[0]\n return outputs\n\n\ndef _parse_location(sym, location, ctx):\n \"\"\"Parse the given location to a dictionary\n\n Parameters\n ----------\n sym : Symbol\n location : None or list of np.ndarray or dict of str to np.ndarray\n\n Returns\n -------\n dict of str to np.ndarray\n \"\"\"\n assert isinstance(location, (dict, list, tuple))\n if isinstance(location, dict):\n if set(location.keys()) != set(sym.list_arguments()):\n raise ValueError(\"Symbol arguments and keys of the given location do not match.\"\n \"symbol args:%s, location.keys():%s\"\n % (str(set(sym.list_arguments())), str(set(location.keys()))))\n else:\n location = {k: v for k, v in zip(sym.list_arguments(), location)}\n location = {k: mx.nd.array(v, ctx=ctx) for k, v in location.items()}\n return location\n\n\ndef _parse_aux_states(sym, aux_states, ctx):\n \"\"\"\n\n Parameters\n ----------\n sym : Symbol\n aux_states : None or list of np.ndarray or dict of str to np.ndarray\n\n Returns\n -------\n dict of str to np.ndarray\n \"\"\"\n if aux_states is not None:\n if isinstance(aux_states, dict):\n if set(aux_states.keys()) != set(sym.list_auxiliary_states()):\n raise ValueError(\"Symbol aux_states names and given aux_states do not match.\"\n \"symbol aux_names:%s, aux_states.keys:%s\"\n % (str(set(sym.list_auxiliary_states())),\n str(set(aux_states.keys()))))\n elif isinstance(aux_states, (list, tuple)):\n aux_names = sym.list_auxiliary_states()\n aux_states = {k:v for k, v in zip(aux_names, aux_states)}\n aux_states = {k: mx.nd.array(v, ctx=ctx) for k, v in aux_states.items()}\n return aux_states\n\n\ndef numeric_grad(executor, location, aux_states=None, eps=1e-4, use_forward_train=True):\n \"\"\"Calculates a numeric gradient via finite difference method.\n\n Class based on Theano's `theano.gradient.numeric_grad` [1]\n\n Parameters\n ----------\n executor : Executor\n exectutor that computes the forward pass\n location : list of numpy.ndarray or dict of str to numpy.ndarray\n Argument values used as location to compute gradient\n Maps the name of arguments to the corresponding numpy.ndarray.\n Value of all the arguments must be provided.\n aux_states : None or list of numpy.ndarray or dict of str to numpy.ndarray, optional\n Auxiliary states values used as location to compute gradient\n Maps the name of aux_states to the corresponding numpy.ndarray.\n Value of all the auxiliary arguments must be provided.\n eps : float, optional\n epsilon for the finite-difference method\n use_forward_train : bool, optional\n Whether to use `is_train=True` in testing.\n References\n ---------\n ..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py\n \"\"\"\n for k, v in location.items():\n executor.arg_dict[k][:] = v\n approx_grads = {k: np.zeros(v.shape, dtype=np.float32)\n for k, v in location.items()}\n\n executor.forward(is_train=use_forward_train)\n f_x = executor.outputs[0].asnumpy()[0]\n for k in location:\n location[k] = np.ascontiguousarray(location[k])\n for k, v in location.items():\n old_value = v.copy()\n for i in range(np.prod(v.shape)):\n # inplace update\n v.ravel()[i] += eps\n executor.arg_dict[k][:] = v\n if aux_states is not None:\n for key, val in aux_states.items():\n 
executor.aux_dict[key][:] = val\n executor.forward(is_train=use_forward_train)\n f_eps = executor.outputs[0].asnumpy()[0]\n approx_grads[k].ravel()[i] = (f_eps - f_x) / eps\n v.ravel()[i] = old_value.ravel()[i]\n # copy back the original value\n executor.arg_dict[k][:] = old_value\n return approx_grads\n\n\ndef check_numeric_gradient(sym, location, aux_states=None, numeric_eps=1e-4, check_eps=1e-2,\n grad_nodes=None, use_forward_train=True, ctx=None):\n \"\"\"Verify an operation by checking backward pass via finite difference method.\n\n Based on Theano's `theano.gradient.verify_grad` [1]\n\n Parameters\n ----------\n sym : Symbol\n Symbol containing op to test\n location : list or tuple or dict\n Argument values used as location to compute gradient\n\n - if type is list of numpy.ndarray\n inner elements should have the same the same order as mxnet.sym.list_arguments().\n - if type is dict of str -> numpy.ndarray\n maps the name of arguments to the corresponding numpy.ndarray.\n *In either case, value of all the arguments must be provided.*\n aux_states : ist or tuple or dict, optional\n The auxiliary states required when generating the executor for the symbol\n numeric_eps : float, optional\n Delta for the finite difference method that approximates the gradient\n check_eps : float, optional\n relative error eps used when comparing numeric grad to symbolic grad\n grad_nodes : None or list or tuple or dict, optional\n Names of the nodes to check gradient on\n use_forward_train : bool\n Whether to use is_train=True when computing the finite-difference\n ctx : Context, optional\n Check the gradient computation on the specified device\n References\n ---------\n ..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py\n \"\"\"\n if ctx is None:\n ctx = default_context()\n\n def random_projection(shape):\n \"\"\"Get a random weight matrix with not too small elements\n\n Parameters\n ----------\n shape : list or tuple\n \"\"\"\n # random_projection should not have elements too small,\n # otherwise too much precision is lost in numerical gradient\n plain = _rng.rand(*shape) + 0.1\n return plain\n\n location = _parse_location(sym=sym, location=location, ctx=ctx)\n location_npy = {k:v.asnumpy() for k, v in location.items()}\n aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx)\n if aux_states is not None:\n aux_states_npy = {k:v.asnumpy() for k, v in aux_states.items()}\n else:\n aux_states_npy = None\n if grad_nodes is None:\n grad_nodes = sym.list_arguments()\n grad_req = {k: 'write' for k in grad_nodes}\n elif isinstance(grad_nodes, (list, tuple)):\n grad_nodes = list(grad_nodes)\n grad_req = {k: 'write' for k in grad_nodes}\n elif isinstance(grad_nodes, dict):\n grad_req = grad_nodes.copy()\n grad_nodes = grad_nodes.keys()\n else:\n raise ValueError\n\n input_shape = {k: v.shape for k, v in location.items()}\n _, out_shape, _ = sym.infer_shape(**input_shape)\n proj = mx.sym.Variable(\"__random_proj\")\n out = mx.sym.sum(sym * proj)\n out = mx.sym.MakeLoss(out)\n\n location = dict(list(location.items()) +\n [(\"__random_proj\", mx.nd.array(random_projection(out_shape[0]), ctx=ctx))])\n args_grad_npy = dict([(k, _rng.normal(0, 0.01, size=location[k].shape)) for k in grad_nodes]\n + [(\"__random_proj\", _rng.normal(0, 0.01, size=out_shape[0]))])\n\n args_grad = {k: mx.nd.array(v, ctx=ctx) for k, v in args_grad_npy.items()}\n\n executor = out.bind(ctx, grad_req=grad_req,\n args=location, args_grad=args_grad, aux_states=aux_states)\n\n inps = executor.arg_arrays\n if 
len(inps) != len(location):\n raise ValueError(\"Executor arg_arrays and and location len do not match.\"\n \"Got %d inputs and %d locations\"%(len(inps), len(location)))\n assert len(executor.outputs) == 1\n\n executor.forward(is_train=True)\n executor.backward()\n symbolic_grads = {k:executor.grad_dict[k].asnumpy() for k in grad_nodes}\n\n numeric_gradients = numeric_grad(executor, location_npy, aux_states_npy,\n eps=numeric_eps, use_forward_train=use_forward_train)\n for name in grad_nodes:\n fd_grad = numeric_gradients[name]\n orig_grad = args_grad_npy[name]\n sym_grad = symbolic_grads[name]\n if grad_req[name] == 'write':\n rel = reldiff(fd_grad, sym_grad)\n arr_l = [fd_grad, sym_grad]\n elif grad_req[name] == 'add':\n rel = reldiff(fd_grad, sym_grad - orig_grad)\n arr_l = [fd_grad, sym_grad - orig_grad]\n elif grad_req[name] == 'null':\n rel = reldiff(orig_grad, sym_grad)\n arr_l = [orig_grad, sym_grad]\n else:\n raise ValueError\n if np.isnan(rel) or rel > check_eps:\n np.set_printoptions(threshold=4, suppress=True)\n msg = npt.build_err_msg(arr_l,\n err_msg=\"In symbol \\\"%s\\\", ctx=%s, \"\n \"numeric check failed for \\\"%s\\\", grad_req= \\\"%s\\\". \"\n \"Rel Err=%f, Expected <=%f\"\n %(sym.name, str(ctx), name, grad_req[name], rel, check_eps),\n names=[\"NUMERICAL\", \"BACKWARD\"])\n raise Exception(msg)\n\n\ndef check_symbolic_forward(sym, location, expected, check_eps=1E-4, aux_states=None, ctx=None):\n \"\"\"Compare foward call to expected value.\n\n Parameters\n ---------\n sym : Symbol\n output symbol\n location : list of np.ndarray or dict of str to np.ndarray\n The evaluation point\n\n - if type is list of np.ndarray\n contain all the numpy arrays corresponding to `sym.list_arguments()`\n - if type is dict of str to np.ndarray\n contain the mapping between argument names and their values\n expected : list of np.ndarray or dict of str to np.ndarray\n The expected output value\n\n - if type is list of np.ndarray\n contain arrays corresponding to exe.outputs\n - if type is dict of str to np.ndarray\n contain mapping between sym.list_output() and exe.outputs\n check_eps : float, optional\n relative error to check to\n aux_states : list of np.ndarray of dict, optional\n - if type is list of np.ndarray\n contain all the numpy arrays corresponding to sym.list_auxiliary_states\n - if type is dict of str to np.ndarray\n contain the mapping between names of auxiliary states and their values\n ctx : Context, optional\n running context\n \"\"\"\n if ctx is None:\n ctx = default_context()\n\n location = _parse_location(sym=sym, location=location, ctx=ctx)\n aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx)\n if isinstance(expected, dict):\n expected = [expected[k] for k in sym.list_outputs()]\n args_grad_data = {k:mx.nd.empty(v.shape, ctx=ctx) for k, v in location.items()}\n\n executor = sym.bind(ctx=ctx, args=location, args_grad=args_grad_data, aux_states=aux_states)\n for g in executor.grad_arrays:\n if g:\n g[:] = 0\n\n executor.forward(is_train=False)\n outputs = [x.asnumpy() for x in executor.outputs]\n\n for output_name, expect, output in zip(sym.list_outputs(), expected, outputs):\n rel = reldiff(expect, output)\n if rel > check_eps:\n np.set_printoptions(threshold=4, suppress=True)\n msg = npt.build_err_msg([expect, output],\n err_msg=\"In symbol \\\"%s\\\", ctx=%s, \"\n \"forward check failed for \\\"%s\\\". 
\"\n \"Rel Err=%f, Expected <=%f\"\n %(sym.name, str(ctx), output_name, rel, check_eps),\n names=[\"EXPECTED\", \"FORWARD\"])\n raise Exception(msg)\n\n\ndef check_symbolic_backward(sym, location, out_grads, expected, check_eps=1e-5,\n aux_states=None, grad_req='write', ctx=None):\n \"\"\"Compare backward call to expected value.\n\n Parameters\n ---------\n sym : Symbol\n output symbol\n location : list of np.ndarray or dict of str to np.ndarray\n The evaluation point\n\n - if type is list of np.ndarray\n contain all the numpy arrays corresponding to mxnet.sym.list_arguments\n - if type is dict of str to np.ndarray\n contain the mapping between argument names and their values\n out_grads : None or list of np.ndarray or dict of str to np.ndarray\n numpy arrays corresponding to sym.outputs for incomming gradient\n\n - if type is list of np.ndarray\n contains arrays corresponding to exe.outputs\n - if type is dict of str to np.ndarray\n contains mapping between mxnet.sym.list_output() and Executor.outputs\n expected : list of np.ndarray or dict of str to np.ndarray\n expected gradient values\n\n - if type is list of np.ndarray\n contains arrays corresponding to exe.grad_arrays\n - if type is dict of str to np.ndarray\n contains mapping between sym.list_arguments() and exe.outputs\n check_eps: float, optional\n relative error to check to\n aux_states : list of np.ndarray or dict of str to np.ndarray\n grad_req : str or list of str or dict of str to str, optional\n gradient requirements. 'write', 'add' or 'null'\n ctx : Context, optional\n running context\n \"\"\"\n if ctx is None:\n ctx = default_context()\n\n location = _parse_location(sym=sym, location=location, ctx=ctx)\n aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx)\n if isinstance(expected, (list, tuple)):\n expected = {k:v for k, v in zip(sym.list_arguments(), expected)}\n args_grad_npy = {k:_rng.normal(size=v.shape) for k, v in expected.items()}\n args_grad_data = {k: mx.nd.array(v, ctx=ctx) for k, v in args_grad_npy.items()}\n if isinstance(grad_req, str):\n grad_req = {k:grad_req for k in sym.list_arguments()}\n elif isinstance(grad_req, (list, tuple)):\n grad_req = {k:v for k, v in zip(sym.list_arguments(), grad_req)}\n\n executor = sym.bind(ctx=ctx, args=location, args_grad=args_grad_data, aux_states=aux_states)\n executor.forward(is_train=True)\n if isinstance(out_grads, (tuple, list)):\n out_grads = [mx.nd.array(v, ctx=ctx) for v in out_grads]\n elif isinstance(out_grads, (dict)):\n out_grads = {k:mx.nd.array(v, ctx=ctx) for k, v in out_grads.items()}\n else:\n assert out_grads is None\n executor.backward(out_grads)\n\n grads = {k: v.asnumpy() for k, v in args_grad_data.items()}\n for name in expected:\n if grad_req[name] == 'write':\n rel = reldiff(expected[name], grads[name])\n arr_l = [expected[name], grads[name]]\n elif grad_req[name] == 'add':\n rel = reldiff(expected[name], grads[name] - args_grad_npy[name])\n arr_l = [expected[name], grads[name] - args_grad_npy[name]]\n elif grad_req[name] == 'null':\n rel = reldiff(args_grad_npy[name], grads[name])\n arr_l = [args_grad_npy[name], grads[name]]\n else:\n raise ValueError\n if rel > check_eps:\n np.set_printoptions(threshold=4, suppress=True)\n msg = npt.build_err_msg(arr_l,\n err_msg=\"In symbol \\\"%s\\\", ctx=%s, \"\n \"backward check failed for \\\"%s\\\". 
\"\n \"Rel Err=%f, Expected <=%f\"\n %(sym.name, str(ctx), name, rel, check_eps),\n names=[\"EXPECTED\", \"BACKWARD\"])\n raise Exception(msg)\n\n\ndef check_speed(sym, location=None, ctx=None, N=20, grad_req=None, typ=\"whole\",\n **kwargs):\n \"\"\"Check the running speed of a symbol\n\n Parameters\n ----------\n sym : Symbol\n symbol to run the speed test\n location : none or dict of str to np.ndarray\n location to evaluate the inner executor\n ctx : Context\n running context\n N : int, optional\n repeat times\n grad_req : None or str or list of str or dict of str to str, optional\n gradient requirements\n typ : str, optional\n \"whole\" or \"forward\"\n\n - \"whole\"\n test the forward_backward speed\n - \"forward\"\n only test the forward speed\n \"\"\"\n if ctx is None:\n ctx = default_context()\n\n if grad_req is None:\n grad_req = 'write'\n if location is None:\n exe = sym.simple_bind(grad_req=grad_req, ctx=ctx, **kwargs)\n location = {k: _rng.normal(size=arr.shape, scale=1.0) for k, arr in\n exe.arg_dict.items()}\n else:\n assert isinstance(location, dict), \"Expect dict, get \\\"location\\\"=%s\" %str(location)\n exe = sym.simple_bind(grad_req=grad_req, ctx=ctx,\n **{k: v.shape for k, v in location.items()})\n\n for name, iarr in location.items():\n exe.arg_dict[name][:] = iarr.astype(exe.arg_dict[name].dtype)\n\n if typ == \"whole\":\n # Warm up\n exe.forward(is_train=True)\n exe.backward(out_grads=exe.outputs)\n for output in exe.outputs:\n output.wait_to_read()\n # Test forward + backward\n tic = time.time()\n for _ in range(N):\n exe.forward(is_train=True)\n exe.backward(out_grads=exe.outputs)\n for output in exe.outputs:\n output.wait_to_read()\n mx.nd.waitall()\n toc = time.time()\n forward_backward_time = (toc - tic) * 1.0 / N\n return forward_backward_time\n elif typ == \"forward\":\n # Warm up\n exe.forward(is_train=False)\n for output in exe.outputs:\n output.wait_to_read()\n\n # Test forward only\n tic = time.time()\n for _ in range(N):\n exe.forward(is_train=False)\n for output in exe.outputs:\n output.wait_to_read()\n mx.nd.waitall()\n toc = time.time()\n forward_time = (toc - tic) * 1.0 / N\n return forward_time\n else:\n raise ValueError('typ can only be \"whole\" or \"forward\".')\n\n\ndef check_consistency(sym, ctx_list, scale=1.0, grad_req='write',\n arg_params=None, aux_params=None, tol=None,\n raise_on_err=True, ground_truth=None):\n \"\"\"Check symbol gives the same output for different running context\n\n Parameters\n ----------\n sym : Symbol or list of Symbols\n symbol(s) to run the consistency test\n ctx_list : list\n running context. See example for more detail.\n scale : float, optional\n standard deviation of the inner normal distribution. 
Used in initialization\n grad_req : str or list of str or dict of str to str\n gradient requirement.\n\n Examples\n --------\n >>> # create the symbol\n >>> sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')\n >>> # initialize the running context\n >>> ctx_list =\\\n[{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\\\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},\\\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},\\\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\\\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}}]\n >>> check_consistency(sym, ctx_list)\n >>> sym = mx.sym.Concat(name='concat', num_args=2)\n >>> ctx_list = \\\n[{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\\\n {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},\\\n {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},\\\n {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\\\n {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]\n >>> check_consistency(sym, ctx_list)\n \"\"\"\n if tol is None:\n tol = {np.dtype(np.float16): 1e-1,\n np.dtype(np.float32): 1e-3,\n np.dtype(np.float64): 1e-5,\n np.dtype(np.uint8): 0,\n np.dtype(np.int32): 0}\n elif isinstance(tol, numbers.Number):\n tol = {np.dtype(np.float16): tol,\n np.dtype(np.float32): tol,\n np.dtype(np.float64): tol,\n np.dtype(np.uint8): tol,\n np.dtype(np.int32): tol}\n\n assert len(ctx_list) > 1\n if isinstance(sym, Symbol):\n sym = [sym]*len(ctx_list)\n else:\n assert len(sym) == len(ctx_list)\n\n output_names = sym[0].list_outputs()\n arg_names = sym[0].list_arguments()\n exe_list = []\n for s, ctx in zip(sym, ctx_list):\n assert s.list_arguments() == arg_names\n assert s.list_outputs() == output_names\n exe_list.append(s.simple_bind(grad_req=grad_req, **ctx))\n\n arg_params = {} if arg_params is None else arg_params\n aux_params = {} if aux_params is None else aux_params\n for n, arr in exe_list[0].arg_dict.items():\n if n not in arg_params:\n arg_params[n] = np.random.normal(size=arr.shape, scale=scale)\n for n, arr in exe_list[0].aux_dict.items():\n if n not in aux_params:\n aux_params[n] = 0\n for exe in exe_list:\n for name, arr in exe.arg_dict.items():\n arr[:] = arg_params[name]\n for name, arr in exe.aux_dict.items():\n arr[:] = aux_params[name]\n\n dtypes = [np.dtype(exe.outputs[0].dtype) for exe in exe_list]\n max_idx = np.argmax(dtypes)\n gt = ground_truth\n if gt is None:\n gt = exe_list[max_idx].output_dict.copy()\n if grad_req != 'null':\n gt.update(exe_list[max_idx].grad_dict)\n\n # test\n for exe in exe_list:\n exe.forward(is_train=False)\n\n for i, exe in enumerate(exe_list):\n if i == max_idx:\n continue\n for name, arr in zip(output_names, exe.outputs):\n gtarr = gt[name].astype(dtypes[i]).asnumpy()\n arr = arr.asnumpy()\n try:\n npt.assert_allclose(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]])\n except Exception as e:\n print('Predict Err: ctx %d vs ctx %d at %s'%(i, max_idx, 
name))\n print_max_err_loc(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]])\n traceback.print_exc()\n if raise_on_err:\n raise e\n\n # train\n if grad_req != 'null':\n for exe in exe_list:\n exe.forward(is_train=True)\n exe.backward(exe.outputs)\n\n for i, exe in enumerate(exe_list):\n if i == max_idx:\n continue\n curr = zip(output_names + arg_names, exe.outputs + exe.grad_arrays)\n for name, arr in curr:\n if gt[name] is None:\n assert arr is None\n continue\n gtarr = gt[name].astype(dtypes[i]).asnumpy()\n arr = arr.asnumpy()\n try:\n npt.assert_allclose(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]])\n except Exception as e:\n print('Train Err: ctx %d vs ctx %d at %s'%(i, max_idx, name))\n print_max_err_loc(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]])\n print(e)\n if raise_on_err:\n raise e\n\n return gt\n"
] | [
[
"numpy.abs",
"numpy.array_equal",
"numpy.isnan",
"numpy.ascontiguousarray",
"numpy.set_printoptions",
"numpy.dtype",
"numpy.copy",
"numpy.argmax",
"numpy.random.normal",
"numpy.random.randn",
"numpy.prod",
"numpy.testing.build_err_msg",
"numpy.testing.assert_allclose",
"numpy.random.RandomState",
"numpy.unravel_index",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bcajes/glow | [
"5da50d7eb87e9e6bbc101f87598f2f5121d8927d"
] | [
"python/glow/wgr/ridge_reduction.py"
] | [
"# Copyright 2019 The Glow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .ridge_udfs import *\nfrom .model_functions import _is_binary, _prepare_covariates, _prepare_labels_and_warn, _check_model\nfrom nptyping import Float, NDArray\nimport pandas as pd\nfrom pyspark.sql import DataFrame, Row\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\nimport pyspark.sql.functions as f\nfrom typeguard import typechecked\nfrom typing import Any, Dict, List, Union\nfrom glow.logging import record_hls_event\nimport warnings\n\n# Ignore warning to use applyInPandas instead of apply\n# TODO(hhd): Remove this and start using applyInPandas once we only support Spark 3.x.\nwarnings.filterwarnings('ignore', category=UserWarning, message='.*applyInPandas.*')\n\n__all__ = ['RidgeReduction']\n\n\n@typechecked\nclass RidgeReduction:\n \"\"\"\n The RidgeReduction class is intended to reduce the feature space of an N by M block matrix X to an N by P<<M block\n matrix. This is done by fitting K ridge models within each block of X on one or more target labels, such that a\n block with L columns to begin with will be reduced to a block with K columns, where each column is the prediction\n of one ridge model for one target label.\n \"\"\"\n def __init__(self,\n block_df: DataFrame,\n label_df: pd.DataFrame,\n sample_blocks: Dict[str, List[str]],\n cov_df: pd.DataFrame = pd.DataFrame({}),\n add_intercept: bool = True,\n alphas: List[float] = [],\n label_type='detect') -> None:\n \"\"\"\n Args:\n block_df : Spark DataFrame representing the beginning block matrix X\n label_df : Pandas DataFrame containing the target labels used in fitting the ridge models\n sample_blocks : Dict containing a mapping of sample_block ID to a list of corresponding sample IDs\n cov_df : Pandas DataFrame containing covariates to be included in every model in the stacking\n ensemble (optional).\n add_intercept: If True, an intercept column (all ones) will be added to the covariates\n (as the first column)\n alphas : array_like of alpha values used in the ridge reduction (optional).\n label_type: String to determine type treatment of labels. 
It can be 'detect' (default), 'binary',\n or 'quantitative'.\n \"\"\"\n self.block_df = block_df\n self.sample_blocks = sample_blocks\n self._label_type = label_type\n self.set_label_df(label_df)\n self.set_cov_df(cov_df, add_intercept)\n self.set_alphas(alphas)\n self.model_df = None\n self.reduced_block_df = None\n\n def __getstate__(self):\n # Copy the object's state from self.__dict__ which contains\n state = self.__dict__.copy()\n # Remove the unpicklable entries.\n del state['block_df'], state['model_df'], state['reduced_block_df']\n return state\n\n def set_label_df(self, label_df: pd.DataFrame) -> None:\n self._is_binary = _is_binary(label_df)\n self._std_label_df = _prepare_labels_and_warn(label_df, self._is_binary, self._label_type)\n self._label_df = label_df\n\n def get_label_df(self) -> pd.DataFrame:\n return self._label_df\n\n def set_label_type(self, label_type: str) -> None:\n self._label_type = label_type\n self._std_label_df = _prepare_labels_and_warn(self._label_df, self._is_binary, label_type)\n\n def get_label_type(self) -> str:\n return self._label_type\n\n def set_cov_df(self, cov_df: pd.DataFrame, add_intercept: bool) -> None:\n self._cov_df = cov_df\n self._std_cov_df = _prepare_covariates(cov_df, self._label_df, add_intercept)\n\n def get_cov_df(self) -> pd.DataFrame:\n return self._cov_df\n\n def set_alphas(self, alphas: List[float]) -> None:\n self._alphas = generate_alphas(\n self.block_df) if len(alphas) == 0 else create_alpha_dict(alphas)\n\n def get_alphas(self) -> Dict[str, Float]:\n return self._alphas\n\n def is_binary(self) -> bool:\n return self._is_binary\n\n def fit(self) -> DataFrame:\n \"\"\"\n Fits a ridge reducer model, represented by a Spark DataFrame containing coefficients for each of the ridge\n alpha parameters, for each block in the starting matrix, for each label in the target labels.\n\n Returns:\n Spark DataFrame containing the model resulting from the fitting routine.\n \"\"\"\n\n map_key_pattern = ['header_block', 'sample_block']\n reduce_key_pattern = ['header_block', 'header']\n\n if 'label' in self.block_df.columns:\n map_key_pattern.append('label')\n reduce_key_pattern.append('label')\n\n map_udf = pandas_udf(\n lambda key, pdf: map_normal_eqn(key, map_key_pattern, pdf, self._std_label_df, self.\n sample_blocks, self._std_cov_df), normal_eqn_struct,\n PandasUDFType.GROUPED_MAP)\n reduce_udf = pandas_udf(lambda key, pdf: reduce_normal_eqn(key, reduce_key_pattern, pdf),\n normal_eqn_struct, PandasUDFType.GROUPED_MAP)\n model_udf = pandas_udf(\n lambda key, pdf: solve_normal_eqn(key, map_key_pattern, pdf, self._std_label_df, self.\n _alphas, self._std_cov_df), model_struct,\n PandasUDFType.GROUPED_MAP)\n\n record_hls_event('wgrRidgeReduceFit')\n\n self.model_df = self.block_df.groupBy(map_key_pattern).apply(map_udf).groupBy(\n reduce_key_pattern).apply(reduce_udf).groupBy(map_key_pattern).apply(model_udf)\n\n return self.model_df\n\n def transform(self) -> DataFrame:\n \"\"\"\n Transforms a starting block matrix to the reduced block matrix, using a reducer model produced by the\n RidgeReduction fit method.\n\n Returns:\n Spark DataFrame representing the reduced block matrix\n \"\"\"\n _check_model(self.model_df)\n\n transform_key_pattern = ['header_block', 'sample_block']\n\n if 'label' in self.block_df.columns:\n transform_key_pattern.append('label')\n joined = self.block_df.drop('sort_key') \\\n .join(self.model_df, ['header_block', 'sample_block', 'header'], 'right') \\\n .withColumn('label', f.coalesce(f.col('label'), 
f.col('labels').getItem(0)))\n else:\n joined = self.block_df.drop('sort_key') \\\n .join(self.model_df, ['header_block', 'sample_block', 'header'], 'right')\n\n transform_udf = pandas_udf(\n lambda key, pdf: apply_model(key, transform_key_pattern, pdf, self._std_label_df, self.\n sample_blocks, self._alphas, self._std_cov_df),\n reduced_matrix_struct, PandasUDFType.GROUPED_MAP)\n\n record_hls_event('wgrRidgeReduceTransform')\n\n self.reduced_block_df = joined.groupBy(transform_key_pattern).apply(transform_udf)\n\n return self.reduced_block_df\n\n def fit_transform(self) -> DataFrame:\n \"\"\"\n Fits a ridge reduction model with a block matrix, then transforms the matrix using the model.\n\n Returns:\n Spark DataFrame representing the reduced block matrix\n \"\"\"\n\n self.fit()\n return self.transform()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
TheBuoys/neuropy | [
"387a5eaa31629c726b5de1e71090b0d644f675b2"
] | [
"test/mock_objects/simple_data_loader.py"
] | [
"import sys\nimport tensorflow as tf\nimport numpy as np\nimport neuropy\n\n# tf.enable_eager_execution()\n\n#Simple example data loader for testing.\nclass DataLoader(neuropy.base.BaseDataLoader):\n def __init__(self, configuration, model_parameters):\n super(DataLoader, self).__init__(configuration, model_parameters)\n\n def get_data(self):\n return tf.data.Dataset.from_tensor_slices(np.array([0.1, 0.2, 0.3, 0.4]))\n\n def get_validation(self):\n return tf.data.Dataset.from_tensor_slices(np.array([0.05, 0.15, 0.25, 0.35]))\n\n def get_inference_dataset(self):\n return self.get_data().batch(self.model_parameters[\"batch_size\"])\n\n def get_training_dataset(self):\n data = self.get_data().repeat(self.model_parameters[\"epochs\"])\n data = tf.data.Dataset.zip((data, data))\n data = data.batch(self.model_parameters[\"batch_size\"])\n data = data.repeat(40)\n data = data.shuffle(20)\n return data\n\n"
] | [
[
"tensorflow.data.Dataset.zip",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
biomedia-mira/deepscm | [
"2dbfcde0d1fed63553c993d4abe4a74a59dc05d0"
] | [
"deepscm/experiments/medical/ukbb/sem_vi/base_sem_experiment.py"
] | [
"import pyro\n\nfrom typing import Mapping\n\nfrom pyro.infer import SVI, TraceGraph_ELBO\nfrom pyro.nn import pyro_method\nfrom pyro.optim import Adam\nfrom torch.distributions import Independent\n\nimport torch\nfrom pyro.distributions.torch_transform import ComposeTransformModule\nfrom pyro.distributions.transforms import (\n ComposeTransform, AffineTransform, ExpTransform, Spline\n)\nfrom pyro.distributions import LowRankMultivariateNormal, MultivariateNormal, Normal, TransformedDistribution\nfrom deepscm.arch.medical import Decoder, Encoder\nfrom deepscm.distributions.transforms.reshape import ReshapeTransform\nfrom deepscm.distributions.transforms.affine import LowerCholeskyAffine\n\nfrom deepscm.distributions.deep import DeepMultivariateNormal, DeepIndepNormal, Conv2dIndepNormal, DeepLowRankMultivariateNormal\n\nimport numpy as np\n\nfrom deepscm.experiments.medical.base_experiment import BaseCovariateExperiment, BaseSEM, EXPERIMENT_REGISTRY, MODEL_REGISTRY # noqa: F401\n\n\nclass CustomELBO(TraceGraph_ELBO):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.trace_storage = {'model': None, 'guide': None}\n\n def _get_trace(self, model, guide, args, kwargs):\n model_trace, guide_trace = super()._get_trace(model, guide, args, kwargs)\n\n self.trace_storage['model'] = model_trace\n self.trace_storage['guide'] = guide_trace\n\n return model_trace, guide_trace\n\n\nclass Lambda(torch.nn.Module):\n def __init__(self, func):\n super().__init__()\n self.func = func\n\n def forward(self, x):\n return self.func(x)\n\n\nclass BaseVISEM(BaseSEM):\n context_dim = 0\n\n def __init__(self, latent_dim: int, logstd_init: float = -5, enc_filters: str = '16,32,64,128', dec_filters: str = '128,64,32,16',\n num_convolutions: int = 2, use_upconv: bool = False, decoder_type: str = 'fixed_var', decoder_cov_rank: int = 10, **kwargs):\n super().__init__(**kwargs)\n\n self.img_shape = (1, 192 // self.downsample, 192 // self.downsample) if self.downsample > 0 else (1, 192, 192)\n\n self.latent_dim = latent_dim\n self.logstd_init = logstd_init\n\n self.enc_filters = tuple(int(f.strip()) for f in enc_filters.split(','))\n self.dec_filters = tuple(int(f.strip()) for f in dec_filters.split(','))\n self.num_convolutions = num_convolutions\n self.use_upconv = use_upconv\n self.decoder_type = decoder_type\n self.decoder_cov_rank = decoder_cov_rank\n\n # decoder parts\n decoder = Decoder(\n num_convolutions=self.num_convolutions, filters=self.dec_filters,\n latent_dim=self.latent_dim + self.context_dim, upconv=self.use_upconv,\n output_size=self.img_shape)\n\n if self.decoder_type == 'fixed_var':\n self.decoder = Conv2dIndepNormal(decoder, 1, 1)\n\n torch.nn.init.zeros_(self.decoder.logvar_head.weight)\n self.decoder.logvar_head.weight.requires_grad = False\n\n torch.nn.init.constant_(self.decoder.logvar_head.bias, self.logstd_init)\n self.decoder.logvar_head.bias.requires_grad = False\n elif self.decoder_type == 'learned_var':\n self.decoder = Conv2dIndepNormal(decoder, 1, 1)\n\n torch.nn.init.zeros_(self.decoder.logvar_head.weight)\n self.decoder.logvar_head.weight.requires_grad = False\n\n torch.nn.init.constant_(self.decoder.logvar_head.bias, self.logstd_init)\n self.decoder.logvar_head.bias.requires_grad = True\n elif self.decoder_type == 'independent_gaussian':\n self.decoder = Conv2dIndepNormal(decoder, 1, 1)\n\n torch.nn.init.zeros_(self.decoder.logvar_head.weight)\n self.decoder.logvar_head.weight.requires_grad = True\n\n torch.nn.init.normal_(self.decoder.logvar_head.bias, 
self.logstd_init, 1e-1)\n self.decoder.logvar_head.bias.requires_grad = True\n elif self.decoder_type == 'multivariate_gaussian':\n seq = torch.nn.Sequential(decoder, Lambda(lambda x: x.view(x.shape[0], -1)))\n self.decoder = DeepMultivariateNormal(seq, np.prod(self.img_shape), np.prod(self.img_shape))\n elif self.decoder_type == 'sharedvar_multivariate_gaussian':\n seq = torch.nn.Sequential(decoder, Lambda(lambda x: x.view(x.shape[0], -1)))\n self.decoder = DeepMultivariateNormal(seq, np.prod(self.img_shape), np.prod(self.img_shape))\n\n torch.nn.init.zeros_(self.decoder.logdiag_head.weight)\n self.decoder.logdiag_head.weight.requires_grad = False\n\n torch.nn.init.zeros_(self.decoder.lower_head.weight)\n self.decoder.lower_head.weight.requires_grad = False\n\n torch.nn.init.normal_(self.decoder.logdiag_head.bias, self.logstd_init, 1e-1)\n self.decoder.logdiag_head.bias.requires_grad = True\n elif self.decoder_type == 'lowrank_multivariate_gaussian':\n seq = torch.nn.Sequential(decoder, Lambda(lambda x: x.view(x.shape[0], -1)))\n self.decoder = DeepLowRankMultivariateNormal(seq, np.prod(self.img_shape), np.prod(self.img_shape), decoder_cov_rank)\n elif self.decoder_type == 'sharedvar_lowrank_multivariate_gaussian':\n seq = torch.nn.Sequential(decoder, Lambda(lambda x: x.view(x.shape[0], -1)))\n self.decoder = DeepLowRankMultivariateNormal(seq, np.prod(self.img_shape), np.prod(self.img_shape), decoder_cov_rank)\n\n torch.nn.init.zeros_(self.decoder.logdiag_head.weight)\n self.decoder.logdiag_head.weight.requires_grad = False\n\n torch.nn.init.zeros_(self.decoder.factor_head.weight)\n self.decoder.factor_head.weight.requires_grad = False\n\n torch.nn.init.normal_(self.decoder.logdiag_head.bias, self.logstd_init, 1e-1)\n self.decoder.logdiag_head.bias.requires_grad = True\n else:\n raise ValueError('unknown ')\n\n # encoder parts\n self.encoder = Encoder(num_convolutions=self.num_convolutions, filters=self.enc_filters, latent_dim=self.latent_dim, input_size=self.img_shape)\n\n latent_layers = torch.nn.Sequential(torch.nn.Linear(self.latent_dim + self.context_dim, self.latent_dim), torch.nn.ReLU())\n self.latent_encoder = DeepIndepNormal(latent_layers, self.latent_dim, self.latent_dim)\n\n # priors\n self.register_buffer('age_base_loc', torch.zeros([1, ], requires_grad=False))\n self.register_buffer('age_base_scale', torch.ones([1, ], requires_grad=False))\n\n self.sex_logits = torch.nn.Parameter(torch.zeros([1, ]))\n\n self.register_buffer('ventricle_volume_base_loc', torch.zeros([1, ], requires_grad=False))\n self.register_buffer('ventricle_volume_base_scale', torch.ones([1, ], requires_grad=False))\n\n self.register_buffer('brain_volume_base_loc', torch.zeros([1, ], requires_grad=False))\n self.register_buffer('brain_volume_base_scale', torch.ones([1, ], requires_grad=False))\n\n self.register_buffer('z_loc', torch.zeros([latent_dim, ], requires_grad=False))\n self.register_buffer('z_scale', torch.ones([latent_dim, ], requires_grad=False))\n\n self.register_buffer('x_base_loc', torch.zeros(self.img_shape, requires_grad=False))\n self.register_buffer('x_base_scale', torch.ones(self.img_shape, requires_grad=False))\n\n self.register_buffer('age_flow_lognorm_loc', torch.zeros([], requires_grad=False))\n self.register_buffer('age_flow_lognorm_scale', torch.ones([], requires_grad=False))\n\n self.register_buffer('ventricle_volume_flow_lognorm_loc', torch.zeros([], requires_grad=False))\n self.register_buffer('ventricle_volume_flow_lognorm_scale', torch.ones([], requires_grad=False))\n\n 
self.register_buffer('brain_volume_flow_lognorm_loc', torch.zeros([], requires_grad=False))\n self.register_buffer('brain_volume_flow_lognorm_scale', torch.ones([], requires_grad=False))\n\n # age flow\n self.age_flow_components = ComposeTransformModule([Spline(1)])\n self.age_flow_lognorm = AffineTransform(loc=self.age_flow_lognorm_loc.item(), scale=self.age_flow_lognorm_scale.item())\n self.age_flow_constraint_transforms = ComposeTransform([self.age_flow_lognorm, ExpTransform()])\n self.age_flow_transforms = ComposeTransform([self.age_flow_components, self.age_flow_constraint_transforms])\n\n # other flows shared components\n self.ventricle_volume_flow_lognorm = AffineTransform(loc=self.ventricle_volume_flow_lognorm_loc.item(), scale=self.ventricle_volume_flow_lognorm_scale.item()) # noqa: E501\n self.ventricle_volume_flow_constraint_transforms = ComposeTransform([self.ventricle_volume_flow_lognorm, ExpTransform()])\n\n self.brain_volume_flow_lognorm = AffineTransform(loc=self.brain_volume_flow_lognorm_loc.item(), scale=self.brain_volume_flow_lognorm_scale.item())\n self.brain_volume_flow_constraint_transforms = ComposeTransform([self.brain_volume_flow_lognorm, ExpTransform()])\n\n def __setattr__(self, name, value):\n super().__setattr__(name, value)\n\n if name == 'age_flow_lognorm_loc':\n self.age_flow_lognorm.loc = self.age_flow_lognorm_loc.item()\n elif name == 'age_flow_lognorm_scale':\n self.age_flow_lognorm.scale = self.age_flow_lognorm_scale.item()\n elif name == 'ventricle_volume_flow_lognorm_loc':\n self.ventricle_volume_flow_lognorm.loc = self.ventricle_volume_flow_lognorm_loc.item()\n elif name == 'ventricle_volume_flow_lognorm_scale':\n self.ventricle_volume_flow_lognorm.scale = self.ventricle_volume_flow_lognorm_scale.item()\n elif name == 'brain_volume_flow_lognorm_loc':\n self.brain_volume_flow_lognorm.loc = self.brain_volume_flow_lognorm_loc.item()\n elif name == 'brain_volume_flow_lognorm_scale':\n self.brain_volume_flow_lognorm.scale = self.brain_volume_flow_lognorm_scale.item()\n\n def _get_preprocess_transforms(self):\n return super()._get_preprocess_transforms().inv\n\n def _get_transformed_x_dist(self, latent):\n x_pred_dist = self.decoder.predict(latent)\n x_base_dist = Normal(self.x_base_loc, self.x_base_scale).to_event(3)\n\n preprocess_transform = self._get_preprocess_transforms()\n\n if isinstance(x_pred_dist, MultivariateNormal) or isinstance(x_pred_dist, LowRankMultivariateNormal):\n chol_transform = LowerCholeskyAffine(x_pred_dist.loc, x_pred_dist.scale_tril)\n reshape_transform = ReshapeTransform(self.img_shape, (np.prod(self.img_shape), ))\n x_reparam_transform = ComposeTransform([reshape_transform, chol_transform, reshape_transform.inv])\n elif isinstance(x_pred_dist, Independent):\n x_pred_dist = x_pred_dist.base_dist\n x_reparam_transform = AffineTransform(x_pred_dist.loc, x_pred_dist.scale, 3)\n\n return TransformedDistribution(x_base_dist, ComposeTransform([x_reparam_transform, preprocess_transform]))\n\n @pyro_method\n def guide(self, x, age, sex, ventricle_volume, brain_volume):\n raise NotImplementedError()\n\n @pyro_method\n def svi_guide(self, x, age, sex, ventricle_volume, brain_volume):\n self.guide(x, age, sex, ventricle_volume, brain_volume)\n\n @pyro_method\n def svi_model(self, x, age, sex, ventricle_volume, brain_volume):\n with pyro.plate('observations', x.shape[0]):\n pyro.condition(self.model, data={'x': x, 'sex': sex, 'age': age, 'ventricle_volume': ventricle_volume, 'brain_volume': brain_volume})()\n\n @pyro_method\n def 
infer_z(self, *args, **kwargs):\n return self.guide(*args, **kwargs)\n\n @pyro_method\n def infer(self, **obs):\n _required_data = ('x', 'sex', 'age', 'ventricle_volume', 'brain_volume')\n assert set(obs.keys()) == set(_required_data), 'got: {}'.format(tuple(obs.keys()))\n\n z = self.infer_z(**obs)\n\n exogeneous = self.infer_exogeneous(z=z, **obs)\n exogeneous['z'] = z\n\n return exogeneous\n\n @pyro_method\n def reconstruct(self, x, age, sex, ventricle_volume, brain_volume, num_particles: int = 1):\n obs = {'x': x, 'sex': sex, 'age': age, 'ventricle_volume': ventricle_volume, 'brain_volume': brain_volume}\n z_dist = pyro.poutine.trace(self.guide).get_trace(**obs).nodes['z']['fn']\n\n recons = []\n for _ in range(num_particles):\n z = pyro.sample('z', z_dist)\n recon, *_ = pyro.poutine.condition(\n self.sample, data={'sex': sex, 'age': age, 'ventricle_volume': ventricle_volume, 'brain_volume': brain_volume, 'z': z})(x.shape[0])\n recons += [recon]\n return torch.stack(recons).mean(0)\n\n @pyro_method\n def counterfactual(self, obs: Mapping, condition: Mapping = None, num_particles: int = 1):\n _required_data = ('x', 'sex', 'age', 'ventricle_volume', 'brain_volume')\n assert set(obs.keys()) == set(_required_data), 'got: {}'.format(tuple(obs.keys()))\n\n z_dist = pyro.poutine.trace(self.guide).get_trace(**obs).nodes['z']['fn']\n\n counterfactuals = []\n for _ in range(num_particles):\n z = pyro.sample('z', z_dist)\n\n exogeneous = self.infer_exogeneous(z=z, **obs)\n exogeneous['z'] = z\n # condition on sex if sex isn't included in 'do' as it's a root node and we don't have the exogeneous noise for it yet...\n if 'sex' not in condition.keys():\n exogeneous['sex'] = obs['sex']\n\n counter = pyro.poutine.do(pyro.poutine.condition(self.sample_scm, data=exogeneous), data=condition)(obs['x'].shape[0])\n counterfactuals += [counter]\n return {k: v for k, v in zip(('x', 'z', 'sex', 'age', 'ventricle_volume', 'brain_volume'), (torch.stack(c).mean(0) for c in zip(*counterfactuals)))}\n\n @classmethod\n def add_arguments(cls, parser):\n parser = super().add_arguments(parser)\n\n parser.add_argument('--latent_dim', default=100, type=int, help=\"latent dimension of model (default: %(default)s)\")\n parser.add_argument('--logstd_init', default=-5, type=float, help=\"init of logstd (default: %(default)s)\")\n parser.add_argument('--enc_filters', default='16,24,32,64,128', type=str, help=\"number of filters to use (default: %(default)s)\")\n parser.add_argument('--dec_filters', default='128,64,32,24,16', type=str, help=\"number of filters to use (default: %(default)s)\")\n parser.add_argument('--num_convolutions', default=3, type=int, help=\"number of convolutions to build model (default: %(default)s)\")\n parser.add_argument('--use_upconv', default=False, action='store_true', help=\"toogle upconv (default: %(default)s)\")\n parser.add_argument(\n '--decoder_type', default='fixed_var', help=\"var type (default: %(default)s)\",\n choices=['fixed_var', 'learned_var', 'independent_gaussian', 'sharedvar_multivariate_gaussian', 'multivariate_gaussian',\n 'sharedvar_lowrank_multivariate_gaussian', 'lowrank_multivariate_gaussian'])\n parser.add_argument('--decoder_cov_rank', default=10, type=int, help=\"rank for lowrank cov approximation (requires lowrank decoder) (default: %(default)s)\") # noqa: E501\n\n return parser\n\n\nclass SVIExperiment(BaseCovariateExperiment):\n def __init__(self, hparams, pyro_model: BaseSEM):\n super().__init__(hparams, pyro_model)\n\n self.svi_loss = 
CustomELBO(num_particles=hparams.num_svi_particles)\n\n self._build_svi()\n\n def _build_svi(self, loss=None):\n def per_param_callable(module_name, param_name):\n params = {'eps': 1e-5, 'amsgrad': self.hparams.use_amsgrad, 'weight_decay': self.hparams.l2}\n if 'flow_components' in module_name or 'sex_logits' in param_name:\n params['lr'] = self.hparams.pgm_lr\n else:\n params['lr'] = self.hparams.lr\n\n print(f'building opt for {module_name} - {param_name} with p: {params}')\n return params\n\n if loss is None:\n loss = self.svi_loss\n\n if self.hparams.use_cf_guide:\n def guide(*args, **kwargs):\n return self.pyro_model.counterfactual_guide(*args, **kwargs, counterfactual_type=self.hparams.cf_elbo_type)\n self.svi = SVI(self.pyro_model.svi_model, guide, Adam(per_param_callable), loss)\n else:\n self.svi = SVI(self.pyro_model.svi_model, self.pyro_model.svi_guide, Adam(per_param_callable), loss)\n self.svi.loss_class = loss\n\n def backward(self, *args, **kwargs):\n pass # No loss to backpropagate since we're using Pyro's optimisation machinery\n\n def print_trace_updates(self, batch):\n with torch.no_grad():\n print('Traces:\\n' + ('#' * 10))\n\n guide_trace = pyro.poutine.trace(self.pyro_model.svi_guide).get_trace(**batch)\n model_trace = pyro.poutine.trace(pyro.poutine.replay(self.pyro_model.svi_model, trace=guide_trace)).get_trace(**batch)\n\n guide_trace = pyro.poutine.util.prune_subsample_sites(guide_trace)\n model_trace = pyro.poutine.util.prune_subsample_sites(model_trace)\n\n model_trace.compute_log_prob()\n guide_trace.compute_score_parts()\n\n print(f'model: {model_trace.nodes.keys()}')\n for name, site in model_trace.nodes.items():\n if site[\"type\"] == \"sample\":\n fn = site['fn']\n if isinstance(fn, Independent):\n fn = fn.base_dist\n print(f'{name}: {fn} - {fn.support}')\n log_prob_sum = site[\"log_prob_sum\"]\n is_obs = site[\"is_observed\"]\n print(f'model - log p({name}) = {log_prob_sum} | obs={is_obs}')\n if torch.isnan(log_prob_sum):\n value = site['value'][0]\n conc0 = fn.concentration0\n conc1 = fn.concentration1\n\n print(f'got:\\n{value}\\n{conc0}\\n{conc1}')\n\n raise Exception()\n\n print(f'guide: {guide_trace.nodes.keys()}')\n\n for name, site in guide_trace.nodes.items():\n if site[\"type\"] == \"sample\":\n fn = site['fn']\n if isinstance(fn, Independent):\n fn = fn.base_dist\n print(f'{name}: {fn} - {fn.support}')\n entropy = site[\"score_parts\"].entropy_term.sum()\n is_obs = site[\"is_observed\"]\n print(f'guide - log q({name}) = {entropy} | obs={is_obs}')\n\n def get_trace_metrics(self, batch):\n metrics = {}\n\n model = self.svi.loss_class.trace_storage['model']\n guide = self.svi.loss_class.trace_storage['guide']\n\n metrics['log p(x)'] = model.nodes['x']['log_prob'].mean()\n metrics['log p(age)'] = model.nodes['age']['log_prob'].mean()\n metrics['log p(sex)'] = model.nodes['sex']['log_prob'].mean()\n metrics['log p(ventricle_volume)'] = model.nodes['ventricle_volume']['log_prob'].mean()\n metrics['log p(brain_volume)'] = model.nodes['brain_volume']['log_prob'].mean()\n metrics['p(z)'] = model.nodes['z']['log_prob'].mean()\n metrics['q(z)'] = guide.nodes['z']['log_prob'].mean()\n metrics['log p(z) - log q(z)'] = metrics['p(z)'] - metrics['q(z)']\n\n return metrics\n\n def prep_batch(self, batch):\n x = batch['image'] * 255.\n age = batch['age'].unsqueeze(1).float()\n sex = batch['sex'].unsqueeze(1).float()\n ventricle_volume = batch['ventricle_volume'].unsqueeze(1).float()\n brain_volume = batch['brain_volume'].unsqueeze(1).float()\n\n x = x.float()\n\n 
if self.training:\n x += torch.rand_like(x)\n\n return {'x': x, 'age': age, 'sex': sex, 'ventricle_volume': ventricle_volume, 'brain_volume': brain_volume}\n\n def training_step(self, batch, batch_idx):\n batch = self.prep_batch(batch)\n\n if self.hparams.validate:\n print('Validation:')\n self.print_trace_updates(batch)\n\n loss = self.svi.step(**batch)\n\n metrics = self.get_trace_metrics(batch)\n\n if np.isnan(loss):\n self.logger.experiment.add_text('nan', f'nand at {self.current_epoch}:\\n{metrics}')\n raise ValueError('loss went to nan with metrics:\\n{}'.format(metrics))\n\n tensorboard_logs = {('train/' + k): v for k, v in metrics.items()}\n tensorboard_logs['train/loss'] = loss\n\n self.log_dict(tensorboard_logs)\n\n return torch.Tensor([loss])\n\n def validation_step(self, batch, batch_idx):\n batch = self.prep_batch(batch)\n\n loss = self.svi.evaluate_loss(**batch)\n\n metrics = self.get_trace_metrics(batch)\n\n return {'loss': loss, **metrics}\n\n def test_step(self, batch, batch_idx):\n batch = self.prep_batch(batch)\n\n loss = self.svi.evaluate_loss(**batch)\n\n metrics = self.get_trace_metrics(batch)\n\n samples = self.build_test_samples(batch)\n\n return {'loss': loss, **metrics, 'samples': samples}\n\n @classmethod\n def add_arguments(cls, parser):\n parser = super().add_arguments(parser)\n\n parser.add_argument('--num_svi_particles', default=4, type=int, help=\"number of particles to use for ELBO (default: %(default)s)\")\n parser.add_argument('--num_sample_particles', default=32, type=int, help=\"number of particles to use for MC sampling (default: %(default)s)\")\n parser.add_argument('--use_cf_guide', default=False, action='store_true', help=\"whether to use counterfactual guide (default: %(default)s)\")\n parser.add_argument(\n '--cf_elbo_type', default=-1, choices=[-1, 0, 1, 2],\n help=\"-1: randomly select per batch, 0: shuffle thickness, 1: shuffle intensity, 2: shuffle both (default: %(default)s)\")\n\n return parser\n\n\nEXPERIMENT_REGISTRY[SVIExperiment.__name__] = SVIExperiment\n"
] | [
[
"torch.ones",
"torch.Tensor",
"torch.zeros",
"numpy.isnan",
"torch.nn.init.constant_",
"torch.rand_like",
"torch.isnan",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.init.normal_",
"numpy.prod",
"torch.nn.init.zeros_",
"torch.stack",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
junan146/Distributed-Deep-Learning | [
"5cdb624b91a44d0a7eb339d07a19b1f7fe64ab4a"
] | [
"mnist_async_sharding_greedy/worker.py"
] | [
"from model import Model\nfrom mpi4py import MPI\nfrom typing import List\nimport numpy as np\nimport tensorflow as tf\nimport time,sys\nfrom functools import reduce\n\nclass SyncWorker(Model):\n def __init__(self, batch_size, rank, num_ps, num_workers):\n super().__init__()\n\n ''' Modify var_bucket & var_shape for greedy ordering '''\n # Sort parameters \n tmp = {i: reduce(lambda x, y: x*y, self.var_shape[i].as_list()) for i in range(self.var_size)}\n tmp = sorted(tmp, key=tmp.get)\n\n # Reorder parameters\n self.greedy_order = []\n i = 0\n j = len(tmp) - 1\n while i < j:\n self.greedy_order.append(tmp[i])\n self.greedy_order.append(tmp[j])\n i += 1\n j -= 1\n\n # Add mid value if the number of parameters is odd\n if len(tmp) % 2:\n self.greedy_order.append(tmp[i])\n\n # Modify var_bucket\n with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE):\n self.var_bucket = [tf.compat.v1.get_variable(\"v{}\".format(i), shape=self.var_shape[i], dtype=tf.float32) for i in self.greedy_order]\n\n # Modify var_shape\n self.var_shape = [self.var_shape[i] for i in self.greedy_order]\n \n # Set rank of worker\n # rank: number of parameter servers ~ number of parameter servers + number of workers - 1\n self.rank = rank\n \n # Set number of parameter servers & workers\n self.num_workers = num_workers\n self.num_ps = num_ps\n self.avg_var_size = self.var_size // self.num_ps\n self.local_var_size = self.avg_var_size + self.var_size % self.num_ps\n\n self.batch_size = batch_size\n self.grad_buckets = [tf.compat.v1.placeholder(shape=self.var_shape[i], dtype=tf.float32) for i in range(self.var_size)]\n self.senders = [tf.py_function(func=self.wrap_send(i), inp=[self.grad_buckets[i]], Tout=[]) for i in range(self.var_size)]\n \n def wrap_send(self, num):\n def send(grad):\n # Send data to parameter server\n ind = num // self.avg_var_size\n if num >= self.var_size - self.local_var_size:\n ind = self.num_ps-1\n comm.Send([grad, MPI.FLOAT], dest=ind, tag=num-(ind*self.avg_var_size))\n\n return None\n\n return send\n\n def work(self, cnt):\n x_batch = self.x_train[self.batch_size*cnt:self.batch_size*(cnt+1)]\n y_batch = self.y_train[self.batch_size*cnt:self.batch_size*(cnt+1)]\n ret, = self.sess.run([self.grads], feed_dict={self.x: x_batch, self.y_: y_batch, self.keep_prob: 0.5})\n grads = [grad for grad, var in ret] # gradient tuple\n \n for i in range(self.var_size):\n self.sess.run([self.senders[i]], feed_dict={self.grad_buckets[i]: grads[self.greedy_order[i]]})\n\n\nif __name__ == \"__main__\":\n epoch = 1\n batch_size = 100\n comm = MPI.COMM_WORLD\n\n # Set rank of worker\n # rank: number of parameter servers ~ number of parameter servers + number of workers - 1\n rank = comm.Get_rank()\n \n # Set number of parameter servers & workers\n num_workers = int(sys.argv[2])\n num_ps = comm.Get_size() - num_workers\n\n start = time.clock()\n worker = SyncWorker(batch_size, rank, num_ps, num_workers)\n\n # Send parameters to parameter server\n if worker.rank == worker.num_ps:\n data = {\"size\": worker.var_size, \"shape\": worker.var_shape, \"total_batch\": worker.x_train.shape[0]}\n for i in range(worker.num_ps):\n comm.send(data, dest=i, tag=0)\n\n # For broadcasting\n bucket = [np.empty(worker.var_shape[i], dtype=np.float32) for i in range(worker.var_size)]\n ph_bucket = [tf.compat.v1.placeholder(shape=worker.var_shape[i], dtype=tf.float32) for i in range(worker.var_size)]\n\n bucket_assign = [tf.compat.v1.assign(worker.var_bucket[i], ph_bucket[i]) for i in range(worker.var_size)]\n\n for step in 
range(epoch):\n batch_num = int(worker.x_train.shape[0]/batch_size)\n for batch_cnt in range(batch_num):\n\n # Calculate gradients then send them to parameter server\n worker.work(batch_cnt)\n\n # Receive data from parameter server\n for i in range(worker.var_size):\n ind = i // worker.avg_var_size\n if i >= worker.var_size - worker.local_var_size:\n ind = worker.num_ps-1\n comm.Recv([bucket[i], MPI.FLOAT], source=ind, tag=i-(ind*worker.avg_var_size))\n \n # Assign broadcasted values\n worker.sess.run(bucket_assign, feed_dict={ph_bucket[i]:bucket[i] for i in range(worker.var_size)})\n if batch_cnt % 10 == 0:\n print(\"Worker{} epoch: {} batch: {} accuracy: {}\".format(rank,step,batch_cnt,worker.sess.run(worker.accuracy, feed_dict={worker.x: worker.x_test, worker.y_: worker.y_test, worker.keep_prob: 1.0})))\n \n end = time.clock()\n print(\"Worker{} final accuracy: {}\".format(rank,worker.sess.run(worker.accuracy, feed_dict={worker.x: worker.x_test, worker.y_: worker.y_test, worker.keep_prob: 1.0})))\n print(\"Time: {}\".format(end-start))\n"
] | [
[
"tensorflow.compat.v1.assign",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.variable_scope",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ducviet00/HMER | [
"0fa322ed35412737a24ec3955c9a3d96d1989bd4"
] | [
"Train.py"
] | [
"'''\nPython 3.6 \nPytorch >= 0.4\nWritten by Hongyu Wang in Beihang university\n'''\nimport torch\nimport math\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy\nimport torch.utils.data as data\nfrom data_iterator import dataIterator\nfrom Densenet_torchvision import densenet121\nfrom Attention_RNN import AttnDecoderRNN\n#from Resnet101 import resnet101\nimport random\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom Discriminator import Discriminator\nfrom Adversarial_Loss import Adversarial_Loss, Loss_D\n\n\n# compute the wer loss\ndef cmp_result(label,rec):\n dist_mat = numpy.zeros((len(label)+1, len(rec)+1),dtype='int32')\n dist_mat[0,:] = range(len(rec) + 1)\n dist_mat[:,0] = range(len(label) + 1)\n for i in range(1, len(label) + 1):\n for j in range(1, len(rec) + 1):\n hit_score = dist_mat[i-1, j-1] + (label[i-1] != rec[j-1])\n ins_score = dist_mat[i,j-1] + 1\n del_score = dist_mat[i-1, j] + 1\n dist_mat[i,j] = min(hit_score, ins_score, del_score)\n dist = dist_mat[len(label), len(rec)]\n return dist, len(label)\n\ndef load_dict(dictFile):\n fp=open(dictFile)\n stuff=fp.readlines()\n fp.close()\n lexicon={}\n for l in stuff:\n w=l.strip().split()\n lexicon[w[0]]=int(w[1])\n print('total words/phones',len(lexicon))\n return lexicon\n\ndatasets=['./offline-train.pkl','./train_caption.txt']\nvalid_datasets=['./offline-test.pkl', './test_caption.txt']\ndictionaries=['./dictionary.txt']\nbatch_Imagesize=500000\nvalid_batch_Imagesize=500000\n# batch_size for training and testing\nbatch_size=2\nbatch_size_t=2\n# the max (label length/Image size) in training and testing\n# you can change 'maxlen','maxImagesize' by the size of your GPU\nmaxlen=48\nmaxImagesize= 100000\n# hidden_size in RNN\nhidden_size = 256\n# teacher_forcing_ratio \nteacher_forcing_ratio = 1\n# change the gpu id \ngpu = [0]\n# learning rate\nlr_rate = 0.00001\n# flag to remember when to change the learning rate\nflag = 0\n# exprate\nexprate = 0\n\n# worddicts\nworddicts = load_dict(dictionaries[0])\nworddicts_r = [None] * len(worddicts)\nfor kk, vv in worddicts.items():\n worddicts_r[vv] = kk\n\n#load train data and test data\ntrain,train_label = dataIterator(\n datasets[0], datasets[1],worddicts,batch_size=1,\n batch_Imagesize=batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize\n )\nlen_train = len(train)\n\ntest,test_label = dataIterator(\n valid_datasets[0],valid_datasets[1],worddicts,batch_size=1,\n batch_Imagesize=batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize\n )\nlen_test = len(test)\n\nclass custom_dset(data.Dataset):\n def __init__(self,train,train_label,batch_size):\n self.train = train\n self.train_label = train_label\n self.batch_size = batch_size\n\n def __getitem__(self, index):\n train_setting = torch.from_numpy(numpy.array(self.train[index]))\n label_setting = torch.from_numpy(numpy.array(self.train_label[index])).type(torch.LongTensor)\n\n size = train_setting.size()\n\n # print(\"size: \", size)\n train_setting = train_setting.view(1,size[2],size[3])\n # print(\"train set: \", train_setting.size())\n train_print = torch.rand(train_setting.size())\n # print(\"train print: \", train_print.size())\n label_setting = label_setting.view(-1)\n return train_setting,label_setting, train_print\n\n def __len__(self):\n return len(self.train)\n\n\noff_image_train = custom_dset(train,train_label,batch_size)\noff_image_test = custom_dset(test,test_label,batch_size)\n\n# collate_fn is writting for padding imgs in batch. 
\n# As images in my dataset are different size, so the padding is necessary.\n# Padding images to the max image size in a mini-batch and cat a mask. \n\ndef padding_mask(ii, aa1, bb1):\n ii = ii.float()\n img_size_h = ii.size()[1]\n img_size_w = ii.size()[2]\n img_mask_sub_s = torch.ones(1,img_size_h,img_size_w).type(torch.FloatTensor)\n img_mask_sub_s = img_mask_sub_s*255.0\n img_mask_sub = torch.cat((ii,img_mask_sub_s),dim=0)\n padding_h = aa1-img_size_h\n padding_w = bb1-img_size_w\n m = torch.nn.ZeroPad2d((0,padding_w,0,padding_h))\n img_mask_sub_padding = m(img_mask_sub)\n img_mask_sub_padding = img_mask_sub_padding.unsqueeze(0)\n return img_mask_sub_padding\n\ndef collate_fn(batch):\n batch.sort(key=lambda x: len(x[1]), reverse=True)\n img, label, img_p = zip(*batch)\n aa1 = 0\n bb1 = 0\n k = 0\n k1 = 0\n kp = 0\n max_len = len(label[0])+1\n for j in range(len(img)):\n size = img[j].size()\n if size[1] > aa1:\n aa1 = size[1]\n if size[2] > bb1:\n bb1 = size[2]\n\n for ii in img:\n img_mask_sub_padding = padding_mask(ii, aa1, bb1)\n if k==0:\n img_padding_mask = img_mask_sub_padding\n else:\n img_padding_mask = torch.cat((img_padding_mask,img_mask_sub_padding),dim=0)\n k = k+1\n\n for iii in img_p:\n img_mask_sub_padding_p = padding_mask(iii, aa1, bb1)\n if kp==0:\n img_padding_mask_p = img_mask_sub_padding_p\n else:\n img_padding_mask_p = torch.cat((img_padding_mask_p,img_mask_sub_padding_p),dim=0)\n kp+=1\n\n\n for ii1 in label:\n ii1 = ii1.long()\n ii1 = ii1.unsqueeze(0)\n ii1_len = ii1.size()[1]\n m = torch.nn.ZeroPad2d((0,max_len-ii1_len,0,0))\n ii1_padding = m(ii1)\n if k1 == 0:\n label_padding = ii1_padding\n else:\n label_padding = torch.cat((label_padding,ii1_padding),dim=0)\n k1 = k1+1\n\n img_padding_mask = img_padding_mask/255.0\n img_padding_mask_p = img_padding_mask_p/255.0\n # print(\"img_padding_mask: \", img_padding_mask.size())\n # print(\"label_padding\", label_padding.size())\n return img_padding_mask, label_padding, img_padding_mask_p\n\ntrain_loader = torch.utils.data.DataLoader(\n dataset = off_image_train,\n batch_size = batch_size,\n shuffle = True,\n collate_fn = collate_fn,\n num_workers=2,\n )\ntest_loader = torch.utils.data.DataLoader(\n dataset = off_image_test,\n batch_size = batch_size_t,\n shuffle = True,\n collate_fn = collate_fn,\n num_workers=2,\n)\n\ndef my_train(target_length,attn_decoder1, discriminator,\n output_highfeature, output_area, output_highfeature_p, output_area_p,y,criterion, adv_loss, loss_d, encoder_optimizer1,decoder_optimizer1,discriminator_optimizer1,x_mean, xp_mean,dense_input,h_mask,w_mask,gpu,\n decoder_input,decoder_hidden,attention_sum,decoder_attention, dense_input_p,h_mask_p,w_mask_p,\n decoder_input_p,decoder_hidden_p,attention_sum_p,decoder_attention_p):\n loss = 0\n loss_adv = 0 \n\n use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n flag_z = [0]*batch_size\n\n if use_teacher_forcing:\n encoder_optimizer1.zero_grad()\n decoder_optimizer1.zero_grad()\n discriminator_optimizer1.zero_grad()\n my_num = 0\n\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention, attention_sum, out_emb = attn_decoder1(decoder_input,\n decoder_hidden,\n output_highfeature,\n output_area,\n attention_sum,\n decoder_attention,\n dense_input,batch_size,h_mask,w_mask,gpu)\n\n decoder_output_p, decoder_hidden_p, decoder_attention_p, attention_sum_p, out_emb_p = attn_decoder1(decoder_input_p,\n decoder_hidden_p,\n output_highfeature_p,\n output_area_p,\n attention_sum_p,\n 
decoder_attention_p,\n dense_input_p,batch_size,h_mask_p,w_mask_p,gpu) #print(decoder_output.size()) (batch,1,112)\n \n\n # print(\"discriminator: \", discriminator(out_emb).size())\n y = y.unsqueeze(0)\n dis_h = discriminator(out_emb)\n dis_p = discriminator(out_emb_p)\n\n\n for i in range(batch_size):\n discriminator.train(mode=False)\n attn_decoder1.train(mode=True)\n encoder.train(mode= True)\n print(decoder_output.size())\n if int(y[0][i][di]) == 0:\n flag_z[i] = flag_z[i]+1\n if flag_z[i] > 1:\n continue\n else:\n loss += criterion(decoder_output[i], y[:,i,di])\n # loss += criterion(decoder_output_p[i], y[:,i,di])\n # adv = loss_d(dis_h[i])\n # loss += adv[0]\n print(\"loss: \", loss)\n else:\n loss += criterion(decoder_output[i], y[:,i,di])\n # loss += criterion(decoder_output_p[i], y[:,i,di])\n # adv = loss_d(dis_h[i])\n # # print(\"adv: \", adv.size())\n # loss += adv[0]\n print(\"loss: \", loss)\n loss.backward(retain_graph=True)\n encoder_optimizer1.step()\n decoder_optimizer1.step()\n # discriminator_optimizer1.step()\n\n n_steps = 5\n for j in range(n_steps):\n discriminator.train(mode=True)\n attn_decoder1.train(mode=False)\n encoder.train(mode=False)\n for i in range(batch_size):\n if int(y[0][i][di]) == 0:\n flag_z[i] = flag_z[i]+1\n if flag_z[i] > 1:\n continue\n else:\n loss_adv += adv_loss(dis_p[i], dis_h[i])\n else:\n loss_adv += adv_loss(dis_p[i], dis_h[i])\n # print(\"loss_adv: \", loss_adv)\n loss_adv.backward(retain_graph= True)\n discriminator_optimizer1.step()\n # encoder_optimizer1.step()\n\n\n if int(y[0][0][di]) == 0:\n break\n decoder_input = y[:,:,di]\n decoder_input = decoder_input.squeeze(0)\n decoder_input_p = y[:,:,di]\n decoder_input_p = decoder_input_p.squeeze(0)\n y = y.squeeze(0)\n\n # encoder_optimizer1.step()\n # decoder_optimizer1.step()\n # discriminator_optimizer1.step()\n \n return loss.item(), loss_adv.item()\n\n # else:\n # encoder_optimizer1.zero_grad()\n # decoder_optimizer1.zero_grad()\n # my_num = 0\n # for di in range(target_length):\n # decoder_output, decoder_hidden, decoder_attention,attention_sum, out_emb= attn_decoder1(decoder_input, decoder_hidden,\n # output_highfeature, output_area,\n # attention_sum,decoder_attention,dense_input,batch_size,\n # h_mask,w_mask,gpu)\n # #print(decoder_output.size()) 1*10*112\n # #print(y.size()) 1*37\n # #topi (b,1)\n # topv,topi = torch.max(decoder_output,2)\n # decoder_input = topi\n # decoder_input = decoder_input.view(batch_size)\n\n # y = y.unsqueeze(0)\n # #print(y_t)\n\n # # 1*bs*17\n # for k in range(batch_size):\n # if int(y[0][k][di]) == 0:\n # flag_z[k] = flag_z[k]+1\n # if flag_z[k] > 1:\n # continue\n # else:\n # loss += criterion(decoder_output[k], y[:,k,di])\n # else:\n # loss += criterion(decoder_output[k], y[:,k,di])\n\n # y = y.squeeze(0)\n # # if int(topi[0]) == 0:\n # # break\n # loss.backward()\n # encoder_optimizer1.step()\n # decoder_optimizer1.step()\n # return loss.item(), loss_adv.item()\n\nencoder = densenet121()\n\npthfile = r'./model/densenet121-a639ec97.pth'\npretrained_dict = torch.load(pthfile) \nencoder_dict = encoder.state_dict()\npretrained_dict = {k: v for k, v in pretrained_dict.items() if k in encoder_dict}\nencoder_dict.update(pretrained_dict)\nencoder.load_state_dict(encoder_dict)\n\nattn_decoder1 = AttnDecoderRNN(hidden_size,112,dropout_p=0.5)\n\nencoder=encoder.cuda()\nattn_decoder1 = attn_decoder1.cuda()\nencoder = torch.nn.DataParallel(encoder, device_ids=gpu)\nattn_decoder1 = torch.nn.DataParallel(attn_decoder1, device_ids=gpu)\n\ndiscriminator = 
Discriminator(128, 512)\ndiscriminator = discriminator.cuda()\ndiscriminator = torch.nn.DataParallel(discriminator, device_ids=gpu)\n\ndef imresize(im,sz):\n pil_im = Image.fromarray(im)\n # print(\"size: \",sz)\n return numpy.array(pil_im.resize(sz))\n\n\ncriterion = nn.NLLLoss()\nadv_loss = Adversarial_Loss(0.4)\nloss_d = Loss_D()\n\n# encoder.load_state_dict(torch.load('model/encoder_lr0.00001_BN_te1_d05_SGD_bs8_mask_conv_bn_b.pkl'))\n# attn_decoder1.load_state_dict(torch.load('model/attn_decoder_lr0.00001_BN_te1_d05_SGD_bs8_mask_conv_bn_b.pkl'))\ndecoder_input_init = torch.LongTensor([111]*batch_size).cuda()\ndecoder_hidden_init = torch.randn(batch_size, 1, hidden_size).cuda()\nnn.init.xavier_uniform_(decoder_hidden_init)\n\n# attn_decoder1.load_state_dict(torch.load('model/attn_decoder_lr0.00001_BN_te1_d05_SGD_bs8_mask_conv_bn_b.pkl'))\ndecoder_input_init_p = torch.LongTensor([111]*batch_size).cuda()\ndecoder_hidden_init_p = torch.randn(batch_size, 1, hidden_size).cuda()\nnn.init.xavier_uniform_(decoder_hidden_init_p)\n\n\n# encoder_optimizer1 = torch.optim.Adam(encoder.parameters(), lr=lr_rate)\n# decoder_optimizer1 = torch.optim.Adam(attn_decoder1.parameters(), lr=lr_rate)\n\nfor epoch in range(200):\n encoder_optimizer1 = torch.optim.SGD(encoder.parameters(), lr=lr_rate,momentum=0.9)\n decoder_optimizer1 = torch.optim.SGD(attn_decoder1.parameters(), lr=lr_rate,momentum=0.9)\n discriminator_optimizer1 = torch.optim.SGD(discriminator.parameters(), lr=lr_rate, momentum=0.9)\n\n # # if using SGD optimizer\n # if epoch+1 == 50:\n # lr_rate = lr_rate/10\n # encoder_optimizer1 = torch.optim.SGD(encoder.parameters(), lr=lr_rate,momentum=0.9)\n # decoder_optimizer1 = torch.optim.SGD(attn_decoder1.parameters(), lr=lr_rate,momentum=0.9)\n # if epoch+1 == 75:\n # lr_rate = lr_rate/10\n # encoder_optimizer1 = torch.optim.SGD(encoder.parameters(), lr=lr_rate,momentum=0.9)\n # decoder_optimizer1 = torch.optim.SGD(attn_decoder1.parameters(), lr=lr_rate,momentum=0.9)\n\n\n running_loss=0\n running_loss_adv=0\n whole_loss = 0\n whole_loss_adv =0\n\n encoder.train(mode=True)\n attn_decoder1.train(mode=True)\n discriminator.train(mode=True)\n\n # this is the train\n for step,(x,y,xp) in enumerate(train_loader):\n # print(\"xp: \",xp.size())\n # print(\"y: \", y.size())\n if x.size()[0]<batch_size:\n break\n h_mask = []\n w_mask = []\n h_mask_p = []\n w_mask_p = []\n for i,j in zip(x, xp):\n #h*w\n size_mask = i[1].size()\n s_w = str(i[1][0])\n s_h = str(i[1][:,1])\n w = s_w.count('1')\n h = s_h.count('1')\n h_comp = int(h/16)+1\n w_comp = int(w/16)+1\n h_mask.append(h_comp)\n w_mask.append(w_comp)\n\n size_mask_p = j[1].size()\n s_w_p = str(j[1][0])\n s_h_p = str(j[1][:,1])\n w_p = s_w_p.count('1')\n h_p = s_h_p.count('1')\n h_comp_p = int(h_p/16)+1\n w_comp_p = int(w_p/16)+1\n h_mask_p.append(h_comp_p)\n w_mask_p.append(w_comp_p)\n\n x = x.cuda()\n y = y.cuda()\n xp = xp.cuda()\n # out is CNN featuremaps\n output_highfeature = encoder(x)\n # print(\"out: \", output_highfeature.size())\n # print(\"output_highfeature: \", output_highfeature.size())\n output_highfeature_p = encoder(xp)\n x_mean=[]\n xp_mean = []\n for i,j in zip(output_highfeature,output_highfeature_p):\n x_mean.append(float(torch.mean(i)))\n xp_mean.append(float(torch.mean(j)))\n # x_mean = torch.mean(output_highfeature)\n # x_mean = float(x_mean)\n for i in range(batch_size):\n decoder_hidden_init[i] = decoder_hidden_init[i]*x_mean[i]\n decoder_hidden_init[i] = torch.tanh(decoder_hidden_init[i])\n decoder_hidden_init_p[i] = 
decoder_hidden_init_p[i]*xp_mean[i]\n decoder_hidden_init_p[i] = torch.tanh(decoder_hidden_init_p[i])\n # decoder_hidden_init[]\n\n # dense_input is height and output_area is width which is bb\n output_area1 = output_highfeature.size()\n output_area1_p = output_highfeature_p.size()\n\n output_area = output_area1[3]\n output_area_p = output_area1_p[3]\n\n dense_input = output_area1[2]\n dense_input_p = output_area1_p[2]\n\n target_length = y.size()[1]\n attention_sum_init = torch.zeros(batch_size,1,dense_input,output_area).cuda()\n attention_sum_init_p = torch.zeros(batch_size, 1, dense_input_p, output_area_p).cuda()\n\n decoder_attention_init = torch.zeros(batch_size,1,dense_input,output_area).cuda()\n decoder_attention_init_p = torch.zeros(batch_size,1,dense_input_p,output_area_p).cuda()\n\n loss, loss_adv = my_train(target_length,attn_decoder1, discriminator, output_highfeature,\n output_area, output_highfeature_p, output_area_p,y,criterion, adv_loss, loss_d, encoder_optimizer1,decoder_optimizer1,discriminator_optimizer1,x_mean, xp_mean,dense_input,h_mask,w_mask,gpu,\n decoder_input_init,decoder_hidden_init,attention_sum_init,decoder_attention_init, dense_input_p,h_mask_p,w_mask_p,\n decoder_input_init_p,decoder_hidden_init_p,attention_sum_init_p,decoder_attention_init_p)\n \n running_loss += loss\n running_loss_adv += loss_adv\n \n if step % 20 == 19:\n pre = ((step+1)/len_train)*100*batch_size\n whole_loss += running_loss\n whole_loss_adv += running_loss_adv\n running_loss = running_loss/(batch_size*20)\n running_loss_adv = running_loss_adv/(batch_size*20)\n print('epoch is %d, lr rate is %.5f, te is %.3f, batch_size is %d, loading for %.3f%%, running_loss is %f' %(epoch,lr_rate,teacher_forcing_ratio, batch_size,pre,running_loss))\n print('epoch is %d, lr rate is %.5f, te is %.3f, batch_size is %d, loading for %.3f%%, running_loss is %f' %(epoch,lr_rate,teacher_forcing_ratio, batch_size,pre,running_loss_adv))\n # with open(\"training_data/running_loss_%.5f_pre_GN_te05_d02_all.txt\" %(lr_rate),\"a\") as f:\n # f.write(\"%s\\n\"%(str(running_loss)))\n running_loss = 0\n running_loss_adv = 0\n\n loss_all_out = whole_loss / len_train\n loss_all_out_adv = whole_loss_adv/ len_train\n print(\"epoch is %d, the whole loss is %f\" % (epoch, loss_all_out))\n print(\"epoch is %d, the whole loss adv is %f\" % (epoch, loss_all_out_adv))\n # with open(\"training_data/whole_loss_%.5f_pre_GN_te05_d02_all.txt\" % (lr_rate), \"a\") as f:\n # f.write(\"%s\\n\" % (str(loss_all_out)))\n\n # this is the prediction and compute wer loss\n total_dist = 0\n total_label = 0\n total_line = 0\n total_line_rec = 0\n whole_loss_t = 0\n\n encoder.eval()\n attn_decoder1.eval()\n print('Now, begin testing!!')\n\n for step_t, (x_t, y_t) in enumerate(test_loader):\n x_real_high = x_t.size()[2]\n x_real_width = x_t.size()[3]\n if x_t.size()[0]<batch_size_t:\n break\n print('testing for %.3f%%'%(step_t*100*batch_size_t/len_test))\n h_mask_t = []\n w_mask_t = []\n for i in x_t:\n #h*w\n size_mask_t = i[1].size()\n s_w_t = str(i[1][0])\n s_h_t = str(i[1][:,1])\n w_t = s_w_t.count('1')\n h_t = s_h_t.count('1')\n h_comp_t = int(h_t/16)+1\n w_comp_t = int(w_t/16)+1\n h_mask_t.append(h_comp_t)\n w_mask_t.append(w_comp_t)\n\n x_t = x_t.cuda()\n y_t = y_t.cuda()\n output_highfeature_t = encoder(x_t)\n\n x_mean_t = torch.mean(output_highfeature_t)\n x_mean_t = float(x_mean_t)\n output_area_t1 = output_highfeature_t.size()\n output_area_t = output_area_t1[3]\n dense_input = output_area_t1[2]\n\n decoder_input_t = 
torch.LongTensor([111]*batch_size_t)\n decoder_input_t = decoder_input_t.cuda()\n decoder_hidden_t = torch.randn(batch_size_t, 1, hidden_size).cuda()\n nn.init.xavier_uniform_(decoder_hidden_t)\n\n x_mean_t=[]\n for i in output_highfeature_t:\n x_mean_t.append(float(torch.mean(i)))\n # x_mean = torch.mean(output_highfeature)\n # x_mean = float(x_mean)\n for i in range(batch_size_t):\n decoder_hidden_t[i] = decoder_hidden_t[i]*x_mean_t[i]\n decoder_hidden_t[i] = torch.tanh(decoder_hidden_t[i])\n\n prediction = torch.zeros(batch_size_t,maxlen)\n #label = torch.zeros(batch_size_t,maxlen)\n prediction_sub = []\n label_sub = []\n decoder_attention_t = torch.zeros(batch_size_t,1,dense_input,output_area_t).cuda()\n attention_sum_t = torch.zeros(batch_size_t,1,dense_input,output_area_t).cuda()\n flag_z_t = [0]*batch_size_t\n loss_t = 0\n m = torch.nn.ZeroPad2d((0,maxlen-y_t.size()[1],0,0))\n y_t = m(y_t)\n for i in range(maxlen):\n decoder_output, decoder_hidden_t, decoder_attention_t, attention_sum_t, out_emb = attn_decoder1(decoder_input_t,\n decoder_hidden_t,\n output_highfeature_t,\n output_area_t,\n attention_sum_t,\n decoder_attention_t,dense_input,batch_size_t,h_mask_t,w_mask_t,gpu)\n\n ### you can see the attention when testing\n\n # print('this is',i)\n # for i in range(batch_size_t):\n # x_real = numpy.array(x_t[i][0].data.cpu())\n\n # show = numpy.array(decoder_attention_t[i][0].data.cpu())\n # show = imresize(show,(x_real_width,x_real_high))\n # k_max = show.max()\n # show = show/k_max\n\n # show_x = x_real+show\n # plt.imshow(show_x, interpolation='nearest', cmap='gray_r')\n # plt.show()\n \n topv,topi = torch.max(decoder_output,2)\n # if torch.sum(y_t[0,:,i])==0:\n # y_t = y_t.squeeze(0)\n # break\n if torch.sum(topi)==0:\n break\n decoder_input_t = topi\n decoder_input_t = decoder_input_t.view(batch_size_t)\n\n # prediction\n prediction[:,i] = decoder_input_t\n\n for i in range(batch_size_t):\n for j in range(maxlen):\n if int(prediction[i][j]) ==0:\n break\n else:\n prediction_sub.append(int(prediction[i][j]))\n if len(prediction_sub)<maxlen:\n prediction_sub.append(0)\n\n for k in range(y_t.size()[1]):\n if int(y_t[i][k]) ==0:\n break\n else:\n label_sub.append(int(y_t[i][k]))\n label_sub.append(0)\n\n dist, llen = cmp_result(label_sub, prediction_sub)\n total_dist += dist\n total_label += llen\n total_line += 1\n if dist == 0:\n total_line_rec = total_line_rec+ 1\n\n label_sub = []\n prediction_sub = []\n\n print('total_line_rec is',total_line_rec)\n wer = float(total_dist) / total_label\n sacc = float(total_line_rec) / total_line\n print('wer is %.5f' % (wer))\n print('sacc is %.5f ' % (sacc))\n # print('whole loss is %.5f'%(whole_loss_t/925))\n # with open(\"training_data/wer_%.5f_pre_GN_te05_d02_all.txt\" % (lr_rate), \"a\") as f:\n # f.write(\"%s\\n\" % (str(wer)))\n\n if (sacc > exprate):\n exprate = sacc\n print(exprate)\n print(\"saving the model....\")\n print('encoder_lr%.5f_GN_te1_d05_SGD_bs6_mask_conv_bn_b_xavier.pkl' %(lr_rate))\n torch.save(encoder.state_dict(), './model/encoder_lr%.5f_GN_te1_d05_SGD_bs6_mask_conv_bn_b_xavier.pkl'%(lr_rate))\n torch.save(attn_decoder1.state_dict(), './model/attn_decoder_lr%.5f_GN_te1_d05_SGD_bs6_mask_conv_bn_b_xavier.pkl'%(lr_rate))\n print(\"done\")\n flag = 0\n else:\n flag = flag+1\n print('the best is %f' % (exprate))\n print('the loss is bigger than before,so do not save the model')\n\n if flag == 10:\n lr_rate = lr_rate*0.1\n flag = 0\n\n\n\n\n\n\n\n\n# '''\n# Python 3.6 \n# Pytorch >= 0.4\n# Written by Hongyu Wang in Beihang 
university\n# '''\n# import torch\n# import math\n# import torch.nn as nn\n# from torch.autograd import Variable\n# import torch.nn.functional as F\n# import numpy\n# import torch.utils.data as data\n# from data_iterator import dataIterator\n# from Densenet_torchvision import densenet121\n# from Attention_RNN import AttnDecoderRNN\n# #from Resnet101 import resnet101\n# import random\n# import matplotlib.pyplot as plt\n# from PIL import Image\n\n\n# # compute the wer loss\n# def cmp_result(label,rec):\n# dist_mat = numpy.zeros((len(label)+1, len(rec)+1),dtype='int32')\n# dist_mat[0,:] = range(len(rec) + 1)\n# dist_mat[:,0] = range(len(label) + 1)\n# for i in range(1, len(label) + 1):\n# for j in range(1, len(rec) + 1):\n# hit_score = dist_mat[i-1, j-1] + (label[i-1] != rec[j-1])\n# ins_score = dist_mat[i,j-1] + 1\n# del_score = dist_mat[i-1, j] + 1\n# dist_mat[i,j] = min(hit_score, ins_score, del_score)\n# dist = dist_mat[len(label), len(rec)]\n# return dist, len(label)\n\n# def load_dict(dictFile):\n# fp=open(dictFile)\n# stuff=fp.readlines()\n# fp.close()\n# lexicon={}\n# for l in stuff:\n# w=l.strip().split()\n# lexicon[w[0]]=int(w[1])\n# print('total words/phones',len(lexicon))\n# return lexicon\n\n# datasets=['./offline-train.pkl','./train_caption.txt']\n# valid_datasets=['./offline-test.pkl', './test_caption.txt']\n# dictionaries=['./dictionary.txt']\n# batch_Imagesize=500000\n# valid_batch_Imagesize=500000\n# # batch_size for training and testing\n# batch_size=6\n# batch_size_t=6\n# # the max (label length/Image size) in training and testing\n# # you can change 'maxlen','maxImagesize' by the size of your GPU\n# maxlen=48\n# maxImagesize= 100000\n# # hidden_size in RNN\n# hidden_size = 256\n# # teacher_forcing_ratio \n# teacher_forcing_ratio = 1\n# # change the gpu id \n# gpu = [0]\n# # learning rate\n# lr_rate = 0.0001\n# # flag to remember when to change the learning rate\n# flag = 0\n# # exprate\n# exprate = 0\n\n# # worddicts\n# worddicts = load_dict(dictionaries[0])\n# worddicts_r = [None] * len(worddicts)\n# for kk, vv in worddicts.items():\n# worddicts_r[vv] = kk\n\n# #load train data and test data\n# train,train_label = dataIterator(\n# datasets[0], datasets[1],worddicts,batch_size=1,\n# batch_Imagesize=batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize\n# )\n# len_train = len(train)\n\n# test,test_label = dataIterator(\n# valid_datasets[0],valid_datasets[1],worddicts,batch_size=1,\n# batch_Imagesize=batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize\n# )\n# len_test = len(test)\n\n\n# class custom_dset(data.Dataset):\n# def __init__(self,train,train_label,batch_size):\n# self.train = train\n# self.train_label = train_label\n# self.batch_size = batch_size\n\n# def __getitem__(self, index):\n# train_setting = torch.from_numpy(numpy.array(self.train[index]))\n# label_setting = torch.from_numpy(numpy.array(self.train_label[index])).type(torch.LongTensor)\n\n# size = train_setting.size()\n# train_setting = train_setting.view(1,size[2],size[3])\n# label_setting = label_setting.view(-1)\n# return train_setting,label_setting\n\n# def __len__(self):\n# return len(self.train)\n\n\n# off_image_train = custom_dset(train,train_label,batch_size)\n# off_image_test = custom_dset(test,test_label,batch_size)\n\n# # collate_fn is writting for padding imgs in batch. \n# # As images in my dataset are different size, so the padding is necessary.\n# # Padding images to the max image size in a mini-batch and cat a mask. 
\n# def collate_fn(batch):\n# batch.sort(key=lambda x: len(x[1]), reverse=True)\n# img, label = zip(*batch)\n# aa1 = 0\n# bb1 = 0\n# k = 0\n# k1 = 0\n# max_len = len(label[0])+1\n# for j in range(len(img)):\n# size = img[j].size()\n# if size[1] > aa1:\n# aa1 = size[1]\n# if size[2] > bb1:\n# bb1 = size[2]\n\n# for ii in img:\n# ii = ii.float()\n# img_size_h = ii.size()[1]\n# img_size_w = ii.size()[2]\n# img_mask_sub_s = torch.ones(1,img_size_h,img_size_w).type(torch.FloatTensor)\n# img_mask_sub_s = img_mask_sub_s*255.0\n# img_mask_sub = torch.cat((ii,img_mask_sub_s),dim=0)\n# padding_h = aa1-img_size_h\n# padding_w = bb1-img_size_w\n# m = torch.nn.ZeroPad2d((0,padding_w,0,padding_h))\n# img_mask_sub_padding = m(img_mask_sub)\n# img_mask_sub_padding = img_mask_sub_padding.unsqueeze(0)\n# if k==0:\n# img_padding_mask = img_mask_sub_padding\n# else:\n# img_padding_mask = torch.cat((img_padding_mask,img_mask_sub_padding),dim=0)\n# k = k+1\n\n# for ii1 in label:\n# ii1 = ii1.long()\n# ii1 = ii1.unsqueeze(0)\n# ii1_len = ii1.size()[1]\n# m = torch.nn.ZeroPad2d((0,max_len-ii1_len,0,0))\n# ii1_padding = m(ii1)\n# if k1 == 0:\n# label_padding = ii1_padding\n# else:\n# label_padding = torch.cat((label_padding,ii1_padding),dim=0)\n# k1 = k1+1\n\n# img_padding_mask = img_padding_mask/255.0\n# return img_padding_mask, label_padding\n\n# train_loader = torch.utils.data.DataLoader(\n# dataset = off_image_train,\n# batch_size = batch_size,\n# shuffle = True,\n# collate_fn = collate_fn,\n# num_workers=2,\n# )\n# test_loader = torch.utils.data.DataLoader(\n# dataset = off_image_test,\n# batch_size = batch_size_t,\n# shuffle = True,\n# collate_fn = collate_fn,\n# num_workers=2,\n# )\n\n# def my_train(target_length,attn_decoder1,\n# output_highfeature, output_area,y,criterion,encoder_optimizer1,decoder_optimizer1,x_mean,dense_input,h_mask,w_mask,gpu,\n# decoder_input,decoder_hidden,attention_sum,decoder_attention):\n# loss = 0\n\n# use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n# flag_z = [0]*batch_size\n\n# if use_teacher_forcing:\n# encoder_optimizer1.zero_grad()\n# decoder_optimizer1.zero_grad()\n# my_num = 0\n\n# for di in range(target_length):\n# decoder_output, decoder_hidden, decoder_attention, attention_sum, out_emb = attn_decoder1(decoder_input,\n# decoder_hidden,\n# output_highfeature,\n# output_area,\n# attention_sum,\n# decoder_attention,\n# dense_input,batch_size,h_mask,w_mask,gpu)\n \n \n# #print(decoder_output.size()) (batch,1,112)\n# y = y.unsqueeze(0)\n# for i in range(batch_size):\n# if int(y[0][i][di]) == 0:\n# flag_z[i] = flag_z[i]+1\n# if flag_z[i] > 1:\n# continue\n# else:\n# loss += criterion(decoder_output[i], y[:,i,di])\n# else:\n# loss += criterion(decoder_output[i], y[:,i,di])\n# # print(\"loss: \", loss)\n\n# if int(y[0][0][di]) == 0:\n# break\n# decoder_input = y[:,:,di]\n# decoder_input = decoder_input.squeeze(0)\n# y = y.squeeze(0)\n\n# loss.backward()\n\n# encoder_optimizer1.step()\n# decoder_optimizer1.step()\n# return loss.item()\n\n# else:\n# encoder_optimizer1.zero_grad()\n# decoder_optimizer1.zero_grad()\n# my_num = 0\n# for di in range(target_length):\n# decoder_output, decoder_hidden, decoder_attention,attention_sum= attn_decoder1(decoder_input, decoder_hidden,\n# output_highfeature, output_area,\n# attention_sum,decoder_attention,dense_input,batch_size,\n# h_mask,w_mask,gpu)\n# #print(decoder_output.size()) 1*10*112\n# #print(y.size()) 1*37\n# #topi (b,1)\n# topv,topi = torch.max(decoder_output,2)\n# decoder_input = topi\n# 
decoder_input = decoder_input.view(batch_size)\n\n# y = y.unsqueeze(0)\n# #print(y_t)\n\n# # 1*bs*17\n# for k in range(batch_size):\n# if int(y[0][k][di]) == 0:\n# flag_z[k] = flag_z[k]+1\n# if flag_z[k] > 1:\n# continue\n# else:\n# loss += criterion(decoder_output[k], y[:,k,di])\n# else:\n# loss += criterion(decoder_output[k], y[:,k,di])\n\n# y = y.squeeze(0)\n# # if int(topi[0]) == 0:\n# # break\n# loss.backward()\n# encoder_optimizer1.step()\n# decoder_optimizer1.step()\n# return loss.item()\n\n# encoder = densenet121()\n\n# pthfile = r'./model/densenet121-a639ec97.pth'\n# pretrained_dict = torch.load(pthfile) \n# encoder_dict = encoder.state_dict()\n# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in encoder_dict}\n# encoder_dict.update(pretrained_dict)\n# encoder.load_state_dict(encoder_dict)\n\n# attn_decoder1 = AttnDecoderRNN(hidden_size,112,dropout_p=0.5)\n\n# encoder=encoder.cuda()\n# attn_decoder1 = attn_decoder1.cuda()\n# encoder = torch.nn.DataParallel(encoder, device_ids=gpu)\n# attn_decoder1 = torch.nn.DataParallel(attn_decoder1, device_ids=gpu)\n\n# def imresize(im,sz):\n# pil_im = Image.fromarray(im)\n# return numpy.array(pil_im.resize(sz))\n\n\n# criterion = nn.NLLLoss()\n# # encoder.load_state_dict(torch.load('model/encoder_lr0.00001_BN_te1_d05_SGD_bs8_mask_conv_bn_b.pkl'))\n# # attn_decoder1.load_state_dict(torch.load('model/attn_decoder_lr0.00001_BN_te1_d05_SGD_bs8_mask_conv_bn_b.pkl'))\n# decoder_input_init = torch.LongTensor([111]*batch_size).cuda()\n# decoder_hidden_init = torch.randn(batch_size, 1, hidden_size).cuda()\n# nn.init.xavier_uniform_(decoder_hidden_init)\n\n# # encoder_optimizer1 = torch.optim.Adam(encoder.parameters(), lr=lr_rate)\n# # decoder_optimizer1 = torch.optim.Adam(attn_decoder1.parameters(), lr=lr_rate)\n\n# for epoch in range(200):\n# encoder_optimizer1 = torch.optim.SGD(encoder.parameters(), lr=lr_rate,momentum=0.9)\n# decoder_optimizer1 = torch.optim.SGD(attn_decoder1.parameters(), lr=lr_rate,momentum=0.9)\n\n# # # if using SGD optimizer\n# # if epoch+1 == 50:\n# # lr_rate = lr_rate/10\n# # encoder_optimizer1 = torch.optim.SGD(encoder.parameters(), lr=lr_rate,momentum=0.9)\n# # decoder_optimizer1 = torch.optim.SGD(attn_decoder1.parameters(), lr=lr_rate,momentum=0.9)\n# # if epoch+1 == 75:\n# # lr_rate = lr_rate/10\n# # encoder_optimizer1 = torch.optim.SGD(encoder.parameters(), lr=lr_rate,momentum=0.9)\n# # decoder_optimizer1 = torch.optim.SGD(attn_decoder1.parameters(), lr=lr_rate,momentum=0.9)\n\n\n# running_loss=0\n# whole_loss = 0\n\n# encoder.train(mode=True)\n# attn_decoder1.train(mode=True)\n\n# # this is the train\n# for step,(x,y) in enumerate(train_loader):\n# if x.size()[0]<batch_size:\n# break\n# h_mask = []\n# w_mask = []\n# for i in x:\n# #h*w\n# size_mask = i[1].size()\n# s_w = str(i[1][0])\n# s_h = str(i[1][:,1])\n# w = s_w.count('1')\n# h = s_h.count('1')\n# h_comp = int(h/16)+1\n# w_comp = int(w/16)+1\n# h_mask.append(h_comp)\n# w_mask.append(w_comp)\n\n# x = x.cuda()\n# y = y.cuda()\n# # out is CNN featuremaps\n# output_highfeature = encoder(x)\n# x_mean=[]\n# for i in output_highfeature:\n# x_mean.append(float(torch.mean(i)))\n# # x_mean = torch.mean(output_highfeature)\n# # x_mean = float(x_mean)\n# for i in range(batch_size):\n# decoder_hidden_init[i] = decoder_hidden_init[i]*x_mean[i]\n# decoder_hidden_init[i] = torch.tanh(decoder_hidden_init[i])\n\n# # dense_input is height and output_area is width which is bb\n# output_area1 = output_highfeature.size()\n\n# output_area = output_area1[3]\n# dense_input 
= output_area1[2]\n# target_length = y.size()[1]\n# attention_sum_init = torch.zeros(batch_size,1,dense_input,output_area).cuda()\n# decoder_attention_init = torch.zeros(batch_size,1,dense_input,output_area).cuda()\n\n# running_loss += my_train(target_length,attn_decoder1,output_highfeature,\n# output_area,y,criterion,encoder_optimizer1,decoder_optimizer1,x_mean,dense_input,h_mask,w_mask,gpu,\n# decoder_input_init,decoder_hidden_init,attention_sum_init,decoder_attention_init)\n\n \n# if step % 20 == 19:\n# pre = ((step+1)/len_train)*100*batch_size\n# whole_loss += running_loss\n# running_loss = running_loss/(batch_size*20)\n# print('epoch is %d, lr rate is %.5f, te is %.3f, batch_size is %d, loading for %.3f%%, running_loss is %f' %(epoch,lr_rate,teacher_forcing_ratio, batch_size,pre,running_loss))\n# # with open(\"training_data/running_loss_%.5f_pre_GN_te05_d02_all.txt\" %(lr_rate),\"a\") as f:\n# # f.write(\"%s\\n\"%(str(running_loss)))\n# running_loss = 0\n\n# loss_all_out = whole_loss / len_train\n# print(\"epoch is %d, the whole loss is %f\" % (epoch, loss_all_out))\n# # with open(\"training_data/whole_loss_%.5f_pre_GN_te05_d02_all.txt\" % (lr_rate), \"a\") as f:\n# # f.write(\"%s\\n\" % (str(loss_all_out)))\n\n# # this is the prediction and compute wer loss\n# total_dist = 0\n# total_label = 0\n# total_line = 0\n# total_line_rec = 0\n# whole_loss_t = 0\n\n# encoder.eval()\n# attn_decoder1.eval()\n# print('Now, begin testing!!')\n\n# for step_t, (x_t, y_t) in enumerate(test_loader):\n# x_real_high = x_t.size()[2]\n# x_real_width = x_t.size()[3]\n# if x_t.size()[0]<batch_size_t:\n# break\n# print('testing for %.3f%%'%(step_t*100*batch_size_t/len_test),end='\\r')\n# h_mask_t = []\n# w_mask_t = []\n# for i in x_t:\n# #h*w\n# size_mask_t = i[1].size()\n# s_w_t = str(i[1][0])\n# s_h_t = str(i[1][:,1])\n# w_t = s_w_t.count('1')\n# h_t = s_h_t.count('1')\n# h_comp_t = int(h_t/16)+1\n# w_comp_t = int(w_t/16)+1\n# h_mask_t.append(h_comp_t)\n# w_mask_t.append(w_comp_t)\n\n# x_t = x_t.cuda()\n# y_t = y_t.cuda()\n# output_highfeature_t = encoder(x_t)\n\n# x_mean_t = torch.mean(output_highfeature_t)\n# x_mean_t = float(x_mean_t)\n# output_area_t1 = output_highfeature_t.size()\n# output_area_t = output_area_t1[3]\n# dense_input = output_area_t1[2]\n\n# decoder_input_t = torch.LongTensor([111]*batch_size_t)\n# decoder_input_t = decoder_input_t.cuda()\n# decoder_hidden_t = torch.randn(batch_size_t, 1, hidden_size).cuda()\n# nn.init.xavier_uniform_(decoder_hidden_t)\n\n# x_mean_t=[]\n# for i in output_highfeature_t:\n# x_mean_t.append(float(torch.mean(i)))\n# # x_mean = torch.mean(output_highfeature)\n# # x_mean = float(x_mean)\n# for i in range(batch_size_t):\n# decoder_hidden_t[i] = decoder_hidden_t[i]*x_mean_t[i]\n# decoder_hidden_t[i] = torch.tanh(decoder_hidden_t[i])\n\n# prediction = torch.zeros(batch_size_t,maxlen)\n# #label = torch.zeros(batch_size_t,maxlen)\n# prediction_sub = []\n# label_sub = []\n# decoder_attention_t = torch.zeros(batch_size_t,1,dense_input,output_area_t).cuda()\n# attention_sum_t = torch.zeros(batch_size_t,1,dense_input,output_area_t).cuda()\n# flag_z_t = [0]*batch_size_t\n# loss_t = 0\n# m = torch.nn.ZeroPad2d((0,maxlen-y_t.size()[1],0,0))\n# y_t = m(y_t)\n# for i in range(maxlen):\n# decoder_output, decoder_hidden_t, decoder_attention_t, attention_sum_t = attn_decoder1(decoder_input_t,\n# decoder_hidden_t,\n# output_highfeature_t,\n# output_area_t,\n# attention_sum_t,\n# decoder_attention_t,dense_input,batch_size_t,h_mask_t,w_mask_t,gpu)\n\n# ### you can see the 
attention when testing\n\n# # print('this is',i)\n# # for i in range(batch_size_t):\n# # x_real = numpy.array(x_t[i][0].data.cpu())\n\n# # show = numpy.array(decoder_attention_t[i][0].data.cpu())\n# # show = imresize(show,(x_real_width,x_real_high))\n# # k_max = show.max()\n# # show = show/k_max\n\n# # show_x = x_real+show\n# # plt.imshow(show_x, interpolation='nearest', cmap='gray_r')\n# # plt.show()\n \n# topv,topi = torch.max(decoder_output,2)\n# # if torch.sum(y_t[0,:,i])==0:\n# # y_t = y_t.squeeze(0)\n# # break\n# if torch.sum(topi)==0:\n# break\n# decoder_input_t = topi\n# decoder_input_t = decoder_input_t.view(batch_size_t)\n\n# # prediction\n# prediction[:,i] = decoder_input_t\n\n# for i in range(batch_size_t):\n# for j in range(maxlen):\n# if int(prediction[i][j]) ==0:\n# break\n# else:\n# prediction_sub.append(int(prediction[i][j]))\n# if len(prediction_sub)<maxlen:\n# prediction_sub.append(0)\n\n# for k in range(y_t.size()[1]):\n# if int(y_t[i][k]) ==0:\n# break\n# else:\n# label_sub.append(int(y_t[i][k]))\n# label_sub.append(0)\n\n# dist, llen = cmp_result(label_sub, prediction_sub)\n# total_dist += dist\n# total_label += llen\n# total_line += 1\n# if dist == 0:\n# total_line_rec = total_line_rec+ 1\n\n# label_sub = []\n# prediction_sub = []\n\n# print('total_line_rec is',total_line_rec)\n# wer = float(total_dist) / total_label\n# sacc = float(total_line_rec) / total_line\n# print('wer is %.5f' % (wer))\n# print('sacc is %.5f ' % (sacc))\n# # print('whole loss is %.5f'%(whole_loss_t/925))\n# # with open(\"training_data/wer_%.5f_pre_GN_te05_d02_all.txt\" % (lr_rate), \"a\") as f:\n# # f.write(\"%s\\n\" % (str(wer)))\n\n# if (sacc > exprate):\n# exprate = sacc\n# print(exprate)\n# print(\"saving the model....\")\n# print('encoder_lr%.5f_GN_te1_d05_SGD_bs6_mask_conv_bn_b_xavier.pkl' %(lr_rate))\n# torch.save(encoder.state_dict(), 'model/encoder_lr%.5f_GN_te1_d05_SGD_bs6_mask_conv_bn_b_xavier.pkl'%(lr_rate))\n# torch.save(attn_decoder1.state_dict(), 'model/attn_decoder_lr%.5f_GN_te1_d05_SGD_bs6_mask_conv_bn_b_xavier.pkl'%(lr_rate))\n# print(\"done\")\n# flag = 0\n# else:\n# flag = flag+1\n# print('the best is %f' % (exprate))\n# print('the loss is bigger than before,so do not save the model')\n\n# if flag == 10:\n# lr_rate = lr_rate*0.1\n# flag = 0\n"
] | [
[
"torch.nn.ZeroPad2d",
"torch.mean",
"torch.nn.NLLLoss",
"torch.LongTensor",
"torch.ones",
"torch.max",
"torch.load",
"torch.cat",
"torch.randn",
"torch.zeros",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.tanh",
"torch.nn.init.xavier_uniform_",
"torch.nn.DataParallel",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
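The commented-out evaluation loop in the row above accumulates total_dist and total_label for word error rate (WER) and total_line_rec for expression-level accuracy (sacc) through a cmp_result helper that is not included in this snippet. Below is a minimal sketch of how such a helper is commonly written, assuming it returns (edit_distance, reference_length) as the surrounding loop implies; this is an inference, not the repo's actual implementation.

# Hedged sketch: Levenshtein distance between label and prediction sequences,
# returning (edit_distance, len(label)) as the evaluation loop above expects.
def cmp_result(label, prediction):
    dp = [[0] * (len(prediction) + 1) for _ in range(len(label) + 1)]
    for i in range(len(label) + 1):
        dp[i][0] = i
    for j in range(len(prediction) + 1):
        dp[0][j] = j
    for i in range(1, len(label) + 1):
        for j in range(1, len(prediction) + 1):
            cost = 0 if label[i - 1] == prediction[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,          # deletion
                           dp[i][j - 1] + 1,          # insertion
                           dp[i - 1][j - 1] + cost)   # substitution
    return dp[len(label)][len(prediction)], len(label)

# Mirroring the loop above: wer = total_dist / total_label,
# sacc = total_line_rec / total_line.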
FrauBluher/PMSM | [
"acb806ea23705ecc8ea29d8a23c3fb10c3b61e19"
] | [
"Config Tool/matplottest.py"
] | [
"#!/usr/bin/env python\n# Plot a graph of Data which is comming in on the fly\n# uses pylab\n# Author: Norbert Feurle\n# Date: 12.1.2012\n# License: if you get any profit from this then please share it with me and only use it for good\nimport pylab\nfrom pylab import *\nimport tkinter\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\n\nroot = tkinter.Tk()\nroot.wm_title(\"Extended Realtime Plotter\")\n\nxAchse=pylab.arange(0,100,1)\nyAchse=pylab.array([0]*100)\n\nfig = pylab.figure(1)\nax = fig.add_subplot(111)\nax.grid(True)\nax.set_title(\"Realtime Waveform Plot\")\nax.set_xlabel(\"Time\")\nax.set_ylabel(\"Amplitude\")\nax.axis([0,100,-1.5,1.5])\nline1=ax.plot(xAchse,yAchse,'-')\n\ncanvas = FigureCanvasTkAgg(fig, master=root)\ncanvas.show()\ncanvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n\ntoolbar = NavigationToolbar2TkAgg( canvas, root )\ntoolbar.update()\ncanvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n\nvalues=[]\nvalues = [0 for x in range(100)]\n\nTa=0.01\nfa=1.0/Ta\nfcos=3.5\n\nKonstant=cos(2*pi*fcos*Ta)\nT0=1.0\nT1=Konstant\n\ndef SinwaveformGenerator():\n global values,T1,Konstant,T0,wScale2\n #ohmegaCos=arccos(T1)/Ta\n #print \"fcos=\", ohmegaCos/(2*pi), \"Hz\"\n\n Tnext=((Konstant*T1)*2)-T0 \n if len(values)%100>70:\n values.append(random()*2-1)\n else:\n values.append(Tnext)\n T0=T1\n T1=Tnext\n root.after(int(wScale2['to'])-wScale2.get(),SinwaveformGenerator)\n\ndef RealtimePloter():\n global values,wScale,wScale2\n NumberSamples=min(len(values),wScale.get())\n CurrentXAxis=pylab.arange(len(values)-NumberSamples,len(values),1)\n line1[0].set_data(CurrentXAxis,pylab.array(values[-NumberSamples:]))\n ax.axis([CurrentXAxis.min(),CurrentXAxis.max(),-1.5,1.5])\n canvas.draw()\n root.after(25,RealtimePloter)\n #canvas.draw()\n\n #manager.show()\n\ndef _quit():\n root.quit() # stops mainloop\n root.destroy() # this is necessary on Windows to prevent\n # Fatal Python Error: PyEval_RestoreThread: NULL tstate\n\nbutton = tkinter.Button(master=root, text='Quit', command=_quit)\nbutton.pack(side=tkinter.BOTTOM)\n\nwScale = tkinter.Scale(master=root,label=\"View Width:\", from_=3, to=1000,sliderlength=30,length=100, orient=tkinter.HORIZONTAL)\nwScale2 = tkinter.Scale(master=root,label=\"Generation Speed:\", from_=1, to=200,sliderlength=30,length=100, orient=tkinter.HORIZONTAL)\nwScale2.pack(side=tkinter.BOTTOM)\nwScale.pack(side=tkinter.BOTTOM)\n\nwScale.set(100)\nwScale2.set(wScale2['to']-10)\n\nroot.protocol(\"WM_DELETE_WINDOW\", _quit) #thanks aurelienvlg\nroot.after(100,SinwaveformGenerator)\nroot.after(100,RealtimePloter)\ntkinter.mainloop()\n#pylab.show()\n"
] | [
[
"matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
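The matplottest.py entry above imports NavigationToolbar2TkAgg and calls canvas.show(), both of which were deprecated and later removed in newer matplotlib releases (the toolbar class became NavigationToolbar2Tk and canvas.show() became canvas.draw()). A minimal sketch of the equivalent embedding boilerplate on a current matplotlib, assuming matplotlib 3.x; only the setup that changed is shown.

import tkinter
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk

root = tkinter.Tk()
fig = Figure(figsize=(5, 3))
ax = fig.add_subplot(111)
line, = ax.plot([], [], '-')

canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()                                   # replaces the removed canvas.show()
canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)

toolbar = NavigationToolbar2Tk(canvas, root)    # replaces NavigationToolbar2TkAgg
toolbar.update()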
canard0328/malss | [
"976ebdb6e4bee52a0dbb65e0ddeed767cfe39591"
] | [
"malss/app/analyzer.py"
] | [
"# coding: utf-8\r\n\r\nfrom PyQt5.QtWidgets import QScrollArea\r\nfrom PyQt5.QtCore import QThread, pyqtSignal\r\nfrom .content import Content\r\nfrom multiprocessing import Process, Manager\r\nfrom threading import Condition\r\nimport sys\r\nfrom .waiting_animation import WaitingAnimation\r\n\r\n\r\nclass Analyzer(Content):\r\n\r\n def __init__(self, parent=None, title='', params=None):\r\n super().__init__(parent, title, params)\r\n\r\n # \"parent.parent()\" must be modified.\r\n self.wait_ani = WaitingAnimation(parent.parent())\r\n self.wait_ani.hide()\r\n\r\n def resizeEvent(self, event):\r\n # To be modified.\r\n self.wait_ani.resize(self.parent().parent().size())\r\n event.accept()\r\n\r\n QScrollArea.resizeEvent(self, event)\r\n\r\n def preprocess(self):\r\n \"\"\"\r\n This method need to be overridden.\r\n \"\"\"\r\n pass\r\n\r\n def button_clicked(self, mdl, X, y, next_page):\r\n self.analyze(mdl, X, y, next_page)\r\n\r\n def analyze(self, mdl, X, y, next_page):\r\n if len(mdl.get_algorithms()) > 0:\r\n self.thread = AnalyzeWorker(mdl, X, y)\r\n self.thread.finSignal.connect(self.analyzed)\r\n self.thread.start()\r\n self.wait_ani.show()\r\n else:\r\n \"\"\"\r\n Already analyzed and not need to re-analyze.\r\n \"\"\"\r\n self.button_func(next_page)\r\n\r\n def analyzed(self, signalData):\r\n \"\"\"\r\n This method need to be overridden.\r\n \"\"\"\r\n self.wait_ani.hide()\r\n if 'error' in signalData:\r\n self.params.error = signalData['error']\r\n self.button_func('Error')\r\n else:\r\n pass\r\n\r\n def add_algorithm(self, mdl, algorithms, results):\r\n \"\"\"\r\n Add algorithm for re-analysis if hyper-parameters are changed.\r\n Otherwise, mdl.get_algorithms() returns empty list.\r\n \"\"\"\r\n prev_algorithms = mdl.get_algorithms()\r\n for n in range(len(prev_algorithms)):\r\n mdl.remove_algorithm(0)\r\n\r\n for name, parameters in algorithms:\r\n if not self.__need_analyze(name, parameters, results):\r\n continue\r\n\r\n if name == 'Support Vector Machine (RBF Kernel)':\r\n if self.params.task == 'Regression':\r\n from sklearn.svm import SVR\r\n mdl.add_algorithm(\r\n SVR(kernel='rbf'),\r\n parameters,\r\n 'Support Vector Machine (RBF Kernel)',\r\n ('http://scikit-learn.org/stable/modules/'\r\n 'generated/sklearn.svm.SVR.html'))\r\n elif self.params.task == 'Classification':\r\n from sklearn.svm import SVC\r\n mdl.add_algorithm(\r\n SVC(random_state=mdl.random_state,\r\n kernel='rbf'),\r\n parameters,\r\n 'Support Vector Machine (RBF Kernel)',\r\n ('http://scikit-learn.org/stable/modules/'\r\n 'generated/sklearn.svm.SVC.html'))\r\n else:\r\n raise Exception('Wrong task name.')\r\n elif name == 'Random Forest':\r\n if self.params.task == 'Regression':\r\n from sklearn.ensemble import RandomForestRegressor\r\n mdl.add_algorithm(\r\n RandomForestRegressor(\r\n random_state=mdl.random_state,\r\n n_estimators=500,\r\n n_jobs=1),\r\n parameters,\r\n 'Random Forest',\r\n ('http://scikit-learn.org/stable/modules/'\r\n 'generated/'\r\n 'sklearn.ensemble.RandomForestRegressor.html'))\r\n elif self.params.task == 'Classification':\r\n from sklearn.ensemble import RandomForestClassifier\r\n mdl.add_algorithm(\r\n RandomForestClassifier(\r\n random_state=mdl.random_state,\r\n n_estimators=500,\r\n n_jobs=1),\r\n parameters,\r\n 'Random Forest',\r\n ('http://scikit-learn.org/stable/modules/'\r\n 'generated/'\r\n 'sklearn.ensemble.RandomForestClassifier.html'))\r\n else:\r\n raise Exception('Wrong task name.')\r\n elif name == 'Support Vector Machine (Linear Kernel)':\r\n from sklearn.svm 
import LinearSVC\r\n mdl.add_algorithm(\r\n LinearSVC(random_state=mdl.random_state),\r\n parameters,\r\n 'Support Vector Machine (Linear Kernel)',\r\n ('http://scikit-learn.org/stable/modules/generated/'\r\n 'sklearn.svm.LinearSVC.html'))\r\n elif name == 'Logistic Regression':\r\n from sklearn.linear_model import LogisticRegression\r\n mdl.add_algorithm(\r\n LogisticRegression(\r\n random_state=mdl.random_state),\r\n parameters,\r\n 'Logistic Regression',\r\n ('http://scikit-learn.org/stable/modules/generated/'\r\n 'sklearn.linear_model.LogisticRegression.html'))\r\n elif name == 'Decision Tree':\r\n if self.params.task == 'Regression':\r\n from sklearn.tree import DecisionTreeRegressor\r\n mdl.add_algorithm(\r\n DecisionTreeRegressor(\r\n random_state=mdl.random_state),\r\n parameters,\r\n 'Decision Tree',\r\n ('http://scikit-learn.org/stable/modules/generated/'\r\n 'sklearn.tree.DecisionTreeRegressor.html'))\r\n elif self.params.task == 'Classification':\r\n from sklearn.tree import DecisionTreeClassifier\r\n mdl.add_algorithm(\r\n DecisionTreeClassifier(\r\n random_state=mdl.random_state),\r\n parameters,\r\n 'Decision Tree',\r\n ('http://scikit-learn.org/stable/modules/generated/'\r\n 'sklearn.tree.DecisionTreeClassifier.html'))\r\n else:\r\n raise Exception('Wrong task name.')\r\n elif name == 'k-Nearest Neighbors':\r\n from sklearn.neighbors import KNeighborsClassifier\r\n mdl.add_algorithm(\r\n KNeighborsClassifier(),\r\n parameters,\r\n 'k-Nearest Neighbors',\r\n ('http://scikit-learn.org/stable/modules/'\r\n 'generated/sklearn.neighbors.KNeighborsClassifier'\r\n '.html'))\r\n elif name == 'SGD Classifier':\r\n from sklearn.linear_model import SGDClassifier\r\n mdl.add_algorithm(\r\n SGDClassifier(\r\n random_state=mdl.random_state,\r\n n_jobs=1),\r\n parameters,\r\n 'SGD Classifier',\r\n ('http://scikit-learn.org/stable/modules/generated/'\r\n 'sklearn.linear_model.SGDClassifier.html'))\r\n elif name == 'Ridge Regression':\r\n from sklearn.linear_model import Ridge\r\n mdl.add_algorithm(\r\n Ridge(),\r\n parameters,\r\n 'Ridge Regression',\r\n ('http://scikit-learn.org/stable/modules/generated/'\r\n 'sklearn.linear_model.Ridge.html'))\r\n elif name == 'SGD Regressor':\r\n from sklearn.linear_model import SGDRegressor\r\n mdl.add_algorithm(\r\n SGDRegressor(\r\n random_state=mdl.random_state),\r\n parameters,\r\n 'SGD Regressor',\r\n ('http://scikit-learn.org/stable/modules/generated/'\r\n 'sklearn.linear_model.SGDRegressor.html'))\r\n\r\n def __need_analyze(self, name, parameters, results):\r\n flg = False\r\n\r\n param_result = results['algorithms'][name]['grid_scores']\r\n param_dic = {}\r\n for param, score, std in param_result:\r\n for k, v in param.items():\r\n if k not in param_dic:\r\n param_dic[k] = [v]\r\n else:\r\n param_dic[k].append(v)\r\n for k in param.keys():\r\n param_dic[k] = sorted(list(set(param_dic[k])))\r\n\r\n for k, v in parameters[0].items():\r\n if param_dic[k][0] != v[0] or param_dic[k][-1] != v[-1]:\r\n flg = True\r\n break\r\n elif len(param_dic[k]) != len(v):\r\n flg = True\r\n break\r\n\r\n return flg\r\n\r\n\r\nclass AnalyzeWorker(QThread):\r\n finSignal = pyqtSignal(dict)\r\n\r\n def __init__(self, mdl, X, y):\r\n super().__init__()\r\n self.mdl = mdl\r\n self.X = X\r\n self.y = y\r\n self.con = Condition()\r\n\r\n def run(self):\r\n with Manager() as manager:\r\n d = manager.dict()\r\n job = Process(target=AnalyzeWorker.sub_job,\r\n args=(self.mdl, self.X, self.y, d))\r\n job.start()\r\n job.join()\r\n self.finSignal.emit(d['result'])\r\n\r\n 
@staticmethod\r\n def sub_job(mdl, X, y, d):\r\n try:\r\n mdl.fit(X, y)\r\n d['result'] = mdl.results\r\n except Exception as e:\r\n import traceback\r\n d['result'] = {'error': traceback.format_exc()}"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linear_model.SGDRegressor",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.svm.SVR",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.linear_model.Ridge",
"sklearn.svm.SVC",
"sklearn.svm.LinearSVC",
"sklearn.linear_model.SGDClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
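The AnalyzeWorker in the analyzer.py row above combines a QThread (so the GUI thread stays responsive) with a multiprocessing.Process and Manager dict (so a crash inside the sklearn fit cannot take the GUI process down), and reports back through a pyqtSignal(dict). A stripped-down sketch of that pattern follows; slow_fit is a placeholder standing in for mdl.fit, and all names outside PyQt5/multiprocessing are illustrative only.

from multiprocessing import Process, Manager
from PyQt5.QtCore import QThread, pyqtSignal

def slow_fit(x, y, out):                # placeholder for the real mdl.fit(X, y)
    try:
        out['result'] = {'score': sum(xi * yi for xi, yi in zip(x, y))}
    except Exception:
        import traceback
        out['result'] = {'error': traceback.format_exc()}

class Worker(QThread):
    finSignal = pyqtSignal(dict)        # emitted once the child process finishes

    def __init__(self, x, y):
        super().__init__()
        self.x, self.y = x, y

    def run(self):
        with Manager() as manager:
            d = manager.dict()
            job = Process(target=slow_fit, args=(self.x, self.y, d))
            job.start()
            job.join()                  # blocks this worker thread, not the GUI thread
            self.finSignal.emit(dict(d['result']))

# Worker(x, y).start() would be called from GUI code running a QApplication event loop.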
ssabzzz/BERT-NER | [
"ab60d6afee2b5b4200149c6270823872fd8efecd"
] | [
"interactive.py"
] | [
"\"Evaluate the model\"\"\"\nimport os\nimport nltk\nimport torch\nimport random\nimport logging\nimport argparse\nimport numpy as np\nimport utils as utils\nfrom metrics import get_entities\nfrom data_loader import DataLoader\nfrom SequenceTagger import BertForSequenceTagging\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='msra', help=\"Directory containing the dataset\")\nparser.add_argument('--seed', type=int, default=23, help=\"random seed for initialization\")\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\ndef interAct(model, data_iterator, params, mark='Interactive', verbose=False):\n \"\"\"Evaluate the model on `steps` batches.\"\"\"\n # set model to evaluation mode\n model.eval()\n idx2tag = params.idx2tag\n\n batch_data, batch_token_starts = next(data_iterator)\n batch_masks = batch_data.gt(0)\n \n batch_output = model((batch_data, batch_token_starts), token_type_ids=None, attention_mask=batch_masks)[0] # shape: (batch_size, max_len, num_labels)\n batch_output = batch_output.detach().cpu().numpy()\n \n pred_tags = []\n pred_tags.extend([[idx2tag.get(idx) for idx in indices] for indices in np.argmax(batch_output, axis=2)])\n \n return(get_entities(pred_tags))\n\n\ndef bert_ner_init():\n args = parser.parse_args()\n tagger_model_dir = 'experiments/' + args.dataset\n\n # Load the parameters from json file\n json_path = os.path.join(tagger_model_dir, 'params.json')\n assert os.path.isfile(json_path), \"No json configuration file found at {}\".format(json_path)\n params = utils.Params(json_path)\n\n # Use GPUs if available\n params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # Set the random seed for reproducible experiments\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n params.seed = args.seed\n\n # Set the logger\n utils.set_logger(os.path.join(tagger_model_dir, 'evaluate.log'))\n\n # Create the input data pipeline\n logging.info(\"Loading the dataset...\")\n\n # Initialize the DataLoader\n data_dir = 'data/' + args.dataset\n if args.dataset in [\"conll\"]:\n bert_class = 'bert-base-cased'\n elif args.dataset in [\"msra\"]:\n bert_class = 'bert-base-chinese'\n elif args.dataset in [\"pe\"]:\n bert_class = 'bert-base-multilingual-cased'\n\n data_loader = DataLoader(data_dir, bert_class, params, token_pad_idx=0, tag_pad_idx=-1)\n\n # Load the model\n model = BertForSequenceTagging.from_pretrained(tagger_model_dir)\n model.to(params.device)\n\n return model, data_loader, args.dataset, params\n\ndef BertNerResponse(model, queryString): \n model, data_loader, dataset, params = model\n if dataset in ['msra','pe']:\n queryString = [i for i in queryString]\n elif dataset in ['conll']:\n queryString = nltk.word_tokenize(queryString)\n\n\n with open('data/' + dataset + '/interactive/sentences.txt', 'w') as f:\n f.write(' '.join(queryString))\n\n inter_data = data_loader.load_data('interactive')\n inter_data_iterator = data_loader.data_iterator(inter_data, shuffle=False)\n result = interAct(model, inter_data_iterator, params)\n res = []\n for item in result:\n if dataset in ['msra','pe']:\n res.append((''.join(queryString[item[1]:item[2]+1]), item[0]))\n elif dataset in ['conll']:\n res.append((' '.join(queryString[item[1]:item[2]+1]), item[0]))\n return res\n\n\ndef main():\n model = bert_ner_init()\n while True:\n query = input('Input:')\n if query == 'exit':\n break\n print(BertNerResponse(model, query))\n\n\nif __name__ == '__main__':\n main()\n\n\n \n\n"
] | [
[
"torch.manual_seed",
"numpy.argmax",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
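interactive.py in the row above turns the per-token argmax indices into entity spans through get_entities, imported from the repo's metrics module and not shown here. Below is a rough sketch of the kind of BIO span extraction such a helper typically performs, assuming the usual B-TYPE/I-TYPE/O scheme and (type, start, end) output tuples, which is what the slicing queryString[item[1]:item[2]+1] suggests; this is an inference, not the repo's implementation.

def bio_to_spans(tags):
    """Collect (entity_type, start_index, end_index) spans from a well-formed BIO tag list."""
    spans, start, etype = [], None, None
    for i, tag in enumerate(tags + ['O']):          # sentinel 'O' flushes the last open span
        if tag.startswith('B-') or tag == 'O' or (etype and tag[2:] != etype):
            if start is not None:
                spans.append((etype, start, i - 1))
                start, etype = None, None
        if tag.startswith('B-'):
            start, etype = i, tag[2:]
    return spans

# bio_to_spans(['B-PER', 'I-PER', 'O', 'B-LOC']) -> [('PER', 0, 1), ('LOC', 3, 3)]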
Dheer08/Workflow | [
"041d01d2014e7bf0ff8d5cf400f1cd7b75911cbf"
] | [
"Ensemble.py"
] | [
"import csv\r\nimport math\r\nimport random \r\nimport pandas as pd\r\nfrom sklearn.naive_bayes import GaussianNB ,BernoulliNB\r\nfrom sklearn import preprocessing,linear_model\r\nimport sklearn\r\nimport numpy as np\r\n#from sklearn.utils import shuffle\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn import svm\r\nfrom sklearn.ensemble import VotingClassifier\r\nimport statistics\r\n\r\ndata =pd.read_csv(\"Real Dataset.csv\")\r\n#print(data.head())\r\n\r\nle =preprocessing.LabelEncoder()\r\ncloudlet_ID =le.fit_transform(list(data[\"cloudlet ID\"]))\r\nDatacenter_ID =le.fit_transform(list(data[\"Data center ID\"]))\r\nVM_ID = le.fit_transform(list(data[\"VM ID\"]))\r\nBwutil =le.fit_transform(list(data[\"Bwutil\"]))\r\nCPUutil =le.fit_transform(list(data[\"CPUutil\"]))\r\nmemutil =le.fit_transform(list(data[\"memutil\"]))\r\nDisk_util =le.fit_transform(list(data[\"Disk util\"]))\r\nturn_aroundTime =data[\"turnAround\"]\r\n#Start_Time =le.fit_transform(list(data[\"Start Time\"]))\r\n#Finish_Time =le.fit_transform(list(data[\"Finish Time\"]))\r\n#namespace =le.fit_transform(list(data[\"namespace\"]))\r\nstatus =le.fit_transform(list(data[\"STATUS\"]))\r\n\r\nx=list(zip(cloudlet_ID,Datacenter_ID,VM_ID,Bwutil,CPUutil,memutil,Disk_util))\r\ny=list(status)\r\n\r\nx_train,x_test,y_train,y_test =sklearn.model_selection.train_test_split(x,y,test_size = 0.1)\r\n\r\nmodel1 =RandomForestClassifier(n_estimators=10)\r\nmodel2 =KNeighborsClassifier(n_neighbors=5)\r\nmodel3=svm.SVC(gamma='auto')\r\nmodel4=linear_model.LinearRegression()\r\nmodel5=linear_model.LogisticRegression()\r\nmodel6=GaussianNB()\r\nmodel7=DecisionTreeClassifier()\r\nmodel8=BernoulliNB()\r\n\r\nmodel1.fit(x_train,y_train)\r\nmodel2.fit(x_train,y_train)\r\nmodel3.fit(x_train,y_train)\r\nmodel4.fit(x_train,y_train)\r\nmodel5.fit(x_train,y_train)\r\nmodel6.fit(x_train,y_train)\r\nmodel7.fit(x_train,y_train)\r\nmodel8.fit(x_train,y_train)\r\n\r\nacc1 =model1.score(x_test,y_test)\r\nacc2=model2.score(x_test,y_test)\r\nacc3=model3.score(x_test,y_test)\r\nacc4=model4.score(x_test,y_test)\r\nacc5=model5.score(x_test,y_test)\r\nacc6=model6.score(x_test,y_test)\r\nacc7=model7.score(x_test,y_test)\r\nacc8=model8.score(x_test,y_test)\r\n\r\n#final_pred =np.array([])\r\n#for i in range(0,len(x_test)):\r\n\t#final_pred =np.append(final_pred,statistics.mode([pred1[i],pred2[i],pred3[i]]))\r\n\r\nmodel11 =VotingClassifier(estimators=[('rf',model1),('kn',model2),('svm',model3)],voting ='hard')\r\n#model12=VotingClassifier(estimators=[('rf',model1),('kn',model2),('lr',model4)],voting='hard')\r\nmodel13=VotingClassifier(estimators=[('rf',model1),('kn',model2),('lr',model5)],voting='hard')\r\nmodel14=VotingClassifier(estimators=[('rf',model1),('kn',model2),('nb',model6)],voting='hard')\r\nmodel15=VotingClassifier(estimators=[('rf',model1),('svm',model3),('nb',model6)],voting='hard')\r\nmodel16=VotingClassifier(estimators=[('rf',model1),('lr',model5),('nb',model6)],voting='hard')\r\nmodel17=VotingClassifier(estimators=[('svm',model3),('kn',model2),('nb',model6)],voting='hard')\r\nmodel18=VotingClassifier(estimators=[('lr',model5),('kn',model2),('svm',model3)],voting='hard')\r\n#Left model12 due to conversion error and the accuracy is very 
low\r\n\r\nmodel11.fit(x_train,y_train)\r\n#model12.fit(x_train,y_train)\r\nmodel13.fit(x_train,y_train)\r\nmodel14.fit(x_train,y_train)\r\nmodel15.fit(x_train,y_train)\r\nmodel16.fit(x_train,y_train)\r\nmodel17.fit(x_train,y_train)\r\nmodel18.fit(x_train,y_train)\r\n\r\nacc11=model11.score(x_test,y_test)\r\n#acc12=model12.score(x_test,y_test)\r\nacc13=model13.score(x_test,y_test)\r\nacc14=model14.score(x_test,y_test)\r\nacc15=model15.score(x_test,y_test)\r\nacc16=model16.score(x_test,y_test)\r\nacc17=model17.score(x_test,y_test)\r\nacc18=model18.score(x_test,y_test)\r\n\r\nprint(\"\\n\\n\\n\")\r\nprint(\"Random Forest :\",end=\"\")\r\nprint(acc1)\r\nprint(\"Kneighbors :\",end=\"\")\r\nprint(acc2)\r\nprint(\"SVM :\",end=\"\")\r\nprint(acc3)\r\nprint(\"Linear Regression :\",end=\"\")\r\nprint(acc4)\r\nprint(\"Logistic Regression :\",end=\"\")\r\nprint(acc5)\r\nprint(\"Naive Bayes :\",end=\"\")\r\nprint(acc6)\r\nprint(\"Bernoulli NaiveBayes :\",end=\"\")\r\nprint(acc8)\r\nprint(\"Decision Tree :\",end=\"\")\r\nprint(acc7)\r\nprint(\"\\n BY USING MAX VOTING\")\r\nprint(\"RandomForest,KNeighbors,SVM :\",end =\"\")\r\nprint(acc11)\r\n#print(\"RandomForest,KNeighbors,LinearRegression :\",end=\"\")\r\n#print(acc12)\r\nprint(\"RandomForest,KNeighbors,LogisticRegression :\",end=\"\")\r\nprint(acc13)\r\nprint(\"RandomForest,KNeighbors,NaiveBayes :\",end=\"\")\r\nprint(acc14)\r\nprint(\"RandomForest,SVM,Naive Bayes :\",end=\"\")\r\nprint(acc15)\r\nprint(\"RandomForest,LogisticRegression,Naive Bayes :\",end=\"\")\r\nprint(acc16)\r\nprint(\"SVM,KNeighbors,NaiveBayes :\",end=\"\")\r\nprint(acc17)\r\nprint(\"LogisticRegression,KNeighbors,SVM :\",end=\"\")\r\nprint(acc18)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n"
] | [
[
"pandas.read_csv",
"sklearn.naive_bayes.GaussianNB",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.VotingClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.naive_bayes.BernoulliNB",
"sklearn.linear_model.LinearRegression",
"sklearn.svm.SVC",
"sklearn.preprocessing.LabelEncoder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
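In the Ensemble.py row above, the commented-out model12 mixes LinearRegression into a hard-voting ensemble; that is what triggers the "conversion error" noted in the comment, because hard voting takes a majority vote over discrete class predictions and a regressor returns continuous values rather than class labels. A minimal self-contained sketch of the hard-voting pattern the script relies on, using synthetic data so it runs standalone; the CSV columns and file name in the original are specific to that repo.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = make_classification(n_samples=300, n_features=7, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)

# Hard voting: each classifier predicts a label and the majority wins.
ensemble = VotingClassifier(
    estimators=[('rf', RandomForestClassifier(n_estimators=10, random_state=0)),
                ('kn', KNeighborsClassifier(n_neighbors=5)),
                ('lr', LogisticRegression(max_iter=1000))],
    voting='hard')
ensemble.fit(X_train, y_train)
print('ensemble accuracy:', ensemble.score(X_test, y_test))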
lucasxlu/FGVC | [
"c1ac2d49d77d4069ecee0b2c97dbd7cd16b3f700"
] | [
"main/run_resnet.py"
] | [
"import copy\nimport os\nimport sys\nimport time\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom sklearn.metrics import confusion_matrix\nfrom torch.optim import lr_scheduler\nfrom torchvision import models\n\nsys.path.append('../')\nfrom data.data_loader import load_data\nfrom util.file_utils import mkdirs_if_not_exist\nfrom config.cfg import cfg\n\ndataloaders = load_data('FGVC')\ndataset_sizes = {x: len(dataloaders[x]) for x in ['train', 'val', 'test']}\nbatch_size = cfg['config']['FGVC']['batch_size']\n\n\ndef train_model(model, train_dataloader, test_dataloader, criterion, optimizer, scheduler, num_epochs,\n inference=False):\n \"\"\"\n train model\n :param model:\n :param train_dataloader:\n :param test_dataloader:\n :param criterion:\n :param optimizer:\n :param scheduler:\n :param num_epochs:\n :param inference:\n :return:\n \"\"\"\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model)\n model = model.to(device)\n\n if not inference:\n print('Start training ResNet...')\n model.train()\n\n for epoch in range(num_epochs):\n scheduler.step()\n\n running_loss = 0.0\n for i, data in enumerate(train_dataloader, 0):\n images, label = data['image'], data['label']\n\n images = images.to(device)\n label = label.to(device)\n\n optimizer.zero_grad()\n\n pred = model(images)\n loss = criterion(pred, label)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 50 == 49: # print every 50 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 50))\n running_loss = 0.0\n\n print('Finished training ResNet...\\n')\n print('Saving trained model...')\n model_path_dir = './model'\n mkdirs_if_not_exist(model_path_dir)\n torch.save(model.state_dict(), os.path.join(model_path_dir, 'resnet.pth'))\n print('ResNet has been saved successfully~')\n\n else:\n print('Loading pre-trained model...')\n model.load_state_dict(torch.load(os.path.join('./model/resnet.pth')))\n\n model.eval()\n\n correct = 0\n total = 0\n\n with torch.no_grad():\n for data in test_dataloader:\n images, label = data['image'], data['label']\n images = images.to(device)\n label = label.to(device)\n\n pred = model.forward(images)\n _, predicted = torch.max(pred.data, 1)\n\n total += pred.size(0)\n\n correct += (predicted == label).sum().item()\n\n print('Accuracy of ResNet: %f' % (correct / total))\n\n\ndef train_model_ft(model, dataloaders, criterion, optimizer, scheduler, num_epochs, inference):\n \"\"\"\n train model with fine-tune on ImageNet\n :param dataloaders:\n :param model:\n :param criterion:\n :param optimizer:\n :param scheduler:\n :param num_epochs:\n :param inference:\n :return:\n \"\"\"\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model)\n model = model.to(device)\n\n if not inference:\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 100)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n scheduler.step()\n model.train() # Set model to training 
mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for i, data in enumerate(dataloaders[phase], 0):\n inputs = data['image'].to(device)\n labels = data['label'].to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / (dataset_sizes[phase] * batch_size)\n epoch_acc = running_corrects.double() / (dataset_sizes[phase] * batch_size)\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n model_path_dir = './model'\n mkdirs_if_not_exist(model_path_dir)\n torch.save(model.state_dict(), os.path.join(model_path_dir, '{0}_Epoch_{1}.pth'.format(\n model.__class__.__name__, epoch)))\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n\n else:\n print('Loading pre-trained model...')\n model.load_state_dict(torch.load(os.path.join('./model/%s.pth' % model.__class__.__name__)))\n\n model.eval()\n\n correct = 0\n total = 0\n y_pred = []\n y_true = []\n filename_list = []\n probs = []\n\n with torch.no_grad():\n for data in dataloaders['test']:\n images, labels, filenames = data['image'], data['label'], data['filename']\n images = images.to(device)\n labels = labels.to(device)\n\n outputs = model.forward(images)\n outputs = F.softmax(outputs)\n\n # get TOP-K output labels and corresponding probabilities\n topK_prob, topK_label = torch.topk(outputs, 2)\n probs += topK_prob.to(\"cpu\").detach().numpy().tolist()\n\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n y_pred += predicted.to(\"cpu\").detach().numpy().tolist()\n y_true += labels.to(\"cpu\").detach().numpy().tolist()\n filename_list += filenames\n\n print('Accuracy of ResNet: %f' % (correct / total))\n\n cm = confusion_matrix(y_true, y_pred)\n print(cm)\n\n cm = np.array(cm)\n\n precisions = []\n recalls = []\n for i in range(len(cm)):\n precisions.append(cm[i][i] / sum(cm[:, i].tolist()))\n recalls.append(cm[i][i] / sum(cm[i, :].tolist()))\n\n print('Precision List: ')\n print(precisions)\n print('Recall List: ')\n print(recalls)\n\n print(\"Precision of {0} on val set = {1}\".format(model.__class__.__name__,\n sum(precisions) / len(precisions)))\n print(\n \"Recall of {0} on val set = {1}\".format(model.__class__.__name__, sum(recalls) / len(recalls)))\n\n print('Output CSV...')\n col = ['filename', 'gt', 'pred', 'prob']\n df = pd.DataFrame([[filenames[i], y_true[i], y_pred[i], probs[i][0]] for i in range(len(filenames))],\n columns=col)\n df.to_csv(\"./output-%s.csv\" % model.__class__.__name__, index=False)\n print('CSV has been generated...')\n\n\ndef run_resnet(epoch, inference=False):\n \"\"\"\n train and eval on ResNet\n :return:\n \"\"\"\n resnet = 
models.resnet50(pretrained=True)\n num_ftrs = resnet.fc.in_features\n resnet.fc = nn.Linear(num_ftrs, 998)\n\n criterion = nn.CrossEntropyLoss()\n\n optimizer = optim.SGD(resnet.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4)\n\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.1)\n\n train_model_ft(model=resnet, dataloaders=load_data(\"FGVC\"), criterion=criterion,\n optimizer=optimizer, scheduler=exp_lr_scheduler, num_epochs=epoch, inference=inference)\n\n # train_model(model=resnet, train_dataloader=train_dataloader, test_dataloader=test_dataloader,\n # criterion=criterion, optimizer=optimizer, scheduler=exp_lr_scheduler,\n # num_epochs=cfg['config']['FGVC']['epoch'],\n # inference=False)\n\n\nif __name__ == '__main__':\n run_resnet(epoch=200, inference=False)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torch.max",
"torch.sum",
"sklearn.metrics.confusion_matrix",
"torch.nn.Linear",
"torch.nn.DataParallel",
"torch.set_grad_enabled",
"torch.no_grad",
"torch.cuda.is_available",
"torch.topk",
"torch.cuda.device_count",
"numpy.array",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
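run_resnet.py above fine-tunes all of ResNet-50 after swapping its final fully connected layer for a 998-class head. A minimal sketch of that head replacement follows, including the common variant where the backbone is frozen so only the new classifier trains; freezing is a design choice not taken in the original script, and torchvision's pretrained= flag has since been superseded by weights= in newer releases.

import torch.nn as nn
import torch.optim as optim
from torchvision import models

NUM_CLASSES = 998                           # as configured in run_resnet.py

resnet = models.resnet50(pretrained=True)

# Optional variant (not used above): freeze the backbone so only the new head learns.
for param in resnet.parameters():
    param.requires_grad = False

resnet.fc = nn.Linear(resnet.fc.in_features, NUM_CLASSES)   # new layer defaults to requires_grad=True

optimizer = optim.SGD((p for p in resnet.parameters() if p.requires_grad),
                      lr=0.001, momentum=0.9, weight_decay=1e-4)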
Cather-learner/Social-Bias-Finance | [
"ac98b2ea3ecb6c5dd7f29afa004d8ffa833b9bdf"
] | [
"utils.py"
] | [
"# coding: UTF-8\nimport torch\nfrom tqdm import tqdm\nimport time\nfrom datetime import timedelta\nimport re\nimport pandas as pd\nimport numpy as np\n\nPAD, CLS = '[PAD]', '[CLS]' # padding符号, bert中综合信息符号\n\ndef build_dataset(config):\n def load_dataset(path, pad_size=32):\n contents = []\n #data_ = pd.read_csv(path, low_memory=False)\n with open(path, 'r', encoding='UTF-8', errors='ignore') as f:\n for line in f:\n label = int(line.split(',')[1])\n content = line.split(',')[0]\n token = config.tokenizer.tokenize(content)\n token = [CLS] + token\n seq_len = len(token)\n mask = []\n token_ids = config.tokenizer.convert_tokens_to_ids(token)\n\n if pad_size:\n if len(token) < pad_size:\n mask = [1] * len(token_ids) + [0] * (pad_size - len(token))\n token_ids += ([0] * (pad_size - len(token)))\n else:\n mask = [1] * pad_size\n token_ids = token_ids[:pad_size]\n seq_len = pad_size\n contents.append((token_ids, label, seq_len, mask))\n return contents\n\n train = load_dataset(config.train_path, config.pad_size)\n dev = load_dataset(config.dev_path, config.pad_size)\n test = load_dataset(config.test_path, config.pad_size)\n return train, dev, test\n\n\nclass DatasetIterater(object):\n def __init__(self, batches, batch_size, device):\n self.batch_size = batch_size\n self.batches = batches\n self.n_batches = len(batches) // batch_size\n self.residue = False # 记录batch数量是否为整数\n if len(batches) % self.n_batches != 0:\n self.residue = True\n self.index = 0\n self.device = device\n\n def _to_tensor(self, datas):\n x = torch.LongTensor([_[0] for _ in datas]).to(self.device)\n y = torch.LongTensor([_[1] for _ in datas]).to(self.device)\n # pad前的长度(超过pad_size的设为pad_size)\n seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)\n mask = torch.LongTensor([_[3] for _ in datas]).to(self.device)\n return (x, seq_len, mask), y\n\n def __next__(self):\n if self.residue and self.index == self.n_batches:\n batches = self.batches[self.index * self.batch_size: len(self.batches)]\n self.index += 1\n batches = self._to_tensor(batches)\n return batches\n\n elif self.index >= self.n_batches:\n self.index = 0\n raise StopIteration\n else:\n batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]\n self.index += 1\n batches = self._to_tensor(batches)\n return batches\n\n def __iter__(self):\n return self\n\n def __len__(self):\n if self.residue:\n return self.n_batches + 1\n else:\n return self.n_batches\n\n\ndef build_iterator(datas, config):\n iter = DatasetIterater(datas, config.batch_size, config.device)\n return iter\n\n\ndef get_time_dif(start_time):\n \"\"\"获取已使用时间\"\"\"\n end_time = time.time()\n time_dif = end_time - start_time\n return timedelta(seconds=int(round(time_dif)))\n"
] | [
[
"torch.LongTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
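load_dataset in the utils.py row above pads every token sequence to pad_size and builds a matching attention mask inline. The padding logic can be isolated into a small helper, sketched below under the same convention (pad id 0, mask 1 for real tokens and 0 for padding); the helper name is illustrative.

def pad_and_mask(token_ids, pad_size, pad_id=0):
    """Truncate or right-pad token_ids to pad_size and return (ids, mask)."""
    if len(token_ids) >= pad_size:
        ids = token_ids[:pad_size]
        mask = [1] * pad_size
    else:
        pad_len = pad_size - len(token_ids)
        ids = token_ids + [pad_id] * pad_len
        mask = [1] * len(token_ids) + [0] * pad_len
    return ids, mask

# pad_and_mask([101, 2023, 2003], 5) -> ([101, 2023, 2003, 0, 0], [1, 1, 1, 0, 0])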
dgehringer/pyiron_atomistics | [
"2c8052b082f2c4fb6f6291ac2b1f801ea7ab1567"
] | [
"pyiron_atomistics/atomistics/structure/sparse_list.py"
] | [
"# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nfrom __future__ import print_function\n\n# import os\nimport sys\nimport copy\nimport numpy as np\nfrom collections import OrderedDict\nfrom collections.abc import Sequence\n\n__author__ = \"Joerg Neugebauer\"\n__copyright__ = (\n \"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - \"\n \"Computational Materials Design (CM) Department\"\n)\n__version__ = \"1.0\"\n__maintainer__ = \"Jan Janssen\"\n__email__ = \"[email protected]\"\n__status__ = \"production\"\n__date__ = \"Sep 1, 2017\"\n\n\nclass SparseListElement(object):\n \"\"\"\n Handle single element of a sparse lisr\n Args:\n ind: index\n val: value\n \"\"\"\n\n def __init__(self, ind, val):\n self.index = ind\n self.value = val\n\n def __str__(self):\n return \"({}: {})\".format(self.index, self.value)\n\n\nclass SparseList(object):\n \"\"\"\n Object to represent a single sparse list\n Internal representation like a dict\n External representation like a list\n Args:\n sparse_list: dict object with {index: val}\n default: default value for all elements not given by index in sparse_list\n length: length of the list\n \"\"\"\n\n def __init__(self, sparse_list, default=None, length=None):\n if isinstance(sparse_list, dict):\n self._dict = sparse_list.copy()\n if \"_\" in self._dict.keys():\n default = self._dict[\"_\"]\n del self._dict[\"_\"]\n\n if length is None:\n raise ValueError(\"Length must be provided in dict input mode\")\n self._length = length\n elif isinstance(sparse_list, (list, np.ndarray)):\n # self._dict = {el: [] for el in set(sparse_list)}\n self._dict = {}\n for i, el in enumerate(sparse_list):\n self._dict[i] = el\n self._length = len(sparse_list)\n if length is not None:\n if length != self._length:\n raise ValueError(\"Incompatible length of new list\")\n self._default = default\n\n def _val_data_type(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n if isinstance(self.values(), dict):\n pass\n print(self.values())\n data_0 = self.values()[0]\n if isinstance(data_0, list):\n if isinstance(data_0[0], bool):\n return \"list_bool\"\n else:\n raise ValueError(\n \"tags which have as elements lists or tensors are not implemented\"\n )\n else:\n return \"scalar\"\n\n def to_hdf(self, hdf, key):\n \"\"\"\n\n Args:\n hdf:\n key:\n\n Returns:\n\n \"\"\"\n if len(self.list()) > 0:\n # Convert to array and store\n hdf[key] = np.array(self.list())\n elif len(self.values()) > 0:\n print(\"sparse array: \", key, len(self.values()))\n data_type = self._val_data_type()\n my_dict = OrderedDict()\n my_dict[\"index\"] = self.keys()\n if data_type == \"list_bool\":\n my_dict[\"values\"] = [\n sum([2**i * int(v) for i, v in enumerate(val)])\n for val in self.values()\n ]\n else:\n my_dict[\"values\"] = self.values()\n print(\"values: \", self.values())\n hdf[key] = my_dict\n\n def __len__(self):\n return self._length\n\n def __copy__(self):\n return SparseList(\n sparse_list=self._dict, default=self._default, length=self._length\n )\n\n def keys(self):\n \"\"\"\n\n Returns:\n indices of non-sparse elements\n \"\"\"\n return self._dict.keys()\n\n def values(self):\n \"\"\"\n\n Returns:\n values of non-sparse elements\n \"\"\"\n return self._dict.values()\n\n def items(self):\n \"\"\"\n\n Returns:\n index, value pairs of non-sparse elements\n \"\"\"\n return self._dict.items()\n\n def list(self):\n \"\"\"\n convert sparse list 
into full list\n Returns:\n list representation\n \"\"\"\n full_list = [self._default for _ in range(self._length)]\n for i, val in self._dict.items():\n full_list[i] = val\n\n return full_list\n\n def __iter__(self):\n if self._default is None:\n for i, val in self._dict.items():\n yield SparseListElement(i, val)\n else:\n for i, val in enumerate(self.list()):\n yield val\n\n def __getitem__(self, item):\n if isinstance(item, (int, np.integer)):\n if item in self._dict:\n return self._dict[item]\n return self._default\n\n if isinstance(item, slice):\n ind_list = range(len(self))[item]\n elif isinstance(item, (list, tuple, np.ndarray)):\n if len(item) == 0:\n ind_list = []\n else:\n if isinstance(item[0], (int, np.integer)):\n ind_list = item\n elif isinstance(item[0], (bool, np.bool_)):\n ind_list = []\n for i, bo in enumerate(item):\n if bo:\n ind_list.append(i)\n else:\n raise ValueError(\"Unknown item type: \" + str(type(item)))\n sliced_dict = {\n j: self._dict[ind] for j, ind in enumerate(ind_list) if ind in self._dict\n }\n\n return self.__class__(sliced_dict, default=self._default, length=len(ind_list))\n\n def __setitem__(self, key, value):\n if isinstance(key, (int, np.integer)):\n if key > len(self):\n raise IndexError\n self._dict[key] = value\n return\n elif isinstance(key, slice):\n key = range(len(self))[key]\n\n if max(key) > self._length:\n raise IndexError\n for i in key:\n self._dict[i] = value\n\n def __delitem__(self, key):\n # programmed for simplicity, not for performance\n ind_list = list(range(len(self)))\n if isinstance(key, (list, np.ndarray, tuple)):\n indexes = sorted(list(key), reverse=True)\n for index in indexes:\n del ind_list[index]\n else:\n del ind_list[key]\n new_list = self[ind_list]\n self._dict = new_list._dict\n self._length = new_list._length\n self._default = new_list._default\n\n def __add__(self, other):\n if not (isinstance(other, SparseList)):\n raise AssertionError()\n if not (self._default == other._default):\n raise AssertionError()\n new_list = self.__copy__()\n shifted_dict = {i + self._length: val for i, val in other._dict.items()}\n new_list._dict.update(shifted_dict)\n new_list._length += len(other)\n return new_list\n\n def __mul__(self, other):\n if not isinstance(other, (int, np.integer)):\n raise ValueError(\"Multiplication defined only for SparseArray*integers\")\n overall_list = other * np.arange(len(self)).tolist()\n new_dic = dict()\n for k in self.keys():\n for val in np.argwhere(np.array(overall_list) == k).flatten():\n new_dic[val] = self[k]\n return self.__class__(new_dic, default=self._default, length=other * len(self))\n\n def __rmul__(self, other):\n if isinstance(other, int):\n return self * other\n\n def __str__(self):\n if self._default is None:\n return \"[\" + \" \".join([str(el) for el in self]) + \"]\"\n else:\n # return \"[\" + \" \".join([str(el) + os.sep for el in self.list()]) + \"]\"\n return \"[\" + \" \".join([str(el) for el in self.list()]) + \"]\"\n\n def __repr__(self):\n return str(self.list())\n\n\ndef sparse_index(index_list, length, default_val=True):\n \"\"\"\n\n Args:\n index_list:\n length:\n default_val:\n\n Returns:\n\n \"\"\"\n new_dict = {i: default_val for i in index_list}\n return SparseList(new_dict, length=length)\n\n\nclass SparseArrayElement(object):\n \"\"\"\n Single element of a SparseArray\n Args:\n **qwargs:\n \"\"\"\n\n def __init__(self, **qwargs):\n self._lists = dict()\n if qwargs:\n self._lists = qwargs\n\n def __getattr__(self, item):\n if item in self._lists.keys():\n return 
self._lists[item]\n raise AttributeError(\n \"Object has no attribute {} {}\".format(self.__class__, item)\n )\n\n def __str__(self):\n out_str = \"\"\n for key, val in self._lists.items():\n out_str += \"{}: {}\".format(key, val)\n return out_str\n\n def __eq__(self, other):\n if not (isinstance(other, SparseArrayElement)):\n raise AssertionError()\n conditions = []\n for key in self._lists.keys():\n try:\n if isinstance(self._lists[key], np.ndarray):\n conditions += list(np.equal(self._lists[key], other._lists[key]))\n else:\n conditions.append(self._lists[key] == other._lists[key])\n except KeyError:\n conditions.append(False)\n return all(conditions)\n\n\nclass SparseArray(object):\n \"\"\"\n Administrate object that consists of several sparse lists (tags) and full lists that have identical indices and\n length\n\n Args:\n **qwargs: dictionary containing lists and SparseLists (tags) (must have identical length)\n \"\"\"\n\n def __init__(self, length=None, **qwargs):\n self._lists = dict()\n self._length = length\n for key in qwargs:\n value = qwargs[key]\n if self._length is None:\n self._length = len(value)\n else:\n if not len(self) == len(value):\n raise ValueError(\n \"Inconsistent vector lengths {} {} {}\".format(\n key, len(self), len(value)\n )\n )\n self._lists[key] = value\n\n def __setitem__(self, key, value):\n # exclude hidden variables (starting with _ from being added to _lists\n # if (not hasattr(self, '_lists')) or (key[0] == \"_\"):\n # self.__dict__[key] = value\n # return\n # el\n\n if isinstance(value, SparseList):\n self._lists[key] = value\n return\n elif isinstance(value, (Sequence, np.ndarray)):\n if len(value) == len(self):\n self._lists[key] = value\n return\n else:\n raise ValueError(\n \"Length of array object and new list are inconsistent: {} {} {}\".format(\n key, len(value), len(self)\n )\n )\n raise ValueError(\"Unsupported argument: \" + str(type(value)))\n\n def __getattr__(self, item):\n # if not (item in [\"_lists\"]):\n # print \"item: \", item, hasattr(self, item)\n if \"_lists\" in dir(self): # Python 3\n if item in self._lists.keys():\n return self._lists[item]\n\n return object.__getattribute__(self, item)\n # raise AttributeError(\"%r object has no attribute %r\" %(self.__class__, item))\n\n def __delitem__(self, key):\n for k in self.keys():\n if len(self._lists[k]) == 0:\n # ensure ASE compatibility\n print(\"Empty key in SparseList: \", k, key)\n continue\n # print \"del: \", k, key\n if isinstance(self._lists[k], np.ndarray):\n self._lists[k] = np.delete(self._lists[k], key, axis=0)\n self._length = len(self._lists[k])\n elif isinstance(self._lists[k], (list, tuple)):\n if isinstance(key, (list, np.ndarray, tuple)):\n indexes = sorted(list(key), reverse=True)\n for index in indexes:\n del self._lists[k][index]\n else:\n del self._lists[k][key]\n else:\n del self._lists[k][key]\n # self._length = len(self._lists[k])\n\n def check_consistency(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n for key, val in self._lists.items():\n # for val in self._lists.values():\n # print (\"consistency: \", key, len(val), len(self))\n if not (len(val) == self._length):\n raise AssertionError()\n\n def __str__(self):\n out_str = \"\\n\"\n for key, val in self._lists.items():\n out_str += key + \" := [\" + \" \".join([str(el) for el in val]) + \"] \\n\"\n return out_str\n\n def __len__(self):\n if hasattr(self, \"_length\"):\n return self._length\n else:\n return 0\n\n def __getitem__(self, item):\n new_dict = {}\n if isinstance(item, int):\n for key, value in 
self._lists.items():\n if value[item] is not None:\n new_dict[key] = value[item]\n return SparseArrayElement(**new_dict)\n elif isinstance(item, str):\n return self._lists[item]\n\n elif isinstance(item, (list, np.ndarray)):\n # print(\"key(__getitem__) len, type, item[0]: \", len(item), type(item), item[0])\n if len(item) == len(self):\n if isinstance(item[0], (np.bool_, bool)):\n item = np.arange(len(item))[item]\n for key, value in self._lists.items():\n # print ('key: ', key, type(value))\n if isinstance(item, slice):\n new_dict[key] = value[item]\n else:\n if isinstance(value, (list, tuple)):\n new_dict[key] = [value[i] for i in item]\n else:\n if len(value) > 0:\n try:\n new_dict[key] = value[item]\n except IndexError:\n print(\"Index error:: \", key, item, value)\n # else:\n # new_dict[key] = []\n # print (\"new_dict: \", new_dict, self.__class__)\n return self.__class__(**new_dict)\n\n def keys(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self._lists.keys()\n\n def items(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self._lists.items()\n\n def __copy__(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n cls = self.__class__\n result = cls.__new__(cls)\n result.__init__()\n for k, v in self.__dict__.items():\n if k == \"_lists\":\n result.__dict__[k] = {}\n for key, val in self._lists.items():\n if isinstance(val, SparseList):\n result.__dict__[k][key] = val.__copy__()\n elif isinstance(val, list):\n result.__dict__[k][key] = val[:]\n else:\n result.__dict__[k][key] = np.copy(val)\n else:\n result.__dict__[k] = v\n return result\n\n def __add__(self, other):\n # print \"__add__.new_elements\"\n # assert(isinstance(other, self.__class__))\n new_array = self.__copy__()\n for key, val in other.items():\n if key not in self.keys():\n if isinstance(val, SparseList):\n new_array._lists[key] = SparseList(\n {}, default=other._lists[key]._default, length=len(self)\n )\n else:\n raise ValueError(\n \"Incompatible lists (for non-sparse lists keys must be identical (1)\"\n + str(key)\n )\n\n new_length = len(self) + len(other)\n for key, val in new_array.items():\n # print \"key: \", key, val.__class__, isinstance(new_array, SparseList)\n if key in other.keys():\n if isinstance(new_array._lists[key], np.ndarray):\n new_array._lists[key] = np.append(\n new_array._lists[key], other._lists[key], axis=0\n )\n elif isinstance(new_array._lists[key], (list, SparseList)):\n new_array._lists[key] += other._lists[key]\n else:\n raise ValueError(\n \"Type not implemented \" + str(type(new_array._lists[key]))\n )\n elif isinstance(val, SparseList):\n new_array._lists[\n key\n ]._length = new_length # TODO: default extends to all elements (may be undesired)\n else:\n print(\"non-matching key: \", key)\n raise ValueError(\n \"Incompatible lists (for non-sparse lists keys must be identical (2)\"\n )\n new_array._length += len(other)\n return new_array\n\n def __mul__(self, other):\n if not isinstance(other, int):\n raise ValueError(\n \"Multiplication with SparseMatrix only implemented for integers\"\n )\n new_array = self.__copy__()\n for key, value in self.items():\n new_array._lists[key] *= other\n\n new_array._length *= other\n return new_array\n\n def __rmul__(self, other):\n if isinstance(other, int):\n return self * other\n\n def add_tag(self, *args, **qwargs):\n for key in args:\n self._lists[key] = SparseList({}, length=len(self))\n\n for key, default in qwargs.items():\n self._lists[key] = SparseList({}, default=default, length=len(self))\n\n def remove_tag(self, *args, **qwargs):\n \"\"\"\n\n Args:\n 
*args:\n **qwargs:\n\n Returns:\n\n \"\"\"\n for key in args:\n del self._lists[key]\n for key, default in qwargs.items():\n del self._lists[key]\n"
] | [
[
"numpy.copy",
"numpy.delete",
"numpy.append",
"numpy.equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
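The SparseList in the sparse_list.py row above stores only the indices whose value differs from a default and expands back to a dense list on demand. A short usage sketch consistent with the constructor and accessors shown (dict input requires an explicit length; list() materialises the full list); the values are illustrative, and the import path is assumed to mirror the file path given in the row.

from pyiron_atomistics.atomistics.structure.sparse_list import SparseList

# Only indices 0 and 3 deviate from the default value.
spins = SparseList({0: 2.0, 3: -2.0}, default=0.0, length=5)

print(len(spins))         # 5
print(spins[3])           # -2.0  (stored explicitly)
print(spins[1])           # 0.0   (falls back to the default)
print(spins.list())       # [2.0, 0.0, 0.0, -2.0, 0.0]
print(spins[1:4].list())  # [0.0, 0.0, -2.0]  (slices keep the default)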
aimakerspace/synergos_algorithm | [
"9d07fa10ae78c0da5ad0415c58230b9712642134"
] | [
"synalgo/interfaces/model.py"
] | [
"#!/usr/bin/env python\r\n\r\n####################\r\n# Required Modules #\r\n####################\r\n\r\n# Generic\r\nfrom collections import OrderedDict\r\nfrom typing import Tuple\r\n\r\n# Libs\r\nimport syft as sy\r\nimport torch as th\r\nfrom torch import nn\r\n\r\n# Custom\r\nfrom synalgo.utils import TorchParser\r\n\r\n##################\r\n# Configurations #\r\n##################\r\n\r\ntorch_parser = TorchParser()\r\n\r\n###################################\r\n# Model Abstraction Class - Model #\r\n###################################\r\n\r\nclass Model(nn.Module):\r\n \"\"\"\r\n The Model class serves to automate the building of structured deep neural\r\n nets, given specific layer configurations. Being a parent class of sy.Plan,\r\n this makes it more efficient to deploy in terms of communication costs.\r\n\r\n Args:\r\n owner (VirtualWorker/WebsocketClientWorker): Handler of this model\r\n structure (OrderedDict): Configurations used to build the achitecture of the NN\r\n is_condensed (bool): Toggles Binary or Multiclass prediction\r\n\r\n Attributes:\r\n is_condensed (bool): Toggles Binary or Multiclass prediction\r\n layers (OrderedDict): Maps specific layers to their respective activations\r\n + <Specific layer configuration dynamically defined>\r\n \"\"\"\r\n def __init__(self, structure):\r\n super().__init__()\r\n self.__SPECIAL_CASES = ['RNNBase', 'RNN', 'RNNCell', \r\n 'LSTM', 'LSTMCell',\r\n 'GRU', 'GRUCell']\r\n \r\n ###########################\r\n # Implementation Footnote #\r\n ###########################\r\n\r\n # [Causes]\r\n # Any learning rate scheduler with \"plateau\" (eg. \"ReduceLROnPlateau\") \r\n # requires its model to have the attribute 'self.metric'\r\n\r\n # [Problems]\r\n # Without this parameter, the following error will be raised:\r\n # \"TypeError: step() missing 1 required positional argument: 'metrics'\"\r\n \r\n # [Solution]\r\n # Specify it by default. It will be available for those layers/functions\r\n # who need it, and ignored by those who do not.\r\n\r\n self.metric = 0 # used for learning rate policy 'plateau'\r\n\r\n self.layers = OrderedDict()\r\n\r\n for layer, params in enumerate(structure):\r\n\r\n # Detect if input layer\r\n is_input_layer = params['is_input']\r\n\r\n # Detect layer type\r\n layer_type = params['l_type']\r\n\r\n # Construct layer name (eg. nnl_0_linear)\r\n layer_name = self.__construct_layer_name(layer, layer_type)\r\n\r\n # Extract layer structure and initialise layer\r\n layer_structure = params['structure']\r\n setattr(\r\n self, \r\n layer_name,\r\n torch_parser.parse_layer(layer_type)(**layer_structure)\r\n )\r\n\r\n # Detect activation function & store it for use in .forward()\r\n # Note: In more complex models, other layer types will be declared,\r\n # ones that do not require activation intermediates (eg. \r\n # batch normalisation). 
Hence, by pass activation by passing\r\n # an identity function instead.\r\n layer_activation = params['activation']\r\n self.layers[layer_name] = torch_parser.parse_activation(\r\n layer_activation\r\n )\r\n\r\n ###########\r\n # Helpers #\r\n ###########\r\n\r\n @staticmethod\r\n def __construct_layer_name(layer_idx: int, layer_type: str) -> str:\r\n \"\"\" This function was created as a means for formatting the layer name\r\n to facilitate finding & handling special cases during forward\r\n propagation\r\n\r\n Args:\r\n layer_idx (int): Index of the layer\r\n layer_type (str): Type of layer\r\n Returns:\r\n layer name (str)\r\n \"\"\"\r\n return f\"nnl_{layer_idx}_{layer_type.lower()}\" \r\n\r\n\r\n @staticmethod\r\n def __parse_layer_name(layer_name: str) -> Tuple[str, str]:\r\n \"\"\" This function was created as a means for reversing the formatting\r\n done during name creation to facilitate finding & handling special \r\n cases during forward propagation\r\n\r\n Args:\r\n layer name (str)\r\n Returns:\r\n layer_idx (int): Index of the layer\r\n layer_type (str): Type of layer\r\n \"\"\"\r\n _, layer_idx, layer_type = layer_name.split('_')\r\n return layer_idx, layer_type.capitalize()\r\n\r\n ##################\r\n # Core Functions #\r\n ##################\r\n\r\n def forward(self, x):\r\n \r\n # Apply the appropiate activation functions\r\n for layer_name, a_func in self.layers.items():\r\n curr_layer = getattr(self, layer_name)\r\n\r\n _, layer_type = self.__parse_layer_name(layer_name)\r\n\r\n # Check if current layer is a recurrent layer\r\n if layer_type in self.__SPECIAL_CASES:\r\n x, _ = a_func(curr_layer(x))\r\n else:\r\n x = a_func(curr_layer(x))\r\n\r\n return x\r\n\r\n\r\n\r\n#########################################\r\n# Model Communication Class - ModelPlan #\r\n#########################################\r\n\r\nclass ModelPlan(sy.Plan):\r\n \"\"\"\r\n The Model class serves to automate the building of structured deep neural\r\n nets, given specific layer configurations. Being a parent class of sy.Plan,\r\n this makes it more efficient to deploy in terms of communication costs.\r\n\r\n Args:\r\n owner (VirtualWorker/WebsocketClientWorker): Handler of this model\r\n structure (OrderedDict): Configurations used to build the achitecture of the NN\r\n is_condensed (bool): Toggles Binary or Multiclass prediction\r\n\r\n Attributes:\r\n is_condensed (bool): Toggles Binary or Multiclass prediction\r\n layers (OrderedDict): Maps specific layers to their respective activations\r\n + <Specific layer configuration dynamically defined>\r\n \"\"\"\r\n def __init__(self, structure):\r\n super().__init__()\r\n self.__SPECIAL_CASES = ['RNNBase', 'RNN', 'RNNCell', \r\n 'LSTM', 'LSTMCell',\r\n 'GRU', 'GRUCell']\r\n \r\n self.layers = OrderedDict()\r\n\r\n for layer, params in enumerate(structure):\r\n\r\n # Detect if input layer\r\n is_input_layer = params['is_input']\r\n\r\n # Detect layer type\r\n layer_type = params['l_type']\r\n\r\n # Construct layer name (eg. nnl_0_linear)\r\n layer_name = self.__construct_layer_name(layer, layer_type)\r\n\r\n # Extract layer structure and initialise layer\r\n layer_structure = params['structure']\r\n setattr(\r\n self, \r\n layer_name,\r\n torch_parser.parse_layer(layer_type)(**layer_structure)\r\n )\r\n\r\n # Detect activation function & store it for use in .forward()\r\n # Note: In more complex models, other layer types will be declared,\r\n # ones that do not require activation intermediates (eg. \r\n # batch normalisation). 
Hence, by pass activation by passing\r\n # an identity function instead.\r\n layer_activation = params['activation']\r\n self.layers[layer_name] = torch_parser.parse_activation(\r\n layer_activation\r\n )\r\n\r\n ###########\r\n # Helpers #\r\n ###########\r\n\r\n @staticmethod\r\n def __construct_layer_name(layer_idx: int, layer_type: str) -> str:\r\n \"\"\" This function was created as a means for formatting the layer name\r\n to facilitate finding & handling special cases during forward\r\n propagation\r\n\r\n Args:\r\n layer_idx (int): Index of the layer\r\n layer_type (str): Type of layer\r\n Returns:\r\n layer name (str)\r\n \"\"\"\r\n return f\"nnl_{layer_idx}_{layer_type.lower()}\" \r\n\r\n\r\n @staticmethod\r\n def __parse_layer_name(layer_name: str) -> Tuple[str, str]:\r\n \"\"\" This function was created as a means for reversing the formatting\r\n done during name creation to facilitate finding & handling special \r\n cases during forward propagation\r\n\r\n Args:\r\n layer name (str)\r\n Returns:\r\n layer_idx (int): Index of the layer\r\n layer_type (str): Type of layer\r\n \"\"\"\r\n _, layer_idx, layer_type = layer_name.split('_')\r\n return layer_idx, layer_type.capitalize()\r\n\r\n ##################\r\n # Core Functions #\r\n ##################\r\n\r\n def forward(self, x):\r\n \r\n # Apply the appropiate activation functions\r\n for layer_name, a_func in self.layers.items():\r\n curr_layer = getattr(self, layer_name)\r\n\r\n _, layer_type = self.__parse_layer_name(layer_name)\r\n\r\n # Check if current layer is a recurrent layer\r\n if layer_type in self.__SPECIAL_CASES:\r\n x, _ = a_func(curr_layer(x))\r\n else:\r\n x = a_func(curr_layer(x))\r\n\r\n return x\r\n\r\n\r\n def build(self, shape):\r\n \"\"\" Uses a declared shape to create mock data for building the \r\n customised plan\r\n\r\n Args:\r\n shape (tuple):\r\n Returns:\r\n\r\n \"\"\"\r\n mock_data = th.rand(shape)\r\n return super().build(mock_data)\r\n\r\n\r\n#########\r\n# Tests #\r\n#########\r\n\r\nif __name__ == \"__main__\":\r\n\r\n from pprint import pprint\r\n\r\n model_structure = [\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": True,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 15,\r\n \"out_features\": 100\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 100,\r\n \"out_features\": 90\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 90,\r\n \"out_features\": 80\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 80,\r\n \"out_features\": 70\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 70,\r\n \"out_features\": 60\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 60,\r\n \"out_features\": 50\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 50,\r\n \"out_features\": 40\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n 
\"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 40,\r\n \"out_features\": 30\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 30,\r\n \"out_features\": 20\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 20,\r\n \"out_features\": 10\r\n }\r\n },\r\n {\r\n \"activation\": \"sigmoid\",\r\n \"is_input\": False,\r\n \"l_type\": \"Linear\",\r\n \"structure\": {\r\n \"bias\": True,\r\n \"in_features\": 10,\r\n \"out_features\": 1\r\n }\r\n }\r\n ]\r\n\r\n # model_structure = [\r\n # # Input: N, C, Height, Width [N, 1, 28, 28]\r\n # {\r\n # \"activation\": \"relu\",\r\n # \"is_input\": True,\r\n # \"l_type\": \"Conv2d\",\r\n # \"structure\": {\r\n # \"in_channels\": 1, \r\n # \"out_channels\": 4, # [N, 4, 28, 28]\r\n # \"kernel_size\": 3,\r\n # \"stride\": 1,\r\n # \"padding\": 1\r\n # }\r\n # },\r\n # {\r\n # \"activation\": None,\r\n # \"is_input\": False,\r\n # \"l_type\": \"Flatten\",\r\n # \"structure\": {}\r\n # },\r\n # # ------------------------------\r\n # {\r\n # \"activation\": \"sigmoid\",\r\n # \"is_input\": False,\r\n # \"l_type\": \"Linear\",\r\n # \"structure\": {\r\n # \"bias\": True,\r\n # \"in_features\": 4 * 28 * 28,\r\n # \"out_features\": 1\r\n # }\r\n # }\r\n # ]\r\n\r\n hook = sy.TorchHook(th)\r\n bob = sy.VirtualWorker(hook, id='bob')\r\n \r\n # model = Model(model_structure)\r\n # pprint(model.__dict__)\r\n # pprint(model.state_dict())\r\n\r\n model_plan = Model(structure=model_structure)\r\n print(model_plan.include_state)\r\n print(model_plan.layers)\r\n\r\n print(\"-->\", model_plan.build(shape=(1, 15)))\r\n # print(\"-->\", model_plan.build(shape=(1, 1, 28, 28)))\r\n\r\n ###########################################################################\r\n # Class Model can be built! No need for forward() to be the only function #\r\n ###########################################################################\r\n \r\n # print(\"Before sending:\", list(model_plan.parameters()))\r\n # model_plan_ptr = model_plan.send(bob)\r\n # print(\"After sending:\", model_plan_ptr.location, list(model_plan_ptr.parameters()))\r\n # print(\"Can load_data_dict()?\", model_plan.load_state_dict)\r\n \r\n # train_data = th.rand([100,15]).send(bob)\r\n # print(model_plan_ptr(train_data).get())\r\n\r\n ###############################################\r\n # Copies of un-built plans cannot be built... #\r\n ###############################################\r\n\r\n # model_plan_copy = model_plan.copy()\r\n # print(f\"Before building: {model_plan_copy}\")\r\n # model_plan_copy.build()\r\n # print(f\"After building: {model_plan_copy}\")\r\n\r\n #######################################\r\n # Built plans can be copied and used! #\r\n #######################################\r\n\r\n model_plan_copy = model_plan.copy()\r\n print(f\"Before building: {model_plan_copy}\")\r\n print(f\"After building: {model_plan_copy}\")\r\n\r\n print(\"Before sending:\", list(model_plan_copy.parameters()))\r\n model_plan_ptr = model_plan_copy.send(bob)\r\n print(\"After sending:\", model_plan_ptr.location, list(model_plan_ptr.parameters()))\r\n\r\n train_data = th.rand((32, 15)).send(bob)\r\n print(model_plan_ptr(train_data).get(), model_plan_ptr(train_data).shape)\r\n\r\n\r\n"
] | [
[
"torch.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
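The row above only flags torch.rand, but the stored file's core pattern is building torch.nn layers dynamically from a config list and exercising the result with mock data. A minimal, self-contained sketch of that pattern follows (toy layer sizes, no syft/TorchParser dependency; every name below is illustrative rather than taken from the row):

# Sketch: build layers from a config list with setattr, run mock data from torch.rand.
from collections import OrderedDict
import torch
from torch import nn

structure = [
    {"l_type": nn.Linear, "structure": {"in_features": 15, "out_features": 8}, "activation": torch.sigmoid},
    {"l_type": nn.Linear, "structure": {"in_features": 8, "out_features": 1}, "activation": torch.sigmoid},
]

class TinyModel(nn.Module):
    def __init__(self, structure):
        super().__init__()
        self.activations = OrderedDict()
        for idx, params in enumerate(structure):
            name = f"nnl_{idx}"                                  # mirrors the nnl_<idx>_<type> naming idea
            setattr(self, name, params["l_type"](**params["structure"]))
            self.activations[name] = params["activation"]

    def forward(self, x):
        for name, act in self.activations.items():
            x = act(getattr(self, name)(x))
        return x

model = TinyModel(structure)
mock = torch.rand((1, 15))          # mock batch, analogous to ModelPlan.build()
print(model(mock).shape)            # -> torch.Size([1, 1])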
Gregory-Eales/ml-reimplementations | [
"ef2652224cc31ca3b569c9ccd9089a4053eb2b2f"
] | [
"trpo/trpo/policy_network.py"
] | [
"import torch\nimport numpy as np\n\n\nclass PolicyNetwork(torch.nn.Module):\n\n def __init__(self, alpha, input_size, output_size):\n\n super(PolicyNetwork, self).__init__()\n\n self.input_size = input_size\n self.output_size = output_size\n\n self.fc1 = torch.nn.Linear(input_size, 128)\n self.fc2 = torch.nn.Linear(128, 128)\n self.fc3 = torch.nn.Linear(128, output_size)\n self.tanh = torch.nn.Tanh()\n self.relu = torch.nn.LeakyReLU()\n self.sigmoid = torch.nn.Sigmoid()\n\n self.kl_divergence = torch.nn.KLDivLoss()\n self.optimizer = torch.optim.Adam(lr=alpha, params=self.parameters())\n\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0')\n self.to(self.device)\n\n def loss(self, log_probs, prev_probs, advantages):\n\n l = torch.sum(log_probs/prev_probs * advantages)\n\n kl = self.kl_divergence(log_probs, prev_probs)\n\n return torch.sum(l - kl)\n\n\n def forward(self, x):\n x = torch.Tensor(x).to(self.device)\n out = self.fc1(x)\n out = self.tanh(out)\n out = self.fc2(out)\n out = self.tanh(out)\n out = self.fc3(out)\n out = self.sigmoid(out)\n out = out.to(torch.device('cpu:0'))\n return out\n\n def optimize(self, log_probs, old_probs, advantages):\n self.optimizer.zero_grad()\n loss = self.loss(log_probs, old_probs, advantages)\n loss.backward(retain_graph=True)\n self.optimizer.step()\n"
] | [
[
"torch.nn.KLDivLoss",
"torch.Tensor",
"torch.sum",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.cuda.is_available",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
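A small sketch of the device-selection and CPU-round-trip idiom used by PolicyNetwork above (torch.cuda.is_available / torch.device / .to). The observation size and layer widths here are made up for illustration:

import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

net = torch.nn.Sequential(
    torch.nn.Linear(4, 128),
    torch.nn.Tanh(),
    torch.nn.Linear(128, 2),
    torch.nn.Sigmoid(),
).to(device)

obs = torch.rand(8, 4)                     # batch of 8 dummy observations
probs = net(obs.to(device)).to('cpu')      # move the result back to CPU, as in forward()
print(probs.shape)                         # -> torch.Size([8, 2])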
ryo-ma/covid19-japan-web-api | [
"7e17fabac7c56b5e5a4547078a3e77fc96e88e1f"
] | [
"project/src/script/create_positive_detail.py"
] | [
"import pandas as pd\nimport json\nimport os\nfrom ..const import (PREFECTURES, POSITIVE_DETAIL_JSON_PATH,\n POSITIVE_DETAIL_DATA_PATH, POSITIVE_DEITAL_PREFECTURE_JSON_PATH_FORMAT)\n\n\ndef create_json_file():\n header = ('code', 'announcement_date', 'src', 'prefecture', 'residence_prefecture',\n 'age', 'gender', 'attribute', 'prefecture_number',\n 'travel_or_contact', 'detail', 'id', 'diagnosis_date', 'onset', 'symptom',\n 'death_or_discharge_date', 'comment', 'outcome', 'outcome_src')\n positive_detail_df = pd.read_csv(POSITIVE_DETAIL_DATA_PATH, names=header, encoding='utf-8')\n\n # by prefecture\n groupby_prefecture = positive_detail_df.groupby('prefecture')\n for prefecture in PREFECTURES:\n json_path = os.path.join('project', POSITIVE_DEITAL_PREFECTURE_JSON_PATH_FORMAT.format(prefecture))\n df_by_prefecture = (groupby_prefecture.get_group(prefecture)\n if prefecture in groupby_prefecture.groups else None)\n output_positive_detail(json_path, df_by_prefecture)\n\n # all\n output_positive_detail(os.path.join('project', POSITIVE_DETAIL_JSON_PATH), positive_detail_df)\n\n\ndef output_positive_detail(json_path, positive_detail_df):\n with open(json_path, 'w', encoding='utf-8') as f:\n json_data = ([] if positive_detail_df is None\n else positive_detail_df.drop(positive_detail_df.index[0]).fillna('').to_dict(orient='records'))\n json.dump(json_data, f, indent=2, ensure_ascii=False)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
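The flagged pandas.read_csv call feeds a groupby-then-JSON flow. A self-contained sketch of that flow with toy rows (the prefecture names and columns are placeholders, and output goes to stdout instead of the project's JSON paths):

import json
import pandas as pd

df = pd.DataFrame({
    "prefecture": ["Tokyo", "Osaka", "Tokyo"],
    "age": ["30s", "40s", "20s"],
    "gender": ["F", "M", "F"],
})

groups = df.groupby("prefecture")
for prefecture in ["Tokyo", "Osaka", "Aichi"]:
    # missing groups fall back to None, exactly like the get_group(...) guard above
    group_df = groups.get_group(prefecture) if prefecture in groups.groups else None
    records = [] if group_df is None else group_df.fillna("").to_dict(orient="records")
    print(prefecture, json.dumps(records, ensure_ascii=False))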
edgchen1/onnxruntime | [
"60bbdf14035014d94d525c402d3d6404ac32ad3d"
] | [
"orttraining/orttraining/python/training/ortmodule/_utils.py"
] | [
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\nfrom onnxruntime.capi.onnxruntime_inference_collection import OrtValue\nfrom onnxruntime.capi import _pybind_state as C\nfrom ._fallback_exceptions import ORTModuleDeviceException, wrap_exception\n\nimport os\nimport copy\nimport inspect\nimport torch\nfrom torch.utils.dlpack import from_dlpack, to_dlpack\nimport traceback\nfrom typing import List\nimport types\nimport warnings\nfrom distutils.version import LooseVersion\n\ndef _ortvalue_from_torch_tensor(torch_tensor):\n # TODO: Current DLPack doesn't support bool and PyTorch disables converting bool tensor to DLPack in recent commit.\n # https://github.com/pytorch/pytorch/blob/7e7be526c9d9179f35084e9cca5b5c5ad5172100/aten/src/ATen/DLConvertor.cpp#L41\n # We need to convert bool tensor to unit8 tensor to workaround this.\n # DLPack is discussing how to support bool type, we can remove this workaround once both DLPack\n # and PyTorch support bool type.\n is_bool_tensor = torch_tensor.dtype == torch.bool\n if is_bool_tensor and LooseVersion(torch.__version__) >= LooseVersion('1.10.0'):\n torch_tensor = torch_tensor.to(torch.uint8)\n return C.OrtValue.from_dlpack(to_dlpack(torch_tensor), is_bool_tensor)\n\n\ndef _torch_tensor_from_dl_pack(dlpack, ortvalue, device):\n torch_tensor = from_dlpack(dlpack) if device.type != 'ort' else C.ort_from_dlpack(dlpack)\n return torch_tensor.to(torch.bool) if ortvalue.data_type() == 'tensor(bool)' else torch_tensor\n\n\ndef _ortvalue_to_torch_tensor(ortvalue, device):\n # PyTorch's to_dlpack() uses same config for both torch.bool and torch.uint8,\n # and convert the config to torch.uint8 tensor duing from_dlpack().\n # So we need to convert the torch tensor to torch.bool type if OrtValue is bool tensor.\n dlpack_tensor = ortvalue.to_dlpack()\n return _torch_tensor_from_dl_pack(dlpack_tensor, ortvalue, device)\n\ndef _torch_tensor_to_dlpack(tensor):\n if tensor.device.type == 'ort':\n return C.ort_to_dlpack(tensor)\n else:\n # TODO: Current DLPack doesn't support bool and PyTorch disables converting bool tensor to DLPack in recent commit.\n # https://github.com/pytorch/pytorch/blob/7e7be526c9d9179f35084e9cca5b5c5ad5172100/aten/src/ATen/DLConvertor.cpp#L41\n # We need to convert bool tensor to unit8 tensor to workaround this.\n # DLPack is discussing how to support bool type, we can remove this workaround once both DLPack\n # and PyTorch support bool type.\n if tensor.dtype == torch.bool and LooseVersion(torch.__version__) >= LooseVersion('1.10.0'):\n tensor = tensor.to(torch.uint8)\n return to_dlpack(tensor)\n\n\ndef _check_same_device(device, argument_str, *args):\n '''Check that all tensor arguments in *args reside on the same device as the input device'''\n\n assert isinstance(device, torch.device), '`device` must be a valid `torch.device` object'\n for arg in args:\n if arg is not None and isinstance(arg, torch.Tensor):\n arg_device = torch.device(arg.device)\n if arg_device != device:\n raise wrap_exception(ORTModuleDeviceException,\n RuntimeError(\n f\"{argument_str} found on device {arg_device}, but expected it to be on module device {device}.\"))\n\n\ndef get_device_index(device):\n if isinstance(device, str):\n # could be 'cuda:0', 'cuda:1', or 'cpu'. 
with cpu, set index=0\n device = torch.device(device)\n elif isinstance(device, int):\n return device\n return 0 if device.index is None else device.index\n\n\ndef get_device_str(device):\n if isinstance(device, str):\n # could be 'cuda:0', 'cuda:1', or 'cpu'. with cpu, set index=0\n if device.find(':') == -1:\n device += ':' + str(torch.cuda.current_device())\n elif isinstance(device, int):\n device = 'cuda:' + str(device)\n elif isinstance(device, torch.device):\n if device.index is None:\n device = device.type + ':' + str(torch.cuda.current_device())\n else:\n device = device.type + ':' + str(device.index)\n else:\n raise wrap_exception(ORTModuleDeviceException, RuntimeError('Unsupported device type'))\n return device\n\n\ndef get_device_from_module(module):\n '''Returns the first device found in the `module`'s parameters or None\n\n Args:\n module (torch.nn.Module): PyTorch model to extract device from\n\n Raises:\n ORTModuleFallbackException: When more than one device is found at `module`\n '''\n device = None\n try:\n device = next(module.parameters()).device\n for param in module.parameters():\n if param.device != device:\n raise wrap_exception(ORTModuleDeviceException,\n RuntimeError('ORTModule supports a single device per model'))\n except StopIteration:\n # Model doesn't have a device set to any of the model parameters\n pass\n return device\n\n\ndef get_device_from_inputs(args, kwargs):\n '''Returns device from first PyTorch Tensor within args or kwargs\n\n Args:\n args: List with inputs\n kwargs: Dictionary with inputs\n '''\n\n device = None\n if args:\n device = torch.device(args[0].device)\n elif kwargs:\n device = torch.device(next(iter(kwargs.values())).device)\n return device\n\n\ndef _create_iobinding(io_binding, inputs, model, device):\n '''Creates IO binding for a `model` inputs and output'''\n for idx, value_info in enumerate(model.graph.input):\n io_binding.bind_ortvalue_input(value_info.name, OrtValue(_ortvalue_from_torch_tensor(inputs[idx])))\n\n for value_info in model.graph.output:\n io_binding.bind_output(value_info.name, device.type, device_id=get_device_index(device))\n\ndef check_for_name_collisions_and_bind_methods_to_ortmodule(ortmodule: torch.nn.Module,\n user_module: torch.nn.Module):\n \"\"\"Warns if there are any common attributes between the user's model and ORTModule and binds user methods to ORTModule\n\n If there are methods defined on the user's model that ORTModule does not recognize (custom methods),\n then this function binds these methods to ORTModule.\n\n Args:\n ortmodule: the ORTModule instance\n user_module: the user's torch.nn.Module\n\n Raises:\n UserWarning: If there are any overlapping attributes between the ortmodule and user_module (except forward)\n \"\"\"\n\n ortmodule_attributes = dict(inspect.getmembers(ortmodule))\n torch_module_attributes = dict(inspect.getmembers(torch.nn.Module()))\n user_module_attributes = inspect.getmembers(user_module)\n\n # Check if any user defined attribute collides with ORTModule's attributes\n for attribute_name, attribute in user_module_attributes:\n if inspect.ismethod(attribute):\n # Skip the dunder methods\n if attribute_name.startswith('__'):\n continue\n\n # if the attribute is not a torch attribute, or if the torch attribute\n # corresponding to attribute_name is not a method or the user attribute\n # does not equal the torch attribute, then this is a user defined method.\n if attribute_name not in torch_module_attributes or \\\n not inspect.ismethod(torch_module_attributes[attribute_name]) or 
\\\n attribute.__func__ != torch_module_attributes[attribute_name].__func__:\n\n # forward is expected to be defined by the user.\n if attribute_name == 'forward':\n continue\n\n # This is a user defined/overriden method. Check for collisions.\n if attribute_name in ortmodule_attributes:\n # This is a user defined method, issue a warning.\n warnings.warn(f\"User Module's attribute name {attribute_name} collides with ORTModule's attribute name. \"\n \"User Module's method may not be called upon invocation through ORTModule.\")\n else:\n # This is a custom method, copy it and bind the copy to ORTModule.\n # This is needed for cases where the user's custom method invokes\n # the forward method. It should go through ORTModule's forward implementation\n # and not go through the user defined forward method.\n ortmodule.__dict__[attribute_name] = types.MethodType(copy.deepcopy(attribute.__func__), ortmodule)\n else:\n if attribute_name not in torch_module_attributes and attribute_name in ortmodule_attributes:\n # This is a user defined attribute that collides with ORTModule\n if attribute_name in ortmodule_attributes:\n warnings.warn(f\"User Module's attribute name {attribute_name} collides with ORTModule's attribute name. \"\n \"User Module's attribute may not be returned when trying to retrieve the attribute through ORTModule.\")\n\ndef parse_os_env_skip_check_flags(env_name, default_skip_check_str):\n \"\"\"Returns a list of SkipChecks as defined by os env variable env_name or default provided\"\"\"\n\n return os.getenv(env_name, default_skip_check_str).split('|')\n\ndef get_exception_as_string(exception):\n assert isinstance(exception, Exception), 'exception must be a `Exception`'\n\n try:\n raise exception\n except:\n return traceback.format_exc()\n"
] | [
[
"torch.utils.dlpack.to_dlpack",
"torch.cuda.current_device",
"torch.nn.Module",
"torch.utils.dlpack.from_dlpack",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
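A PyTorch-only sketch (no onnxruntime required) of the DLPack round-trip and the bool-to-uint8 workaround that _ortvalue_from_torch_tensor and _torch_tensor_to_dlpack describe above:

import torch
from torch.utils.dlpack import from_dlpack, to_dlpack

t = torch.tensor([True, False, True])
payload = t.to(torch.uint8) if t.dtype == torch.bool else t   # bool is not DLPack-safe in all versions
roundtrip = from_dlpack(to_dlpack(payload))                   # export and re-import via DLPack
restored = roundtrip.to(torch.bool)                           # recover the original dtype
print(restored)                                               # tensor([ True, False,  True])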
harunpehlivan/ParlAI | [
"5507d4745ca23b23af311673a6b0d1b7e72eb5cd"
] | [
"projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_transformer_ranker.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.agents.transformer.transformer import TransformerRankerAgent\nfrom .wizard_dict import WizardDictAgent\n\nimport numpy as np\nimport torch\n\n\nSOC_TOKEN = '__SOC__'\n\n\nclass WizardTransformerRankerAgent(TransformerRankerAgent):\n @staticmethod\n def dictionary_class():\n return WizardDictAgent\n\n @classmethod\n def add_cmdline_args(cls, argparser):\n \"\"\"Add command-line arguments specifically for this agent.\"\"\"\n super(WizardTransformerRankerAgent, cls).add_cmdline_args(argparser)\n agent = argparser.add_argument_group('Wizard Transformer Ranker Arguments')\n agent.add_argument(\n '--use-knowledge',\n type='bool',\n default=True,\n help='use knowledge field instead of personas',\n )\n agent.add_argument(\n '--knowledge-dropout',\n type=float,\n default=0.7,\n help='dropout some knowledge during training',\n )\n agent.add_argument(\n '--chosen-sentence',\n type='bool',\n default=False,\n help='instead of using all knowledge, use gold'\n 'label, i.e. the chosen sentence',\n )\n agent.add_argument(\n '--knowledge-truncate',\n type=int,\n default=50,\n help='truncate knowledge to this length',\n )\n agent.add_argument('--legacy', type='bool', default=False, help='legacy model')\n argparser.set_defaults(\n learningrate=0.0008,\n eval_candidates='inline',\n candidates='batch',\n lr_factor=1,\n add_p1_after_newln=False,\n delimiter=' ',\n )\n\n return agent\n\n def __init__(self, opt, shared=None):\n \"\"\"Set up model.\"\"\"\n\n super().__init__(opt, shared)\n self.use_knowledge = opt.get('use_knowledge', False)\n if self.use_knowledge:\n self.opt['use_memories'] = True\n self.chosen_sentence = opt.get('chosen_sentence', False) and self.use_knowledge\n self.knowledge_dropout = opt.get('knowledge_dropout', 0)\n self.knowledge_truncate = opt.get('knowledge_truncate', 50)\n\n def _set_text_vec(self, *args, **kwargs):\n \"\"\"\n Sets the 'text_vec' field in the observation.\n\n Useful to override to change vectorization behavior\n \"\"\"\n obs = super()._set_text_vec(*args, **kwargs)\n if self.opt.get('legacy'):\n soc_tensor = torch.LongTensor([self.dict[SOC_TOKEN]])\n obs['text_vec'] = torch.cat([soc_tensor, obs['text_vec']])\n return obs\n\n def _vectorize_memories(self, observation):\n \"\"\"Override abstract method from TransformerRankerAgent to use\n knowledge field as memories.\"\"\"\n\n if not self.use_knowledge:\n return observation\n\n observation['memory_vecs'] = []\n\n checked = observation.get('checked_sentence', '')\n if observation.get('knowledge'):\n knowledge = observation['knowledge'].split('\\n')[:-1]\n else:\n knowledge = []\n\n to_vectorize = []\n if checked and self.chosen_sentence:\n # if `self.chosen_sentence` is True, only keep golden knowledge\n to_vectorize = [checked]\n elif (self.knowledge_dropout == 0 or not self.is_training) and knowledge:\n # during evaluation we use all of the knowledge\n to_vectorize = knowledge\n elif knowledge:\n for line in knowledge:\n if checked and checked in line:\n # make sure we keep the chosen sentence\n keep = 1\n else:\n # dropout knowledge\n keep = np.random.binomial(1, 1 - self.knowledge_dropout)\n if keep:\n to_vectorize.append(line)\n\n # vectorize knowledge\n observation['memory_vecs'] = [\n self._vectorize_text(line, truncate=self.knowledge_truncate)\n for line in to_vectorize\n ]\n return observation\n\n def load(self, path):\n 
\"\"\"Return opt and model states.\n\n Override this method from TorchAgent to allow us to load partial\n weights from pre-trained models.\n \"\"\"\n states = torch.load(path, map_location=lambda cpu, _: cpu)\n\n if 'model' in states:\n new_state_dict = states['model']\n # load params\n current_state = self.model.state_dict()\n # filter out unnecessary params\n pre_trained_state = {\n k: v for k, v in new_state_dict.items() if k in current_state\n }\n # upload pretrained state\n current_state.update(pre_trained_state)\n self.model.load_state_dict(current_state)\n\n if 'optimizer' in states and hasattr(self, 'optimizer'):\n self.optimizer.load_state_dict(states['optimizer'])\n return states\n"
] | [
[
"numpy.random.binomial",
"torch.LongTensor",
"torch.cat",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
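The load() override above keeps only pretrained weights whose keys exist in the current model. A toy reproduction of that partial state_dict loading, using two throwaway modules in place of a real checkpoint:

import torch

pretrained = torch.nn.Linear(4, 4)                    # stands in for a saved checkpoint
current_model = torch.nn.Sequential(torch.nn.Linear(4, 4))

new_state_dict = {"0." + k: v for k, v in pretrained.state_dict().items()}
current_state = current_model.state_dict()
# filter out params the current model does not have, then update and load
pre_trained_state = {k: v for k, v in new_state_dict.items() if k in current_state}
current_state.update(pre_trained_state)
current_model.load_state_dict(current_state)
print(sorted(pre_trained_state))                      # ['0.bias', '0.weight']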
hanzhiwangchn/SynthSR | [
"35c2483f1da272855bbeea3e76140845106b623d"
] | [
"SynthSR/metrics_model.py"
] | [
"\"\"\"\nIf you use this code, please the SynthSR paper in:\nhttps://github.com/BBillot/SynthSR/blob/master/bibtex.bib\n\nCopyright 2020 Benjamin Billot\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in\ncompliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is\ndistributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\nimplied. See the License for the specific language governing permissions and limitations under the\nLicense.\n\"\"\"\n\n\n# python imports\nimport numpy as np\nimport tensorflow as tf\nimport keras.layers as KL\nimport keras.backend as K\nfrom keras.models import Model\n\n# third-party imports\nfrom ext.lab2im import utils\nfrom ext.lab2im import layers\n\n\ndef metrics_model(input_model, loss_cropping=16, metrics='l1', work_with_residual_channel=None):\n\n # If probabilistic, split predictions of intensities and spreads\n if metrics == 'laplace':\n n_channels = int(input_model.outputs[0].shape[-1]/2)\n intensities_list = list()\n spreads_list = list()\n tensor = input_model.outputs[0]\n for c in range(n_channels):\n tmp_intensities = KL.Lambda(lambda x: tf.expand_dims(x[..., c], axis=-1))(tensor)\n intensities_list.append(tmp_intensities)\n tmp_spreads = KL.Lambda(lambda x: tf.expand_dims(x[..., c + n_channels], axis=-1))(tensor)\n spreads_list.append(tmp_spreads)\n if n_channels > 1:\n intensities_tensor = KL.Lambda(lambda x: tf.concat(x, axis=-1))(intensities_list)\n spreads_tensor = KL.Lambda(lambda x: tf.concat(x, axis=-1))(spreads_list)\n else:\n intensities_tensor = intensities_list[0]\n spreads_tensor = spreads_list[0]\n else:\n intensities_tensor = input_model.outputs[0]\n spreads_tensor = None\n\n # add residual if needed\n if work_with_residual_channel is None:\n intensities_tensor = KL.Lambda(lambda x: x, name='predicted_image')(intensities_tensor)\n else:\n slice_list = list()\n for c in work_with_residual_channel:\n tensor = input_model.get_layer('image_out').output\n tmp_slice = KL.Lambda(lambda x: tf.expand_dims(x[..., c], axis=-1))(tensor)\n slice_list.append(tmp_slice)\n if len(slice_list) > 1:\n slices = KL.Lambda(lambda x: tf.concat(x, axis=-1))(slice_list)\n else:\n slices = slice_list[0]\n intensities_tensor = KL.Add(name='predicted_image')([slices, intensities_tensor])\n\n # get crisp, ground truth image\n image_gt = input_model.get_layer('regression_target').output\n image_gt = KL.Lambda(lambda x: x, name='target')(image_gt)\n\n # crop output to evaluate loss function in centre patch\n if loss_cropping is not None:\n # format loss_cropping\n target_shape = image_gt.get_shape().as_list()[1:-1]\n n_dims, _ = utils.get_dims(target_shape)\n loss_cropping = utils.reformat_to_list(loss_cropping, length=n_dims)\n\n # perform cropping\n begin_idx = [int((target_shape[i] - loss_cropping[i]) / 2) for i in range(n_dims)]\n image_gt = KL.Lambda(lambda x: tf.slice(x, begin=tf.convert_to_tensor([0] + begin_idx + [0], dtype='int32'),\n size=tf.convert_to_tensor([-1] + loss_cropping + [-1], dtype='int32')),\n name='cropping_gt')(image_gt)\n intensities_tensor = KL.Lambda(lambda x:\n tf.slice(x, begin=tf.convert_to_tensor([0] + begin_idx + [0], dtype='int32'),\n size=tf.convert_to_tensor([-1] + loss_cropping + [-1], dtype='int32')),\n name='cropping_pred')(intensities_tensor)\n if metrics == 'laplace':\n spreads_tensor 
= KL.Lambda(lambda x:\n tf.slice(x, begin=tf.convert_to_tensor([0] + begin_idx + [0], dtype='int32'),\n size=tf.convert_to_tensor([-1] + loss_cropping + [-1], dtype='int32')),\n name='cropping_pred_spread')(spreads_tensor)\n\n # metrics is computed as part of the model\n if metrics == 'laplace':\n err_tensor = KL.Subtract()([intensities_tensor, image_gt])\n b_tensor = KL.Lambda(lambda x: 1e-5 + 0.02 * tf.exp(x), name='predicted_bs')(spreads_tensor)\n loss_tensor = KL.Lambda(lambda x: K.mean(tf.math.log(2*x[0]) + (K.abs(x[1]) / x[0])),\n name='laplace_loss')([b_tensor, err_tensor])\n elif metrics == 'l2':\n err_tensor = KL.Subtract()([intensities_tensor, image_gt])\n loss_tensor = KL.Lambda(lambda x: K.mean(K.square(x)), name='L2_loss')(err_tensor)\n elif metrics == 'l1':\n err_tensor = KL.Subtract()([intensities_tensor, image_gt])\n loss_tensor = KL.Lambda(lambda x: K.mean(K.abs(x)), name='L1_loss')(err_tensor)\n elif metrics == 'ssim':\n\n # TODO: true 3D\n\n # TODO: multiple output channels\n if image_gt.get_shape()[-1] > 1:\n raise Exception('SSIM metric does not currently support multiple channels')\n\n ssim_xy = KL.Lambda(\n lambda x: tf.image.ssim(x[0], x[1],\n 1.0), name='ssim_xy')([intensities_tensor, image_gt])\n ssim_xz = KL.Lambda(\n lambda x: tf.image.ssim(tf.transpose(x[0], perm=[0, 1, 3, 2, 4]), tf.transpose(x[1], perm=[0, 1, 3, 2, 4]),\n 1.0), name='ssim_xz')([intensities_tensor, image_gt])\n ssim_yz = KL.Lambda(\n lambda x: tf.image.ssim(tf.transpose(x[0], perm=[0, 2, 3, 1, 4]), tf.transpose(x[1], perm=[0, 2, 3, 1, 4]),\n 1.0), name='ssim_yz')([intensities_tensor, image_gt])\n\n loss_tensor = KL.Lambda(\n lambda x: -(1 / 3) * tf.reduce_mean(x[0]) - (1 / 3) * tf.reduce_mean(x[1]) - (1 / 3) * tf.reduce_mean(x[2]),\n name='ssim_loss')([ssim_xy, ssim_xz, ssim_yz])\n\n else:\n raise Exception('metrics should either be \"l1\" or \"l2\" or \"ssim\" oro \"laplace\", got {}'.format(metrics))\n\n # create the model and return\n model = Model(inputs=input_model.inputs, outputs=loss_tensor)\n return model\n\n\n# Add pretrained segmentation CNN to model to regularize synthesis\ndef add_seg_loss_to_model(input_model,\n seg_model,\n generation_labels,\n segmentation_label_equivalency,\n rel_weight,\n loss_cropping,\n m=None,\n M=None,\n fs_header=False):\n\n # get required layers from input models\n image_loss = input_model.outputs[0]\n predicted_image = input_model.get_layer('predicted_image').output\n segm_target = input_model.get_layer('segmentation_target').output\n\n # normalise/clip predicted image if needed\n if m is None:\n input_norm = KL.Lambda(lambda x: x + .0, name='input_normalized')(predicted_image)\n else:\n input_norm = KL.Lambda(lambda x: (K.clip(x, m, M) - m) / (M - m), name='input_normalized')(predicted_image)\n\n # Push predicted image through segmentation CNN\n if fs_header:\n input_normalized_rotated = KL.Lambda(lambda x: K.reverse(K.permute_dimensions(x, [0, 1, 3, 2, 4]), axes=2),\n name='input_normalized_rotated')(input_norm)\n predicted_seg_rotated = seg_model(input_normalized_rotated)\n predicted_seg = KL.Lambda(lambda x: K.permute_dimensions(K.reverse(x, axes=2),\n [0, 1, 3, 2, 4]))(predicted_seg_rotated)\n else:\n predicted_seg = seg_model(input_norm)\n\n # crop output to evaluate loss function in centre patch\n if loss_cropping is not None:\n # format loss_cropping\n target_shape = predicted_image.get_shape().as_list()[1:-1]\n n_dims, _ = utils.get_dims(target_shape)\n loss_cropping = utils.reformat_to_list(loss_cropping, length=n_dims)\n\n # perform 
cropping\n begin_idx = [int((target_shape[i] - loss_cropping[i]) / 2) for i in range(n_dims)]\n segm_target = KL.Lambda(lambda x: tf.slice(x,\n begin=tf.convert_to_tensor([0] + begin_idx + [0], dtype='int32'),\n size=tf.convert_to_tensor([-1] + loss_cropping + [-1],\n dtype='int32')))(segm_target)\n predicted_seg = KL.Lambda(lambda x: tf.slice(x,\n begin=tf.convert_to_tensor([0] + begin_idx + [0], dtype='int32'),\n size=tf.convert_to_tensor([-1] + loss_cropping + [-1],\n dtype='int32')))(predicted_seg)\n\n # reformat gt to have the same label values as for the segmentations\n segmentation_label_equivalency = utils.load_array_if_path(segmentation_label_equivalency)\n generation_labels = utils.load_array_if_path(generation_labels)\n gt_onehot = list()\n pred_onehot = list()\n for i in range(len(generation_labels)):\n idx = np.where(segmentation_label_equivalency == generation_labels[i])[0]\n if len(idx) > 0:\n tensor = KL.Lambda(lambda x: tf.cast(x[..., -1] == i, dtype='float32'))(segm_target)\n gt_onehot.append(tensor)\n if len(idx) == 1:\n tensor2 = KL.Lambda(lambda x: x[..., idx[0]])(predicted_seg)\n elif len(idx) == 2:\n tensor2 = KL.Lambda(lambda x: x[..., idx[0]] + x[..., idx[1]])(predicted_seg)\n elif len(idx) == 3:\n tensor2 = KL.Lambda(lambda x: x[..., idx[0]] + x[..., idx[1]] + x[..., idx[2]])(predicted_seg)\n else:\n raise Exception(\"uuummm weird that you're merging so many labels...\")\n pred_onehot.append(tensor2)\n gt = KL.Lambda(lambda x: tf.stack(x, -1), name='gt')(gt_onehot) if len(gt_onehot) > 1 else gt_onehot[0]\n pred = KL.Lambda(lambda x: tf.stack(x, -1), name='pred')(pred_onehot) if len(pred_onehot) > 1 else pred_onehot[0]\n\n # Dice loss: it's crucial to disable the checks, so we can use incomplete segmentations\n dice_loss = layers.DiceLoss(enable_checks=False, name='dice_loss')([gt, pred])\n\n total_loss = KL.Lambda(lambda x: x[0] + rel_weight * x[1])([image_loss, dice_loss])\n\n # create the model and return\n model = Model(inputs=input_model.inputs, outputs=total_loss)\n\n return model\n\n\nclass IdentityLoss(object):\n \"\"\"Very simple loss, as the computation of the loss as been directly implemented in the model.\"\"\"\n def __init__(self, keepdims=True):\n self.keepdims = keepdims\n\n def loss(self, y_true, y_predicted):\n \"\"\"Because the metrics is already calculated in the model, we simply return y_predicted.\n We still need to put y_true in the inputs, as it's expected by keras.\"\"\"\n loss = y_predicted\n\n tf.debugging.check_numerics(loss, 'Loss not finite')\n return loss\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.reduce_mean",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.exp",
"tensorflow.math.log",
"tensorflow.debugging.check_numerics",
"tensorflow.image.ssim",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
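A NumPy re-statement (not the Keras graph itself) of the Laplace loss that metrics_model wires up with KL.Lambda: b = 1e-5 + 0.02*exp(spread) and loss = mean(log(2b) + |pred - target| / b). The values below are arbitrary:

import numpy as np

pred = np.array([0.2, 0.8, 0.5])
target = np.array([0.25, 0.7, 0.5])
spread = np.array([-1.0, 0.0, 1.0])            # raw spread channel predicted by the net

b = 1e-5 + 0.02 * np.exp(spread)               # same mapping as the 'predicted_bs' Lambda
loss = np.mean(np.log(2.0 * b) + np.abs(pred - target) / b)
print(loss)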
arbab97/IIC-1 | [
"2bbfe4cb82b01714864f06aa3a890b5eb640be25"
] | [
"data.py"
] | [
"import numpy as np\nimport os\nimport warnings\n\nimport tensorflow as tf\n#tf.enable_eager_execution()## ADded to visualize the dataset values !!!! TURN OFF FOR FASTER TRAINING\n\nimport tensorflow_datasets as tfds\n\ndef mnist_x(x_orig, mdl_input_dims, is_training):\n\n # rescale to [0, 1]\n x_orig = tf.cast(x_orig, dtype=tf.float32) / x_orig.dtype.max\n\n # get common shapes\n height_width = mdl_input_dims[:-1]\n n_chans = mdl_input_dims[-1]\n\n # training transformations\n if is_training:\n x1 = tf.image.central_crop(x_orig, np.mean(20 / np.array(x_orig.shape.as_list()[1:-1])))\n x2 = tf.image.random_crop(x_orig, tf.concat((tf.shape(x_orig)[:1], [20, 20], [n_chans]), axis=0))\n x = tf.stack([x1, x2])\n x = tf.transpose(x, [1, 0, 2, 3, 4])\n i = tf.squeeze(tf.random.categorical([[1., 1.]], tf.shape(x)[0]))\n x = tf.map_fn(lambda y: y[0][y[1]], (x, i), dtype=tf.float32)\n x = tf.image.resize(x, height_width)\n\n # testing transformations\n else:\n x = tf.image.central_crop(x_orig, np.mean(20 / np.array(x_orig.shape.as_list()[1:-1])))\n x = tf.image.resize(x, height_width)\n\n return x\n\n\ndef mnist_gx(x_orig, mdl_input_dims, is_training, sample_repeats):\n\n # if not training, return a constant value--it will unused but needs to be same shape to avoid TensorFlow errors\n if not is_training:\n return tf.zeros([0] + mdl_input_dims)\n\n # rescale to [0, 1]\n x_orig = tf.cast(x_orig, dtype=tf.float32) / x_orig.dtype.max\n\n # repeat samples accordingly\n x_orig = tf.tile(x_orig, [sample_repeats] + [1] * len(x_orig.shape.as_list()[1:]))\n\n # get common shapes\n height_width = mdl_input_dims[:-1]\n n_chans = mdl_input_dims[-1]\n\n # random rotation\n rad = 2 * np.pi * 25 / 360\n x_rot = tf.contrib.image.rotate(x_orig, tf.random.uniform(shape=tf.shape(x_orig)[:1], minval=-rad, maxval=rad))\n gx = tf.stack([x_orig, x_rot])\n gx = tf.transpose(gx, [1, 0, 2, 3, 4])\n i = tf.squeeze(tf.random.categorical([[1., 1.]], tf.shape(gx)[0]))\n gx = tf.map_fn(lambda y: y[0][y[1]], (gx, i), dtype=tf.float32)\n\n # random crops\n x1 = tf.image.random_crop(gx, tf.concat((tf.shape(x_orig)[:1], [16, 16], [n_chans]), axis=0))\n x2 = tf.image.random_crop(gx, tf.concat((tf.shape(x_orig)[:1], [20, 20], [n_chans]), axis=0))\n x3 = tf.image.random_crop(gx, tf.concat((tf.shape(x_orig)[:1], [24, 24], [n_chans]), axis=0))\n gx = tf.stack([tf.image.resize(x1, height_width),\n tf.image.resize(x2, height_width),\n tf.image.resize(x3, height_width)])\n gx = tf.transpose(gx, [1, 0, 2, 3, 4])\n i = tf.squeeze(tf.random.categorical([[1., 1., 1.]], tf.shape(gx)[0]))\n gx = tf.map_fn(lambda y: y[0][y[1]], (gx, i), dtype=tf.float32)\n\n # apply random adjustments\n def rand_adjust(img):\n img = tf.image.random_brightness(img, 0.4)\n img = tf.image.random_contrast(img, 0.6, 1.4)\n if img.shape.as_list()[-1] == 3:\n img = tf.image.random_saturation(img, 0.6, 1.4)\n img = tf.image.random_hue(img, 0.125)\n return img\n\n gx = tf.map_fn(lambda y: rand_adjust(y), gx, dtype=tf.float32)\n\n return gx\n\n\ndef pre_process_data(ds, info, is_training, **kwargs):\n \"\"\"\n :param ds: TensorFlow Dataset object\n :param info: TensorFlow DatasetInfo object\n :param is_training: indicator to pre-processing function\n :return: the passed in data set with map pre-processing applied\n \"\"\"\n # apply pre-processing function for given data set and run-time conditions\n if info.name == 'mnist':\n return ds.map(lambda d: {'x': mnist_x(d['image'],\n mdl_input_dims=kwargs['mdl_input_dims'],\n is_training=is_training),\n 'gx': mnist_gx(d['image'],\n 
mdl_input_dims=kwargs['mdl_input_dims'],\n is_training=is_training,\n sample_repeats=kwargs['num_repeats']),\n 'label': d['label']},\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n else:\n raise Exception('Unsupported data set: ' + info.name)\n\n\ndef configure_data_set(ds, info, batch_size, is_training, **kwargs):\n \"\"\"\n :param ds: TensorFlow data set object\n :param info: TensorFlow DatasetInfo object\n :param batch_size: batch size\n :param is_training: indicator to pre-processing function\n :return: a configured TensorFlow data set object\n \"\"\"\n # enable shuffling and repeats\n ds = ds.shuffle(10 * batch_size, reshuffle_each_iteration=True).repeat(1)\n\n # batch the data before pre-processing\n ds = ds.batch(batch_size)\n\n # pre-process the data set\n with tf.device('/cpu:0'):\n ds = pre_process_data(ds, info, is_training, **kwargs)\n\n # enable prefetch\n ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n return ds\n\n\ndef load(data_set_name, **kwargs):\n \"\"\"\n :param data_set_name: data set name--call tfds.list_builders() for options\n :return:\n train_ds: TensorFlow Dataset object for the training data\n test_ds: TensorFlow Dataset object for the testing data\n info: data set info object\n \"\"\"\n # get data and its info\n ds, info = tfds.load(name=data_set_name, split=tfds.Split.ALL, with_info=True)\n\n # configure the data sets\n if 'train' in info.splits:\n train_ds = configure_data_set(ds=ds, info=info, is_training=True, **kwargs)\n else:\n train_ds = None\n if 'test' in info.splits:\n test_ds = configure_data_set(ds=ds, info=info, is_training=False, **kwargs)\n else:\n test_ds = None\n\n return train_ds, test_ds, info\n"
] | [
[
"tensorflow.device",
"tensorflow.image.random_brightness",
"tensorflow.transpose",
"tensorflow.image.random_contrast",
"tensorflow.image.random_hue",
"tensorflow.zeros",
"tensorflow.shape",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.image.random_saturation",
"tensorflow.map_fn",
"tensorflow.image.resize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
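A TF2 sketch (dummy tensors, independent of the tfds pipeline above) of the crop-then-resize augmentation that mnist_x applies: a random 20x20 crop resized back to the model input size:

import tensorflow as tf

x = tf.random.uniform((8, 28, 28, 1))                         # fake batch of MNIST-like images
cropped = tf.image.random_crop(x, size=(8, 20, 20, 1))        # random spatial crop per batch
resized = tf.image.resize(cropped, (28, 28))                  # back to the network input size
print(resized.shape)                                          # (8, 28, 28, 1)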
juanhenao21/exact_distributions_financial | [
"02eb058e5f963fbccb9029aae3fb6e15def7a93a"
] | [
"project/exact_distributions_correlation/exact_distributions_correlation_analysis.py"
] | [
"\"\"\"Exact distributions correlation analysis module.\n\nThe functions in the module compute the returns and the aggregated returns of\nfinancial time series.\n\nThis script requires the following modules:\n * pickle\n * typing\n * numpy\n * pandas\n * exact_distributions_correlation_tools\n\nThe module contains the following functions:\n * returns_data - computes the returns of the time series.\n * aggregated_dist_returns_market_data - computes the aggregated\n distribution of returns for a market.\n * main - the main function of the script.\n\n..moduleauthor:: Juan Camilo Henao Londono <www.github.com/juanhenao21>\n\"\"\"\n\n# -----------------------------------------------------------------------------\n# Modules\n\nimport pickle\nfrom typing import List\n\nimport numpy as np # type: ignore\nimport pandas as pd # type: ignore\n\nimport exact_distributions_correlation_tools\n\n# -----------------------------------------------------------------------------\n\n\ndef returns_data(dates: List[str], time_step: str) -> None:\n \"\"\"Computes the returns of the time series.\n\n :param dates: List of the interval of dates to be analyzed\n (i.e. ['1980-01', '2020-12']).\n :param time_step: time step of the data (i.e. '1m', '2m', '5m', ...).\n :return: None -- The function saves the data in a file and does not return\n a value.\n \"\"\"\n\n function_name: str = returns_data.__name__\n exact_distributions_correlation_tools.function_header_print_data(\n function_name, dates, time_step\n )\n\n try:\n\n # Load data\n data: pd.DataFrame = pickle.load(\n open(\n f\"../data/original_data/original_data_{dates[0]}_{dates[1]}_step\"\n + f\"_{time_step}.pickle\",\n \"rb\",\n )\n )\n\n returns_df: pd.DataFrame = data.pct_change().dropna()\n\n # Saving data\n exact_distributions_correlation_tools.save_data(\n returns_df, function_name, dates, time_step\n )\n\n except FileNotFoundError as error:\n print(\"No data\")\n print(error)\n print()\n\n\n# -----------------------------------------------------------------------------\n\n\ndef normalized_returns_data(dates: List[str], time_step: str) -> None:\n \"\"\"Normalizes the returns of the time series.\n :param dates: List of the interval of dates to be analyzed\n (i.e. ['1980-01', '2020-12']).\n :param time_step: time step of the data (i.e. '1m', '2m', '5m', ...).\n :return: None -- The function saves the data in a file and does not return\n a value.\n \"\"\"\n\n function_name: str = normalized_returns_data.__name__\n exact_distributions_correlation_tools.function_header_print_data(\n function_name, dates, time_step\n )\n\n try:\n\n # Load data\n data: pd.DataFrame = pickle.load(\n open(\n f\"../data/exact_distributions_correlation/returns_data_{dates[0]}\"\n + f\"_{dates[1]}_step_{time_step}.pickle\",\n \"rb\",\n )\n )\n\n normalized_df: pd.DataFrame = (data - data.mean()) / data.std()\n\n # Saving data\n exact_distributions_correlation_tools.save_data(\n normalized_df, function_name, dates, time_step\n )\n\n except FileNotFoundError as error:\n print(\"No data\")\n print(error)\n print()\n\n\n# ----------------------------------------------------------------------------\n\n\ndef aggregated_dist_returns_market_data(dates: List[str], time_step: str) -> None:\n \"\"\"Computes the aggregated distribution of returns for a market.\n\n :param dates: List of the interval of dates to be analyzed\n (i.e. ['1980-01', '2020-12']).\n :param time_step: time step of the data (i.e. 
'1m', '2m', '5m', ...).\n :return: None -- The function saves the data in a file and does not return\n a value.\n \"\"\"\n\n function_name: str = aggregated_dist_returns_market_data.__name__\n exact_distributions_correlation_tools.function_header_print_data(\n function_name, dates, time_step\n )\n\n try:\n\n # Load data\n returns_vals: pd.DataFrame = pickle.load(\n open(\n f\"../data/exact_distributions_correlation/normalized_returns_data\"\n + f\"_{dates[0]}_{dates[1]}_step_{time_step}.pickle\",\n \"rb\",\n )\n )\n\n corr: pd.DataFrame = returns_vals.corr()\n # eig_vec: eigenvector, eig_val: eigenvalues\n eig_val, eig_vec = np.linalg.eig(corr)\n\n # rot: rotation, scal: scaling\n rot, scale = eig_vec, np.diag(1 / np.sqrt(eig_val))\n # trans: transformation matrix\n # trans = rot . scal\n trans = rot.dot(scale)\n\n trans_returns: pd.DataFrame = returns_vals.dot(trans)\n trans_returns.columns = returns_vals.columns\n\n one_col: List[pd.Series] = []\n\n for col in trans_returns.columns:\n\n one_col.append(trans_returns[col])\n\n agg_returns = pd.concat(one_col, ignore_index=True)\n\n print(f\"Std. Dev. {dates} = {agg_returns.std()}\")\n\n # Saving data\n exact_distributions_correlation_tools.save_data(\n agg_returns, function_name, dates, time_step\n )\n\n del returns_vals\n del trans_returns\n del agg_returns\n del one_col\n\n except FileNotFoundError as error:\n print(\"No data\")\n print(error)\n print()\n\n\n# ----------------------------------------------------------------------------\n\n\ndef main() -> None:\n \"\"\"The main function of the script.\n\n The main function is used to test the functions in the script.\n\n :return: None.\n \"\"\"\n\n\n# -----------------------------------------------------------------------------\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.linalg.eig",
"pandas.concat",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
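A self-contained sketch of the whitening step in aggregated_dist_returns_market_data: rotate normalised returns by the correlation eigenvectors, scale by 1/sqrt(eigenvalues), then pool all columns into one series. The toy returns are random, so the numbers are illustrative only:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
returns = pd.DataFrame(rng.standard_normal((500, 4)), columns=list("ABCD"))
returns = (returns - returns.mean()) / returns.std()          # normalised returns

eig_val, eig_vec = np.linalg.eig(returns.corr())
trans = eig_vec.dot(np.diag(1.0 / np.sqrt(eig_val)))          # rotation . scaling

trans_returns = returns.dot(trans)
trans_returns.columns = returns.columns
agg = pd.concat([trans_returns[c] for c in trans_returns.columns], ignore_index=True)
print(agg.std())                                              # ~1 for decorrelated, standardised returns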
ns-rse/spym | [
"5356d97d6baf774a3bdd8c03b436052b8d74dbd0"
] | [
"spym/process/level.py"
] | [
"import numpy as np\n\nclass Level():\n ''' Level.\n\n '''\n\n def __init__(self, spym_instance):\n self._spym = spym_instance\n\n def fixzero(self, **kwargs):\n ''' Add a constant to all the data to move the minimum (or the mean value) to zero.\n\n Args:\n to_mean: bool, optional. If true move mean value to zero, if false move mimimum to zero (default).\n\n '''\n\n self._spym._dr.data = fixzero(self._spym._dr.data, **kwargs)\n\n def plane(self, **kwargs):\n '''Corrects for sample tilting by subtraction of a plane.\n\n '''\n\n if not self._spym._dr.data.ndim == 2:\n print(\"The DataArray is not an image. Abort.\")\n return\n\n self._spym._dr.data, self._spym._bkg = plane(self._spym._dr.data.astype(float), **kwargs)\n\n def align(self, **kwargs):\n '''Align rows.\n\n Args:\n baseline: defines how baselines are estimated; 'mean' (default), 'median', 'poly'.\n axis: axis along wich calculate the baselines.\n poly_degree: polnomial degree if baseline='poly'.\n\n '''\n\n if not self._spym._dr.data.ndim == 2:\n print(\"The DataArray is not an image. Abort.\")\n return\n\n self._spym._dr.data, self._spym._bkg = align(self._spym._dr.data.astype(float), **kwargs)\n\ndef fixzero(image,\n to_mean=False):\n ''' Add a constant to all the data to move the minimum (or the mean value) to zero.\n\n Args:\n image: numpy array.\n to_mean: bool, optional. If true move mean value to zero, if false move mimimum to zero (default).\n\n Returns:\n numpy array.\n\n '''\n \n if to_mean:\n fixed = image - image.mean()\n else:\n fixed = image - image.min()\n\n return fixed\n\ndef plane(image):\n '''Corrects for image tilting by subtraction of a plane.\n\n Args:\n image: 2d numpy array.\n\n Returns:\n flattened image as 2d numpy array.\n\n '''\n\n bkg_x = _poly_bkg(image.mean(axis=0), 1)\n bkg_y = _poly_bkg(image.mean(axis=1), 1)\n\n bkg_xx = np.apply_along_axis(_fill, 1, image, bkg_x)\n bkg_yy = np.apply_along_axis(_fill, 0, image, bkg_y)\n\n bkg = bkg_xx + bkg_yy\n planned = image - bkg\n\n return planned, bkg\n\ndef align(image, baseline='mean', axis=1, poly_degree=2):\n '''Align rows.\n\n Args:\n image: 2d numpy array.\n baseline: defines how baselines are estimated; 'mean' (default), 'median', 'poly'.\n axis: axis along wich calculate the baselines.\n poly_degree: polnomial degree if baseline='poly'.\n\n Returns:\n corrected 2d numpy array.\n\n '''\n\n if baseline == 'mean':\n bkg = np.apply_along_axis(_mean_bkg, axis, image)\n elif baseline == 'median':\n bkg = np.apply_along_axis(_median_bkg, axis, image)\n elif baseline == 'poly':\n bkg = np.apply_along_axis(_poly_bkg, axis, image, poly_degree)\n\n aligned = image - bkg\n\n return aligned, bkg\n\ndef _mean_bkg(line):\n return np.full(line.shape[0], line.mean())\n\ndef _median_bkg(line):\n return np.full(line.shape[0], np.median(line))\n\ndef _poly_bkg(line, poly_degree):\n x = np.linspace(-.5, .5, line.shape[0])\n coefs = np.polyfit(x, line, poly_degree)\n return np.polyval(coefs, x)\n\ndef _fill(line, value):\n return value\n"
] | [
[
"numpy.polyfit",
"numpy.linspace",
"numpy.median",
"numpy.apply_along_axis",
"numpy.polyval"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
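A toy sketch of the 'poly' baseline alignment implemented above with numpy.polyfit / numpy.polyval / numpy.apply_along_axis; the image here is synthetic noise plus a linear tilt:

import numpy as np

def poly_baseline(line, degree=2):
    x = np.linspace(-0.5, 0.5, line.shape[0])
    return np.polyval(np.polyfit(x, line, degree), x)

rng = np.random.default_rng(1)
image = rng.normal(size=(64, 64)) + np.linspace(0, 5, 64)      # each row carries the same tilt
baseline = np.apply_along_axis(poly_baseline, 1, image)        # per-row polynomial baseline
aligned = image - baseline
print(np.abs(aligned.mean(axis=1)).max())                      # row means ~0 after alignment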
ford442/oglplu2 | [
"abf1e28d9bcd0d2348121e8640d9611a94112a83"
] | [
"assets/scripts/voronoi-svg.py"
] | [
"#!/usr/bin/python3\n# coding: UTF-8\n# Copyright Matus Chochlik.\n# Distributed under the Boost Software License, Version 1.0.\n# See accompanying file LICENSE_1_0.txt or copy at\n# http://www.boost.org/LICENSE_1_0.txt\n\nimport os\nimport sys\nimport math\nimport numpy\nimport random\nimport argparse\nimport multiprocessing\n\n# ------------------------------------------------------------------------------\ndef mix(b, i, f):\n return (1.0-f)*b + f*i\n# ------------------------------------------------------------------------------\ndef inverse_logistic(x):\n eps = 0.001\n return math.log(max(x, eps)) - math.log(max(1.0 - x, eps ))\n# ------------------------------------------------------------------------------\ndef logistic(x):\n return 1.0 / (1.0 + math.exp(-x))\n# ------------------------------------------------------------------------------\ndef sigmoid(x, c):\n return logistic(c * inverse_logistic(x))\n# ------------------------------------------------------------------------------\ndef perpendicular(v1):\n v2 = numpy.empty_like(v1)\n v2[0] = -v1[1]\n v2[1] = v1[0]\n return v2\n\n# ------------------------------------------------------------------------------\ndef set_center(points):\n return sum(points)/len(points)\n\n# ------------------------------------------------------------------------------\ndef segment_point(p1, p2, c):\n return (1-c)*p1 + c*p2;\n\n# ------------------------------------------------------------------------------\ndef segment_midpoint(p1, p2):\n return (p1+p2)*0.5\n\n# ------------------------------------------------------------------------------\ndef segment_normal(p1, p2):\n return perpendicular(p2-p1)\n\n# ------------------------------------------------------------------------------\ndef line_intersect_param(l1, l2):\n d1 = l1[1]\n d2 = l2[1]\n dp = l2[0]-l1[0]\n d2p = perpendicular(d2)\n\n num = numpy.dot(d2p, dp)\n den = numpy.dot(d2p, d1)\n\n if abs(den) > 0.00001:\n return num / den\n return None\n\n# ------------------------------------------------------------------------------\nclass ImageSampler(object):\n # --------------------------------------------------------------------------\n def __init__(self, image, width, height):\n self._im = image\n self._w = width\n self._h = height\n\n # --------------------------------------------------------------------------\n @classmethod\n def from_file(cls, path, width, height):\n import PIL.Image\n image = PIL.Image.open(path).convert(\"RGB\")\n\n if width is None:\n width, unused = image.size\n if height is None:\n unused, height = image.size\n\n if (width, height) != image.size:\n image = image.resize((width, height), PIL.Image.BICUBIC)\n return cls(image, width, height)\n\n # --------------------------------------------------------------------------\n def width(self):\n return self._w\n\n # --------------------------------------------------------------------------\n def height(self):\n return self._h\n\n # --------------------------------------------------------------------------\n def get_pixel(self, x, y):\n x = max(min(x, self._w-1), 0)\n y = max(min(y, self._h-1), 0)\n c0, c1, c2 = self._im.getpixel((x, y))\n\n return (c0/255.0, c1/255.0, c2/255.0)\n\n # --------------------------------------------------------------------------\n def converted(self, mode):\n return ImageSampler(self._im.convert(mode), self._w, self._h)\n\n# ------------------------------------------------------------------------------\nclass NoImageSampler(object):\n # 
--------------------------------------------------------------------------\n def __init__(self):\n pass\n\n # --------------------------------------------------------------------------\n def get_pixel(self, x, y):\n return (0.0, 0.0, 0.0)\n\n # --------------------------------------------------------------------------\n def converted(self, mode):\n return self\n\n# ------------------------------------------------------------------------------\nclass RandomGenerator(object):\n\n # --------------------------------------------------------------------------\n def __init__(self, mrg, rrg):\n self._mrg = mrg\n self._rrg = rrg\n\n # --------------------------------------------------------------------------\n def get(self, rim):\n if rim:\n try:\n return self._rrg.random()\n except:\n pass\n return self._mrg.random()\n\n# ------------------------------------------------------------------------------\nclass Randomized(object):\n # --------------------------------------------------------------------------\n def _get_rng0(self):\n try:\n return self.rng0\n except:\n self.rng0 = random.Random(self._mid_seed)\n return self.rng0\n\n # --------------------------------------------------------------------------\n def _mid_rng(self):\n import random\n\n if self._mid_seed is None:\n import time\n try: return random.SystemRandom()\n except: return random.Random(time.time())\n else:\n return random.Random(self._get_rng0().randrange(0, sys.maxsize))\n\n # --------------------------------------------------------------------------\n def _rim_rng(self):\n if self._rim_seed is not None:\n return random.Random(self._rim_seed)\n return None\n\n # --------------------------------------------------------------------------\n def get_rng(self):\n return RandomGenerator(self._mid_rng(), self._rim_rng())\n\n # --------------------------------------------------------------------------\n def __init__(self, options):\n self._mid_seed = options.seed\n self._rim_seed = options.rim_seed\n\n# ------------------------------------------------------------------------------\nclass RandomCellValues(Randomized):\n # --------------------------------------------------------------------------\n def _gen_values(self, w, h, transformable):\n\n rc = self.get_rng()\n\n cell_data = list()\n for y in range(h):\n r = list()\n for x in range(w):\n rim = x <= 0 or y <= 0 or x+1 >= w or y+1 >= h\n r.append(rc.get(rim))\n cell_data.append(r)\n\n if transformable:\n r = range(int(w/2)+1)\n rv = [rc.get(True) for i in r]\n for i in r:\n v = 0.5 + (rv[i]-0.5)*0.75\n\n cell_data[i][0] = v\n cell_data[h-i-1][0] = v\n cell_data[i][w-1] = v\n cell_data[h-i-1][w-1] = v\n cell_data[0][i] = v\n cell_data[0][w-i-1] = v\n cell_data[h-1][i] = v\n cell_data[h-1][w-i-1] = v\n return cell_data\n\n # --------------------------------------------------------------------------\n def __init__(self, options, w, h):\n Randomized.__init__(self, options)\n self._values = self._gen_values(w, h, options.transformable)\n\n # --------------------------------------------------------------------------\n def get(self, x, y):\n return self._values[y][x]\n\n# ------------------------------------------------------------------------------\nclass RandomCellOffsets(Randomized):\n # --------------------------------------------------------------------------\n def _gen_offsets(self, w, h, transformable):\n\n rx = self.get_rng()\n ry = self.get_rng()\n\n cell_data = list()\n for y in range(h):\n row = list()\n for x in range(w):\n rim = x <= 0 or y <= 0 or x+1 >= w or y+1 >= h\n 
row.append((rx.get(rim), ry.get(rim)))\n cell_data.append(row)\n\n if transformable:\n r = range(int(w/2)+1)\n rv = [(rx.get(True), ry.get(True)) for i in r]\n for i in r:\n xo, yo = rv[i]\n l = 0.8\n cell_data[i][0] = (l*xo, yo)\n cell_data[h-i-1][0] = (l*xo, 1.0-yo)\n cell_data[i][w-1] = (1.0-l*xo, 1.0-yo)\n cell_data[h-i-1][w-1] = (1.0-l*xo, yo)\n cell_data[0][i] = (xo, l*yo)\n cell_data[0][w-i-1] = (1.0-xo, l*yo)\n cell_data[h-1][i] = (1.0-xo, 1.0-l*yo)\n cell_data[h-1][w-i-1] = (xo, 1.0-l*yo)\n return cell_data\n # --------------------------------------------------------------------------\n def __init__(self, options, w, h):\n Randomized.__init__(self, options)\n self._offsets = self._gen_offsets(w, h, options.transformable)\n\n # --------------------------------------------------------------------------\n def get(self, x, y):\n return self._offsets[y][x]\n\n# ------------------------------------------------------------------------------\nclass ImageContourCellOffsets(object):\n # --------------------------------------------------------------------------\n def _gen_offsets(self, im, bg, w, h):\n\n def _distmod(x, y):\n d = abs(x - y)\n return d if d < 0.5 else 1.0-d\n\n kernel = [\n (-1, -1),\n ( 0, -1),\n ( 1, -1),\n (-1, 0),\n ( 1, 0),\n (-1, 1),\n ( 0, 1),\n ( 1, 1)\n ]\n kn = 1.0/(len(kernel))\n\n\n cell_data = list()\n for y in range(h):\n row = list()\n for x in range(w):\n nx = 0.0\n ny = 0.0\n\n dispx = 0.0\n dispy = 0.0\n\n h, s, v = im.get_pixel(x, y)\n for ox, oy in kernel:\n oh, os, ov = im.get_pixel(x+ox, y+oy)\n dh = _distmod(h, oh)\n ds = s - os\n dv = v - ov\n adh = abs(dh)\n ads = abs(ds)\n adv = abs(dv)\n dw = dv if adv > ads else ds if ads > adh else dh\n vx, vy = ox, oy\n vl = math.sqrt(vx*vx + vy*vy)\n vx /= vl\n vy /= vl\n nx += vx*dw\n ny += vy*dw\n dispx += nx*nx\n dispy += ny*ny\n\n nx = nx*kn\n ny = ny*kn\n dispx = math.sqrt(dispx)*kn\n dispy = math.sqrt(dispy)*kn\n dispw = sigmoid(\n math.sqrt(\n max(abs(nx), abs(ny), abs(dispx-dispy))\n ),\n 2.5\n )\n nx = 0.5 + 0.5*nx\n ny = 0.5 + 0.5*ny\n bx, by = bg.get(x, y)\n row.append((mix(bx, nx, dispw), mix(by, ny, dispw)))\n\n cell_data.append(row)\n return cell_data\n # --------------------------------------------------------------------------\n def __init__(self, options, bg, w, h):\n self._offsets = self._gen_offsets(\n options.image.converted(\"HSV\"),\n bg,\n w, h)\n\n # --------------------------------------------------------------------------\n def get(self, x, y):\n return self._offsets[y][x]\n\n# ------------------------------------------------------------------------------\nclass HoneycombXCellOffsets(object):\n # --------------------------------------------------------------------------\n def __init__(self, options, bg, w, h):\n self._fact_x = 0.8\n self._fact_y = 0.9\n self._bg = bg\n # --------------------------------------------------------------------------\n def get(self, x, y):\n hx, hy = (0.5, 0.0 if x % 2 == 0 else 0.5)\n bx, by = self._bg.get(x, y)\n return (mix(bx, hx, self._fact_x), mix(by, hy, self._fact_y))\n# ------------------------------------------------------------------------------\nclass HoneycombYCellOffsets(object):\n # --------------------------------------------------------------------------\n def __init__(self, options, bg, w, h):\n self._fact_x = 0.9\n self._fact_y = 0.8\n self._bg = bg\n # --------------------------------------------------------------------------\n def get(self, x, y):\n hx, hy = (0.0 if y % 2 == 0 else 0.5, 0.5)\n bx, by = self._bg.get(x, y)\n return (mix(bx, 
hx, self._fact_x), mix(by, hy, self._fact_y))\n# ------------------------------------------------------------------------------\nclass VoronoiArgumentParser(argparse.ArgumentParser):\n # --------------------------------------------------------------------------\n def _nonnegative_int(self, x):\n try:\n i = int(x)\n assert i > 0\n return i\n except:\n self.error(\"`%s' is not a positive integer value\" % str(x))\n # --------------------------------------------------------------------------\n def __init__(self, **kw):\n argparse.ArgumentParser.__init__(self, **kw)\n\n self.add_argument(\n 'output',\n nargs='?',\n type=argparse.FileType('w'),\n default=sys.stdout\n )\n\n self.add_argument(\n '--log', '-l',\n type=argparse.FileType('w'),\n default=sys.stderr\n )\n\n self.add_argument(\n '--jobs', '-j',\n dest=\"job_count\",\n type=self._nonnegative_int,\n action=\"store\",\n default=multiprocessing.cpu_count()\n )\n\n self.add_argument(\n '--x-cells', '-X',\n type=self._nonnegative_int,\n action=\"store\",\n default=None\n )\n\n self.add_argument(\n '--y-cells', '-Y',\n type=self._nonnegative_int,\n action=\"store\",\n default=None\n )\n\n self.add_argument(\n '--width', '-W',\n type=self._nonnegative_int,\n action=\"store\",\n default=512\n )\n\n self.add_argument(\n '--height', '-H',\n type=self._nonnegative_int,\n action=\"store\",\n default=512\n )\n\n self.add_argument(\n '--units', '-U',\n action=\"store\",\n default=\"px\"\n )\n\n self.add_argument(\n '--stroke-width', '-s',\n type=float,\n action=\"store\",\n default=0.5\n )\n\n self.add_argument(\n '--value-low', '-vl',\n type=float,\n action=\"store\",\n default=0.05\n )\n\n self.add_argument(\n '--value-high', '-vh',\n type=float,\n action=\"store\",\n default=0.95\n )\n\n self.add_argument(\n '--cell-z-coord', '-cz',\n type=float,\n action=\"store\",\n default=0.0\n )\n\n self.add_argument(\n '--scale', '-S',\n type=float,\n action=\"store\",\n default=0.9\n )\n\n self.add_argument(\n '--scale-mode', '-Q',\n type=str,\n choices=[\"constant\", \"linear\", \"sqrt\", \"pow2\", \"exp\", \"sigmoid\"],\n action=\"store\",\n default=\"constant\"\n )\n\n self.add_argument(\n '--seed', '-rs',\n type=float,\n action=\"store\",\n default=None\n )\n\n self.add_argument(\n '--rim-seed', '-Rs',\n type=float,\n action=\"store\",\n default=None\n )\n\n self.add_argument(\n '--transformable', '-T',\n action=\"store_true\",\n default=False\n )\n\n self.add_argument(\n '--color-mode', '-M',\n type=str,\n choices=[\"grayscale\", \"cell-coord\", \"image-rgb\"],\n action=\"store\",\n default=\"grayscale\"\n )\n\n self.add_argument(\n '--cell-mode', '-C',\n type=str,\n choices=[\"full\", \"scaled\", \"flagstone\",\"pebble\", \"worley\"],\n action=\"store\",\n default=\"full\"\n )\n\n self.add_argument(\n '--offs-mode', '-O',\n type=str,\n choices=[\"default\", \"honeycomb-x\", \"honeycomb-y\"],\n action=\"store\",\n default=\"default\"\n )\n\n self.add_argument(\n '--image', '-i',\n dest=\"image_path\",\n type=os.path.realpath,\n action=\"store\",\n default=None\n )\n\n self.add_argument(\n '--verbose', '-v',\n action=\"store_true\",\n default=False\n )\n # --------------------------------------------------------------------------\n def process_parsed_options(self, options):\n if options.transformable:\n if options.width != options.height:\n self.error(\"width and height must be the same in transformable mode\")\n if options.x_cells != options.y_cells:\n self.error(\"X-cells and Y-cells must be the same in transformable mode\")\n\n if options.image_path 
is not None:\n options.image = ImageSampler.from_file(\n options.image_path,\n options.x_cells,\n options.y_cells\n )\n\n if options.x_cells is None:\n options.x_cells = options.image.width()\n if options.y_cells is None:\n options.y_cells = options.image.height()\n else:\n options.image = NoImageSampler()\n if options.x_cells is None:\n options.x_cells = 32\n if options.y_cells is None:\n options.y_cells = 32\n\n if options.cell_mode in [\"worley\"]:\n options.need_neighbors = True\n options.job_count = 1\n else:\n options.need_neighbors = False\n\n return options\n # --------------------------------------------------------------------------\n def parse_args(self):\n return self.process_parsed_options(\n argparse.ArgumentParser.parse_args(self)\n )\n\n# ------------------------------------------------------------------------------\ndef make_argument_parser():\n return VoronoiArgumentParser(\n prog=\"voronoi-svg\",\n description=\"\"\"\n Utility annotating lines read from standard input\n \"\"\"\n )\n\n# ------------------------------------------------------------------------------\nclass Renderer(object):\n # --------------------------------------------------------------------------\n def grayscale_color_str(self, v):\n c = \"%02x\" % int(255*v)\n return \"#\"+3*c\n\n # --------------------------------------------------------------------------\n def cell_offset(self, x, y):\n cy = (y+self.y_cells)%self.y_cells\n cx = (x+self.x_cells)%self.x_cells\n return self.cell_offsets.get(cx, cy)\n\n # --------------------------------------------------------------------------\n def cell_value(self, x, y):\n cy = (y+self.y_cells)%self.y_cells\n cx = (x+self.x_cells)%self.x_cells\n return self.cell_values.get(cx, cy)\n\n # --------------------------------------------------------------------------\n def cell_grayscale_color(self, x, y):\n cv = self.cell_value(x, y)\n v = self.value_low + cv*(self.value_high-self.value_low)\n return self.grayscale_color_str(v)\n\n # --------------------------------------------------------------------------\n def cell_coord_color(self, x, y):\n x = (x + self.x_cells) % self.x_cells\n y = (y + self.y_cells) % self.y_cells\n\n r = int((256*x)/self.x_cells)\n g = int((256*y)/self.y_cells)\n b = int((256*self.cell_z_coord))\n\n return \"#%02x%02x%02x\" % (r, g, b)\n\n # --------------------------------------------------------------------------\n def cell_image_color(self, x, y):\n r, g, b = self.image.get_pixel(x, y)\n\n return \"#%02x%02x%02x\" % (int(r*255), int(g*255), int(b*255))\n\n # --------------------------------------------------------------------------\n def cell_gradient_id(self, x, y, i, j):\n s = \"grad%d_%d\" % (\n (y+3) * (self.x_cells + 6) + (x+3),\n (y+j+3) * (self.x_cells + 6) + (x+i+3)\n )\n return s\n\n # --------------------------------------------------------------------------\n def cell_scale(self, x, y):\n coef = 1.0\n if self.scale_mode == \"linear\":\n coef = self.cell_value(x, y)\n elif self.scale_mode == \"sqrt\":\n coef = math.sqrt(self.cell_value(x, y))\n elif self.scale_mode == \"pow2\":\n coef = math.pow(self.cell_value(x, y), 2)\n elif self.scale_mode == \"exp\":\n coef = math.exp(self.cell_value(x, y)) / math.exp(1)\n elif self.scale_mode == \"sigmoid\":\n coef = 0.5 - 0.5*math.cos(self.cell_value(x, y)*math.pi)\n return self.scale * coef\n\n # --------------------------------------------------------------------------\n def full_cell_element_str(self, x, y, unused, corners, offs):\n clist = [\"%.3f %.3f\" % (c[0], c[1]) for c in corners]\n 
pathstr = \"M\"+\" L\".join(clist)+\" Z\"\n yield \"\"\"\n <path d=\"%(def)s\" stroke=\"%(color)s\" fill=\"%(color)s\"/>\\n\"\"\" % {\n \"def\": pathstr,\n \"color\": self.cell_color(x, y)\n }\n\n # --------------------------------------------------------------------------\n def scaled_cell_element_str(self, x, y, center, corners, offs):\n m = set_center(corners)\n newcorners = [segment_point(m, c, self.cell_scale(x, y)) for c in corners]\n yield self.full_cell_element_str(x, y, center, newcorners);\n\n # --------------------------------------------------------------------------\n def flagstone_cell_element_str(self, x, y, center, corners, offs):\n zcorners = zip(corners, corners[1:] + [corners[0]])\n c = self.cell_value(x, y)\n newcorners = [segment_point(a, b, c) for (a, b) in zcorners]\n yield self.scaled_cell_element_str(x, y, center, newcorners);\n\n # --------------------------------------------------------------------------\n def pebble_cell_element_str(self, x, y, center, corners, offs):\n m = set_center(corners)\n apoints = [segment_point(m, c, self.cell_scale(x, y)) for c in corners]\n bpoints = apoints[1:] + [apoints[0]]\n c = self.cell_value(x, y)\n zpoints = zip(apoints, bpoints)\n cpoints = [segment_point(a, b, c) for (a, b) in zpoints]\n dpoints = cpoints[1:] + [cpoints[0]]\n\n zpoints = zip(bpoints, dpoints)\n\n cfmt = lambda c : \"%.3f %.3f\" % (c[0], c[1])\n\n clist = [\"%s, %s\" % (cfmt(b), cfmt(d)) for (b, d) in zpoints]\n pathstr = \"M%s Q\" % cfmt(cpoints[0])+\" Q\".join(clist)+\" Z\"\n yield \"\"\"<path d=\"%(def)s\" stroke=\"%(color)s\" fill=\"%(color)s\"/>\\n\"\"\" % {\n \"def\": pathstr,\n \"color\": self.cell_color(x, y)\n }\n\n # --------------------------------------------------------------------------\n def worley_cell_element_str(self, x, y, center, corners, offs):\n n = len(corners)\n for t in range(n):\n i, j = offs[t]\n verts = (center, corners[t], corners[(t+1)%n])\n clist = [\"%.3f %.3f\" % (v[0], v[1]) for v in verts]\n pathstr = \"M\"+\" L\".join(clist)+\" Z\"\n yield \"\"\"<path d=\"%(def)s\" stroke=\"url(#%(gref)s)\" fill=\"url(#%(gref)s)\"/>\\n\"\"\" % {\n \"def\": pathstr,\n \"gref\": self.cell_gradient_id(x, y, i, j)\n }\n\n # --------------------------------------------------------------------------\n def __init__(self):\n\n useropts = make_argument_parser().parse_args()\n\n for k, v in useropts.__dict__.items():\n self.__dict__[k] = v\n\n if self.color_mode == \"grayscale\":\n self.cell_color = lambda x, y: self.cell_grayscale_color(x, y)\n elif self.color_mode == \"cell-coord\":\n self.cell_color = lambda x, y: self.cell_coord_color(x, y)\n elif self.color_mode == \"image-rgb\":\n self.cell_color = lambda x, y: self.cell_image_color(x, y)\n\n if self.cell_mode == \"full\":\n self.cell_element_str = self.full_cell_element_str\n elif self.cell_mode == \"scaled\":\n self.cell_element_str = self.scaled_cell_element_str\n elif self.cell_mode == \"flagstone\":\n self.cell_element_str = self.flagstone_cell_element_str\n elif self.cell_mode == \"pebble\":\n self.cell_element_str = self.pebble_cell_element_str\n elif self.cell_mode == \"worley\":\n self.cell_element_str = self.worley_cell_element_str\n\n self.cell_values = RandomCellValues(\n self,\n self.x_cells,\n self.y_cells\n )\n\n rco = RandomCellOffsets(\n self,\n self.x_cells,\n self.y_cells\n )\n\n if self.offs_mode == \"honeycomb-x\":\n self.cell_offsets = HoneycombXCellOffsets(\n self,\n rco,\n self.x_cells,\n self.y_cells\n )\n elif self.offs_mode == \"honeycomb-y\":\n self.cell_offsets = 
HoneycombYCellOffsets(\n self,\n rco,\n self.x_cells,\n self.y_cells\n )\n else:\n self.cell_offsets = ImageContourCellOffsets(\n self,\n rco,\n self.x_cells,\n self.y_cells\n )\n\n self.values = dict()\n self.values[\"width\"] = self.width\n self.values[\"height\"] = self.height\n self.values[\"wunit\"] = self.units\n self.values[\"hunit\"] = self.units\n\n self.cell_fmt = \"%%%dd %%%dd\\n\" % (\n int(math.log10(self.x_cells)+1),\n int(math.log10(self.y_cells)+1)\n )\n\n# ------------------------------------------------------------------------------\ndef cell_world_coord(renderer, x, y):\n\n c = renderer.cell_offset(x, y)\n return numpy.array((\n (x+c[0])*(renderer.width/renderer.x_cells),\n (y+c[1])*(renderer.height/renderer.y_cells)\n ))\n\n# ------------------------------------------------------------------------------\ndef cell_value(renderer, x, y):\n return renderer.get_value(x, y)\n\n# ------------------------------------------------------------------------------\ndef cell_color(renderer, x, y):\n return grayscalestr(\n renderer.value_low+\n cell_value(renderer, x, y)*\n (renderer.value_high-renderer.value_low)\n )\n\n# ------------------------------------------------------------------------------\ndef offs_cell_world_coord(renderer, x, y, o):\n return cell_world_coord(renderer, x+o[0], y+o[1])\n\n# ------------------------------------------------------------------------------\ndef make_cell(renderer, x, y):\n\n owc = cell_world_coord(renderer, x, y)\n\n offsets = []\n\n for j in range(-2, 3):\n for i in range(-2, 3):\n if j != 0 or i != 0:\n offsets.append((i, j))\n\n loffs = len(offsets)\n cuts = []\n\n for o in offsets:\n cwc = offs_cell_world_coord(renderer, x, y, o)\n\n sm = segment_midpoint(owc, cwc)\n sn = segment_normal(owc, cwc)\n cuts.append((sm, sn))\n\n assert loffs == len(cuts)\n\n intersections = []\n\n for cj in range(loffs):\n for ci in range(cj+1, loffs):\n t = line_intersect_param(cuts[cj], cuts[ci])\n if t is not None:\n intersections.append((cuts[cj][0]+cuts[cj][1]*t, set([ci, cj])))\n\n corners_and_cuts = []\n\n for isc, cus in intersections:\n seg = (owc, isc-owc)\n eps = 0.001\n skip = False\n\n for cut in cuts:\n t = line_intersect_param(seg, cut)\n if t is not None and t >= 0 and t < 1-eps:\n skip = True\n break\n\n if not skip:\n corners_and_cuts.append((isc, cus))\n\n def corner_angle(p):\n v = p[0] - owc\n return math.atan2(v[1], v[0])\n\n sorted_corners_and_cuts = sorted(corners_and_cuts, key=corner_angle)\n\n corners = []\n neighbors = []\n\n caclen = len(sorted_corners_and_cuts)\n for c in range(caclen):\n co0, cu0 = sorted_corners_and_cuts[c]\n co1, cu1 = sorted_corners_and_cuts[(c+1)%caclen]\n cu = cu0.intersection(cu1)\n\n corners.append(co0)\n if renderer.need_neighbors:\n assert len(cu) == 1\n neighbors.append(offsets[cu.pop()])\n\n if renderer.need_neighbors:\n assert len(corners) == len(neighbors)\n\n return owc, corners, neighbors\n \n# ------------------------------------------------------------------------------\ndef do_make_cell(renderer, job, output_lock):\n w = renderer.x_cells + 2\n h = renderer.y_cells + 2\n k = job\n n = w * h\n\n res = []\n log = []\n\n def _flush(res, log):\n r = str().join(res)\n if renderer.verbose:\n l = str().join(log)\n try:\n output_lock.acquire()\n renderer.output.write(r)\n if renderer.verbose:\n renderer.log.write(l)\n finally:\n output_lock.release()\n return ([], [])\n\n try:\n while k < n:\n y = int(k / w) - 1\n x = int(k % w) - 1\n\n center, corners, offs = make_cell(renderer, x, y)\n for svg_str in 
renderer.cell_element_str(x, y, center, corners, offs):\n res.append(svg_str)\n if renderer.verbose:\n log.append(renderer.cell_fmt % (x, y))\n else:\n log.append(None)\n\n if len(res) >= renderer.job_count:\n res, log = _flush(res, log)\n\n k += renderer.job_count\n except KeyboardInterrupt:\n pass\n\n _flush(res, log)\n\n# ------------------------------------------------------------------------------\ndef make_gradients(renderer):\n w = renderer.x_cells\n h = renderer.y_cells\n\n grad_fmt = \"\"\"<linearGradient gradientUnits=\"userSpaceOnUse\" id=\"%(gref)s\" \"\"\"+\\\n \"\"\"x1=\"%(x1)f\" y1=\"%(y1)f\" x2=\"%(x2)f\" y2=\"%(y2)f\">\\n\"\"\"\n stop_fmt = \"\"\"<stop offset=\"%(soffs)d%%\" style=\"stop-color:%(color)s\"/>\\n\"\"\"\n\n offsets = []\n for j in range(-2, 3):\n for i in range(-2, 3):\n if j != 0 or i != 0:\n offsets.append((i, j))\n\n for y in range(-1, h+2):\n for x in range(-1, w+2):\n for i, j in offsets:\n cwc = cell_world_coord(renderer, x, y)\n owc = cell_world_coord(renderer, x+i, y+j)\n vec = cwc - owc\n\n renderer.output.write(grad_fmt % {\n \"gref\": renderer.cell_gradient_id(x, y, i, j),\n \"x1\": cwc[0],\n \"y1\": cwc[1],\n \"x2\": owc[0],\n \"y2\": owc[1] \n })\n if renderer.cell_mode == \"worley\":\n renderer.output.write(stop_fmt % {\n \"soffs\": 0.0,\n \"color\": \"#%(r)02x%(g)02x%(b)02x%(a)02x\" % {\n \"r\": int(255*float((x+w) % w)/w),\n \"g\": int(255*float((y+h) % h)/h),\n \"a\": int(255*renderer.cell_value(x, y)),\n \"b\": 255\n }\n })\n renderer.output.write(stop_fmt % {\n \"soffs\": 50.0,\n \"color\": \"#%(r)02x%(g)02x%(b)02x%(a)02x\" % {\n \"r\": int(255*float((x+w) % w)/w),\n \"g\": int(255*float((y+h) % h)/h),\n \"a\": int(255*renderer.cell_value(x, y)),\n \"b\": 0\n }\n })\n renderer.output.write(\"\"\"</linearGradient>\\n\"\"\")\n# ------------------------------------------------------------------------------\ndef print_svg(renderer):\n renderer.output.write(\"\"\"<?xml version=\"1.0\" encoding=\"utf8\"?>\\n\"\"\")\n renderer.output.write(\"\"\"<svg xmlns=\"http://www.w3.org/2000/svg\"\n xmlns:svg=\"http://www.w3.org/2000/svg\"\n width=\"%(width)s%(wunit)s\" height=\"%(height)s%(hunit)s\"\n viewBox=\"0 0 %(width)s %(height)s\"\n version=\"1.1\"\n contentScriptType=\"text/ecmascript\"\n contentStyleType=\"text/css\"\\n>\\n\"\"\" % renderer.values)\n renderer.output.write(\n \"\"\"<g class=\"voronoi\" stroke-width=\"%(stroke_width)f\">\\n\"\"\" % {\n \"stroke_width\": renderer.stroke_width\n }\n )\n\n renderer.output.write(\"<defs>\\n\")\n if renderer.cell_mode in [\"worley\"]:\n make_gradients(renderer)\n renderer.output.write(\"</defs>\\n\")\n renderer.output.flush()\n\n try:\n output_lock = multiprocessing.Lock()\n\n def call_do_make_cell(renderer, job, output_lock):\n try:\n do_make_cell(renderer, job, output_lock)\n except Exception:\n sys.stderr.write(\"failed to generate SVG, please retry\\n\")\n raise SystemExit\n\n tasks = []\n for job in range(renderer.job_count):\n t = multiprocessing.Process(\n target=call_do_make_cell,\n args=(renderer, job, output_lock)\n )\n t.start()\n tasks.append(t)\n\n for t in tasks:\n t.join()\n if t.exitcode is not None and t.exitcode != 0:\n return 1\n except KeyboardInterrupt:\n pass\n\n renderer.output.write(\"\"\"\\n\"\"\")\n\n renderer.output.write(\"\"\"</g>\\n\"\"\")\n renderer.output.write(\"\"\"</svg>\\n\"\"\")\n return 0\n\n# ------------------------------------------------------------------------------\ndef main():\n renderer = Renderer()\n sys.exit(print_svg(renderer))\n \n# 
------------------------------------------------------------------------------\nif __name__ == \"__main__\": main()\n\n"
] | [
[
"numpy.empty_like",
"numpy.dot",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cocoaaa/TileMani | [
"ca006f201be530af32d7c5dcae03df5daa08359a"
] | [
"tilemani/compute/features.py"
] | [
"import osmnx as ox\nfrom typing import Tuple, Dict\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\n\n\ndef get_total_area(G) -> float:\n \"\"\"Computes the total area (in square meters) of the square maptile of the graph (when it's rasterized)\n G: unprojected (ie.e in lat,lng degree crs)\n\n \"\"\"\n gdf_n, gdf_e = ox.utils_graph.graph_to_gdfs(G)\n gdfproj_n, gdfproj_e = ox.project_gdf(gdf_n), ox.project_gdf(gdf_e)\n node_bounds, edge_bounds = gdfproj_n.total_bounds, gdfproj_e.total_bounds\n total_bounds = (\n min(node_bounds[0], edge_bounds[0]),\n min(node_bounds[1], edge_bounds[1]),\n max(node_bounds[2], edge_bounds[2]),\n max(node_bounds[3], edge_bounds[3])\n )\n\n minx, miny, maxx, maxy = total_bounds\n dx = maxx - minx\n dy = maxy - miny\n total_area = dx * dy\n return total_area\n\n\ndef get_road_area(G, avg_road_radius=3.0):\n \"\"\"\n G: unprojected (ie. in lat,lng degree)\n avg_road_radis: radius of the roads on average, in meters\n \"\"\"\n gdf_nodes, gdf_edges = ox.utils_graph.graph_to_gdfs(G)\n road_geom = ox.project_gdf(gdf_edges).unary_union # Multistring obj in UTM\n buffered_road_geom = road_geom.buffer(avg_road_radius)\n road_area = buffered_road_geom.area\n print('Area of the roads: ', road_area)\n\n return road_area\n\n\ndef compute_road_network_stats(G) -> Dict:\n # compute total area of the area covered by the rastered image of this graph/network\n total_area = get_total_area(G)\n\n # unpack dicts into individiual keys:values\n stats = ox.basic_stats(G, area=total_area)\n for k, count in stats[\"streets_per_node_counts\"].items():\n stats[\"int_{}_count\".format(k)] = count\n for k, proportion in stats[\"streets_per_node_proportion\"].items():\n stats[\"int_{}_prop\".format(k)] = proportion\n\n # delete the no longer needed dict elements\n del stats[\"streets_per_node_counts\"]\n del stats[\"streets_per_node_proportion\"]\n\n # change key named 'n' to 'n_nodes', and 'm' to 'n_edges'\n stats['n_nodes'] = stats.pop('n', None)\n stats['n_edges'] = stats.pop('m', None)\n\n return stats\n\n\ndef get_road_figure_and_nway_proportion(G,\n figsize=(8, 8),\n bgcolor='k',\n **pg_kwargs) -> Tuple[plt.Figure, Dict[int, float]]:\n \"\"\"Given an unprojected graph of road networks,\n show a figure of (i) network graph and (ii) distribution of the streets_per_node_proportions\n\n Other args:\n ----------\n - pg_kwargs:\n -\n \"\"\"\n f = plt.figure(figsize=figsize)\n gs = GridSpec(2, 1, height_ratios=[4, 1])\n ax0 = f.add_subplot(gs[0])\n ax1 = f.add_subplot(gs[1])\n\n # Plot the graph of road network\n ax0.set_facecolor(bgcolor)\n _ = ox.plot_graph(G, ax0, **pg_kwargs)\n\n # Plot bar chart of the distribution of nways\n # todo: add other stats we want to extract\n\n stats = ox.basic_stats(G)\n dist_n_ways = stats['streets_per_node_proportion']\n ax1.bar(dist_n_ways.keys(), dist_n_ways.values(), width=0.2, color='grey')\n ax1.set_xlabel('N ways')\n ax1.set_ylim([0, 1])\n return f, dist_n_ways\n\n\n## todo\n# def show_ntw_ori(lat_deg: float,\n# lng_deg: float,\n# radius_y: float = 804.,\n# radius_x: float = 1222.,\n# network_type: str = 'drive_service',\n# use_extended_stats: bool = False,\n# avg_road_radius=3.0,\n# weight_by_length=True,\n# verbose=False,\n# ) -> plt.Figure: # Tuple[plf.Figure, plt.Axes]:\n# \"\"\"\n# Args\n# ----\n# - lat_deg, lng_deg: center point's lat,lng in degree\n# - radius_y, radius_x: radius in latitudal(y) and longigtudal(x) direction, in meters\n# Use half of y_extent (of the maptile's coverage) for `radius_y`\n# Use half of 
x_extent for `radius_x`\n# - network_type (str): what kind of network to query from OSM\n# - avg_road_radius (float):\n# \n# \"\"\"\n# center = (lat_deg, lng_deg)\n# radius = max(radius_y, radius_x) # todo: min?\n#\n# # create network from point, inside bounding box of N, S, E, W each `radius` (meter) from point\n# G = ox.graph_from_point(center, dist=radius, dist_type='bbox', network_type=network_type)\n# G_proj = ox.project_graph(G)\n#\n# # Compute the area of roads\n# road_area = get_road_area(G, avg_road_radius=avg_road_radius) # square meters\n#\n# # Compute basic stats\n# stats = ox.basic_stats(G, area=road_area, circuity_dist='gc')\n#\n# # -- Optionally, compute extra, extended network stats, merge them together, and display\n# if use_extended_stats:\n# extended_stats = ox.extended_stats(G, ecc=True, bc=True, cc=True)\n# for key, value in extended_stats.items():\n# stats[key] = value\n# # -- unpack dicts into individiual keys:values\n# for k, count in stats['streets_per_node_counts'].items():\n# stats['int_{}_count'.format(k)] = count\n# for k, proportion in stats['streets_per_node_proportion'].items():\n# stats['int_{}_prop'.format(k)] = proportion\n#\n# # -- delete the no longer needed dict elements\n# del stats['streets_per_node_counts']\n# del stats['streets_per_node_proportion']\n#\n# # load as a pandas dataframe\n# df_stats = pd.DataFrame(pd.Series(stats, name='value'))\n#\n# if verbose:\n# display(df_stats)\n#\n# # calculate edge bearings\n# # weight_by_length = True\n# Gu = ox.add_edge_bearings(ox.get_undirected(G))\n#\n# bearings = {}\n# if weight_by_length:\n# # weight bearings by length (meters)\n# city_bearings = []\n# for u, v, k, d in Gu.edges(keys=True, data=True):\n# city_bearings.extend([d['bearing']] * int(d['length']))\n# b = pd.Series(city_bearings)\n# bearings[(lat_deg, lng_deg)] = pd.concat([b, b.map(reverse_bearing)]).reset_index(drop='True')\n# else:\n# # don't weight bearings, just take one value per street segment\n# b = pd.Series([d['bearing'] for u, v, k, d in Gu.edges(keys=True, data=True)])\n# bearings[(lat_deg, lng_deg)] = pd.concat([b, b.map(reverse_bearing)]).reset_index(drop='True')\n#\n# # breakpoint()\n# # Plot the queries network graph G_proj and polar historgram of bearings in this area\n# f = plt.Figure()\n# ax_g = f.add_subplot(121)\n# ax_g.axis('equal')\n# ox.plot_graph(G_proj, ax=ax_g, node_size=10, node_color='#66cc66')\n#\n# # plot polar histogram\n# ax_polar = f.add_subplot(122, projection=\"polar\")\n# title = f'{lat_deg:.2f}-{lng_deg:.2f}'\n# polar_plot(ax_polar, bearings[(lat_deg, lng_deg)].dropna(), title=title)\n#\n# # add super title and save full image\n# # fig.savefig('images/street-orientations.png', dpi=120, bbox_inches='tight')\n# # plt.show()\n# return f\n#\n# # todo: put two figures side-by-side for each (lat,lng)\n"
] | [
[
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
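For orientation, a minimal usage sketch of the compute_road_network_stats helper recorded above; the import path is inferred from the listed file path tilemani/compute/features.py, the query point, search distance, and network type are made-up values, and an osmnx version compatible with the module's calls is assumed:

import osmnx as ox
from tilemani.compute.features import compute_road_network_stats  # assumed package layout

# build a small unprojected (lat/lng) drive network around an arbitrary point
G = ox.graph_from_point((40.7580, -73.9855), dist=500, network_type="drive")
stats = compute_road_network_stats(G)   # dict of osmnx basic stats, keys renamed per the module
print(stats["n_nodes"], stats["n_edges"])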
alvarosanz/loadit | [
"dee6b68397040a3c16d9a83ee6cbbadbfd5bff76"
] | [
"loadit/queries.py"
] | [
"from numba import guvectorize\nimport numpy as np\n\n\nnp.seterr(invalid='ignore') # Ignore nan warnings\n\n\n@guvectorize(['(int32[:], int32[:], int32[:, :], int32[:, :])'],\n '(n), (m) -> (n, m), (n, m)',\n target='cpu', nopython=True)\ndef set_index(index0, index1, out0, out1):\n \"\"\"\n Set index columns as a combination of both.\n\n Parameters\n ----------\n index0 : numpy.array\n First index column non-combined (i.e. [10, 20, 30]).\n index1 : numpy.array\n Second index column non-combined (i.e. [1, 2]).\n out0 : numpy.array\n First index column combined (i.e. [10, 10, 20, 20, 30, 30]).\n out1 : numpy.array\n Second index column combined (i.e. [1, 2, 1, 2, 1, 2]).\n \"\"\"\n\n for i in range(len(index0)):\n\n for j in range(len(index1)):\n out0[i, j] = index0[i]\n out1[i, j] = index1[j]\n\n\n@guvectorize(['(float32[:, :], int64[:], boolean, float32[:], int64[:])',\n '(float64[:, :], int64[:], boolean, float64[:], int64[:])'],\n '(n, m), (n), () -> (m), (m)',\n target='cpu', nopython=True)\ndef max_load(array, LIDs, use_previous_agg, out, LIDs_out):\n \"\"\"\n Get maximum load case for each item.\n\n Parameters\n ----------\n array : numpy.array\n Values array (one row for each load case).\n LIDs : numpy.array\n LIDs array.\n use_previous_agg : bool\n Whether to perform aggregation taking into account\n previous aggregation stored at `out` and `LIDs_out` or not.\n out : numpy.array\n Maximum values array.\n LIDs_out : numpy.array\n Maximum LIDs array.\n \"\"\"\n\n for j in range(array.shape[1]):\n\n if not use_previous_agg or array[0, j] > out[j] or np.isnan(out[j]):\n out[j] = array[0, j]\n LIDs_out[j] = LIDs[0]\n\n for i in range(1, array.shape[0]):\n\n for j in range(array.shape[1]):\n\n if array[i, j] > out[j] or np.isnan(out[j]):\n out[j] = array[i, j]\n LIDs_out[j] = LIDs[i]\n\n\n@guvectorize(['(float32[:, :], int64[:], boolean, float32[:], int64[:])',\n '(float64[:, :], int64[:], boolean, float64[:], int64[:])'],\n '(n, m), (n), () -> (m), (m)',\n target='cpu', nopython=True)\ndef min_load(array, LIDs, use_previous_agg, out, LIDs_out):\n \"\"\"\n Get minimum load case for each item.\n\n Parameters\n ----------\n array : numpy.array\n Values array (one row for each load case).\n LIDs : numpy.array\n LIDs array.\n use_previous_agg : bool\n Whether to perform aggregation taking into account\n previous aggregation stored at `out` and `LIDs_out` or not.\n out : numpy.array\n Minimum values array.\n LIDs_out : numpy.array\n Minimum LIDs array.\n \"\"\"\n\n for j in range(array.shape[1]):\n\n if not use_previous_agg or array[0, j] < out[j] or np.isnan(out[j]):\n out[j] = array[0, j]\n LIDs_out[j] = LIDs[0]\n\n for i in range(1, array.shape[0]):\n\n for j in range(array.shape[1]):\n\n if array[i, j] < out[j] or np.isnan(out[j]):\n out[j] = array[i, j]\n LIDs_out[j] = LIDs[i]\n\n\n@guvectorize(['(float32[:, :], int64[:], float32[:], float32[:])',\n '(float64[:, :], int64[:], float64[:], float64[:])'],\n '(n, m), (l), (l) -> (m)',\n target='cpu', nopython=True)\ndef combine(array, indexes, coeffs, out):\n \"\"\"\n Combine load cases.\n\n Parameters\n ----------\n array : numpy.ndarray\n Field values (not combined).\n indexes : numpy.ndarray\n Indexes of LIDs to combine.\n coeffs : numpy.ndarray\n Multiplication coefficients.\n out : numpy.ndarray\n Output argument. 
Combined field values.\n \"\"\"\n\n for j in range(array.shape[1]):\n out[j] = 0\n\n for i in range(len(indexes)):\n index = indexes[i]\n coeff = coeffs[i]\n\n for j in range(array.shape[1]):\n out[j] += array[index, j] * coeff\n\n\n@guvectorize(['(float32[:, :], float32[:, :], float32[:, :], float32[:, :])',\n '(float64[:, :], float64[:, :], float64[:, :], float64[:, :])'],\n '(n, m), (n, m), (n, m) -> (n, m)',\n target='cpu', nopython=True)\ndef von_mises_2D(sxx, syy, sxy, out):\n\n for i in range(out.shape[0]):\n\n for j in range(out.shape[1]):\n out[i, j] = (sxx[i, j] ** 2 + syy[i, j] ** 2 - sxx[i, j] * syy[i, j] + 3 * sxy[i, j] ** 2) ** 0.5\n\n\n@guvectorize(['(float32[:, :], float32[:, :], float32[:, :], float32[:, :])',\n '(float64[:, :], float64[:, :], float64[:, :], float64[:, :])'],\n '(n, m), (n, m), (n, m) -> (n, m)',\n target='cpu', nopython=True)\ndef max_ppal_2D(sxx, syy, sxy, out):\n\n for i in range(out.shape[0]):\n\n for j in range(out.shape[1]):\n out[i, j] = (sxx[i, j] + syy[i, j]) / 2 + (((sxx[i, j] - syy[i, j]) / 2) ** 2 + sxy[i, j] ** 2) ** 0.5\n\n\n@guvectorize(['(float32[:, :], float32[:, :], float32[:, :], float32[:, :])',\n '(float64[:, :], float64[:, :], float64[:, :], float64[:, :])'],\n '(n, m), (n, m), (n, m) -> (n, m)',\n target='cpu', nopython=True)\ndef min_ppal_2D(sxx, syy, sxy, out):\n\n for i in range(out.shape[0]):\n\n for j in range(out.shape[1]):\n out[i, j] = (sxx[i, j] + syy[i, j]) / 2 - (((sxx[i, j] - syy[i, j]) / 2) ** 2 + sxy[i, j] ** 2) ** 0.5\n\n\n@guvectorize(['(float32[:, :], float32[:, :], float32[:, :], float32[:, :])',\n '(float64[:, :], float64[:, :], float64[:, :], float64[:, :])'],\n '(n, m), (n, m), (n, m) -> (n, m)',\n target='cpu', nopython=True)\ndef max_shear_2D(sxx, syy, sxy, out):\n\n for i in range(out.shape[0]):\n\n for j in range(out.shape[1]):\n out[i, j] = (((sxx[i, j] - syy[i, j]) / 2) ** 2 + sxy[i, j] ** 2) ** 0.5\n\n\n@guvectorize(['(float32[:, :], float32[:], float32[:, :])',\n '(float64[:, :], float64[:], float64[:, :])'],\n '(n, m), (m) -> (n, m)',\n target='cpu', nopython=True)\ndef stress_2D(value, thickness, out):\n\n for i in range(out.shape[0]):\n\n for j in range(out.shape[1]):\n out[i, j] = value[i, j] / thickness[j]\n\n\nquery_functions = {\n 'ELEMENT FORCES - QUAD4 (33)': {\n 'VonMises': [von_mises_2D, ('NX', 'NY', 'NXY')],\n 'MaxPpal': [max_ppal_2D, ('NX', 'NY', 'NXY')],\n 'MinPpal': [min_ppal_2D, ('NX', 'NY', 'NXY')],\n 'MaxShear': [max_shear_2D, ('NX', 'NY', 'NXY')],\n 'sx': [stress_2D, ('NX', 'thickness')],\n 'sy': [stress_2D, ('NY', 'thickness')],\n 'sxy': [stress_2D, ('NXY', 'thickness')],\n 'sVonMises': [von_mises_2D, ('sx', 'sy', 'sxy')],\n 'sMaxPpal': [max_ppal_2D, ('sx', 'sy', 'sxy')],\n 'sMinPpal': [min_ppal_2D, ('sx', 'sy', 'sxy')],\n 'sMaxShear': [max_shear_2D, ('sx', 'sy', 'sxy')],\n },\n 'ELEMENT FORCES - TRIA3 (74)': {\n 'VonMises': [von_mises_2D, ('NX', 'NY', 'NXY')],\n 'MaxPpal': [max_ppal_2D, ('NX', 'NY', 'NXY')],\n 'MinPpal': [min_ppal_2D, ('NX', 'NY', 'NXY')],\n 'MaxShear': [max_shear_2D, ('NX', 'NY', 'NXY')],\n 'sx': [stress_2D, ('NX', 'thickness')],\n 'sy': [stress_2D, ('NY', 'thickness')],\n 'sxy': [stress_2D, ('NXY', 'thickness')],\n 'sVonMises': [von_mises_2D, ('sx', 'sy', 'sxy')],\n 'sMaxPpal': [max_ppal_2D, ('sx', 'sy', 'sxy')],\n 'sMinPpal': [min_ppal_2D, ('sx', 'sy', 'sxy')],\n 'sMaxShear': [max_shear_2D, ('sx', 'sy', 'sxy')],\n },\n}\n\nquery_geometry = {\n 'ELEMENT FORCES - QUAD4 (33)': {\n 'thickness',\n },\n 'ELEMENT FORCES - TRIA3 (74)': {\n 'thickness',\n },\n}\n"
] | [
[
"numpy.isnan",
"numpy.seterr"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
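As a hedged illustration of how the guvectorized kernels above are meant to be invoked, the sketch below feeds arbitrary float32 arrays to von_mises_2D; the import path is assumed from the listed file path, and numba must be installed for loadit.queries to import:

import numpy as np
from loadit.queries import von_mises_2D  # assumed package layout

sxx = np.random.rand(4, 8).astype(np.float32)   # one row per load case, one column per item
syy = np.random.rand(4, 8).astype(np.float32)
sxy = np.random.rand(4, 8).astype(np.float32)
vm = von_mises_2D(sxx, syy, sxy)                # elementwise 2D von Mises stress, shape (4, 8)
print(vm.shape)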
mohazahran/Detecting-anomalies-in-user-trajectories | [
"e1513905c2ef7b87a5050b36060c4a49006e8b87"
] | [
"scripts/paper-data/plot_figure_mem.py"
] | [
"#-*- coding: utf8\nfrom __future__ import division, print_function\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom matplotlib import rc\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef initialize_matplotlib():\n inches_per_pt = 1.0 / 72.27\n fig_width = 240 * inches_per_pt # width in inches\n fig_height = 160 * inches_per_pt #.4 * fig_width\n \n rc('axes', labelsize=8)\n rc('axes', titlesize=8)\n rc('axes', unicode_minus=False)\n rc('axes', grid=False)\n rc('figure', figsize=(fig_width, fig_height))\n rc('grid', linestyle=':')\n rc('font', family='serif')\n rc('legend', fontsize=8)\n rc('lines', linewidth=.7)\n rc('ps', usedistiller='xpdf')\n rc('text', usetex=True)\n rc('xtick', labelsize=8)\n rc('ytick', labelsize=8)\n\ninitialize_matplotlib()\ndf = pd.read_excel('results_for_figure1.xlsx', sheetname='Figure5')\n\ncolors = {\n 'LFM-1k':'go-',\n 'LFM-G':'ms-',\n 'Bkite':'y*-',\n 'FourSQ':'bD-',\n 'Yoo':'rH-'\n }\n\nfor dset in colors:\n idx = (df['Dataset'] == dset)\n \n x_ax = df[idx]['MEM']\n y_ax = df[idx]['MRR'] \n \n plt.plot(x_ax, y_ax, colors[dset], alpha=.5, markersize=5, label=dset)\n\nax = plt.gca()\nax.tick_params(direction='out', pad=0.3)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.xaxis.set_ticks_position('bottom')\nax.yaxis.set_ticks_position('left')\n\nplt.ylim((0.1, 0.6))\nplt.xlim((0, 6))\nplt.minorticks_off()\nplt.ylabel('Mean Reciprocal Rank (MRR)', labelpad=0)\nplt.xlabel('Burst size', labelpad=0)\nplt.tight_layout(pad=0.2)\nplt.legend(loc='center right', frameon=False, ncol=3)\nplt.savefig('burst.pdf')\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"pandas.read_excel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.minorticks_off",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.rc",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
tomtuamnuq/qiskit-optimization | [
"04adb4952f3f42b39d4809338d3ee6e1900f2dfe"
] | [
"qiskit_optimization/algorithms/goemans_williamson_optimizer.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nImplementation of the Goemans-Williamson algorithm as an optimizer.\nRequires CVXPY to run.\n\"\"\"\nimport logging\nfrom typing import Optional, List, Tuple, Union\n\nimport numpy as np\nfrom qiskit.exceptions import MissingOptionalLibraryError\n\nfrom .optimization_algorithm import (\n OptimizationResult,\n OptimizationResultStatus,\n OptimizationAlgorithm,\n SolutionSample,\n)\nfrom ..problems.quadratic_program import QuadraticProgram\nfrom ..problems.variable import Variable\n\ntry:\n import cvxpy as cvx\n from cvxpy import DCPError, DGPError, SolverError\n\n _HAS_CVXPY = True\nexcept ImportError:\n _HAS_CVXPY = False\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass GoemansWilliamsonOptimizationResult(OptimizationResult):\n \"\"\"\n Contains results of the Goemans-Williamson algorithm. The properties ``x`` and ``fval`` contain\n values of just one solution. Explore ``samples`` for all possible solutions.\n \"\"\"\n\n def __init__(\n self,\n x: Optional[Union[List[float], np.ndarray]],\n fval: float,\n variables: List[Variable],\n status: OptimizationResultStatus,\n samples: Optional[List[SolutionSample]],\n sdp_solution: Optional[np.ndarray] = None,\n ) -> None:\n \"\"\"\n Args:\n x: the optimal value found in the optimization.\n fval: the optimal function value.\n variables: the list of variables of the optimization problem.\n status: the termination status of the optimization algorithm.\n samples: the solution samples.\n sdp_solution: an SDP solution of the problem.\n \"\"\"\n super().__init__(x, fval, variables, status, samples=samples)\n self._sdp_solution = sdp_solution\n\n @property\n def sdp_solution(self) -> Optional[np.ndarray]:\n \"\"\"\n Returns:\n Returns an SDP solution of the problem.\n \"\"\"\n return self._sdp_solution\n\n\nclass GoemansWilliamsonOptimizer(OptimizationAlgorithm):\n \"\"\"\n Goemans-Williamson algorithm to approximate the max-cut of a problem.\n The quadratic program for max-cut is given by:\n\n max sum_{i,j<i} w[i,j]*x[i]*(1-x[j])\n\n Therefore the quadratic term encodes the negative of the adjacency matrix of\n the graph.\n \"\"\"\n\n def __init__(\n self,\n num_cuts: int,\n sort_cuts: bool = True,\n unique_cuts: bool = True,\n seed: int = 0,\n ):\n \"\"\"\n Args:\n num_cuts: Number of cuts to generate.\n sort_cuts: True if sort cuts by their values.\n unique_cuts: The solve method returns only unique cuts, thus there may be less cuts\n than ``num_cuts``.\n seed: A seed value for the random number generator.\n\n Raises:\n MissingOptionalLibraryError: CVXPY is not installed.\n \"\"\"\n if not _HAS_CVXPY:\n raise MissingOptionalLibraryError(\n libname=\"CVXPY\",\n name=\"GoemansWilliamsonOptimizer\",\n pip_install=\"pip install 'qiskit-optimization[cvxpy]'\",\n )\n super().__init__()\n\n self._num_cuts = num_cuts\n self._sort_cuts = sort_cuts\n self._unique_cuts = unique_cuts\n np.random.seed(seed)\n\n def get_compatibility_msg(self, problem: QuadraticProgram) -> str:\n \"\"\"Checks whether a given problem can be solved with the optimizer implementing this 
method.\n\n Args:\n problem: The optimization problem to check compatibility.\n\n Returns:\n Returns the incompatibility message. If the message is empty no issues were found.\n \"\"\"\n message = \"\"\n if problem.get_num_binary_vars() != problem.get_num_vars():\n message = (\n f\"Only binary variables are supported, while the total number of variables \"\n f\"{problem.get_num_vars()} and there are {problem.get_num_binary_vars()} \"\n f\"binary variables across them\"\n )\n return message\n\n def solve(self, problem: QuadraticProgram) -> OptimizationResult:\n \"\"\"\n Returns a list of cuts generated according to the Goemans-Williamson algorithm.\n\n Args:\n problem: The quadratic problem that encodes the max-cut problem.\n\n Returns:\n cuts: A list of generated cuts.\n \"\"\"\n self._verify_compatibility(problem)\n\n adj_matrix = self._extract_adjacency_matrix(problem)\n\n try:\n chi = self._solve_max_cut_sdp(adj_matrix)\n except (DCPError, DGPError, SolverError):\n logger.error(\"Can't solve SDP problem\")\n return GoemansWilliamsonOptimizationResult(\n x=[],\n fval=0,\n variables=problem.variables,\n status=OptimizationResultStatus.FAILURE,\n samples=[],\n )\n\n cuts = self._generate_random_cuts(chi, len(adj_matrix))\n\n numeric_solutions = [\n (cuts[i, :], self.max_cut_value(cuts[i, :], adj_matrix)) for i in range(self._num_cuts)\n ]\n\n if self._sort_cuts:\n numeric_solutions.sort(key=lambda x: -x[1])\n\n if self._unique_cuts:\n numeric_solutions = self._get_unique_cuts(numeric_solutions)\n\n numeric_solutions = numeric_solutions[: self._num_cuts]\n samples = [\n SolutionSample(\n x=solution[0],\n fval=solution[1],\n probability=1.0 / len(numeric_solutions),\n status=OptimizationResultStatus.SUCCESS,\n )\n for solution in numeric_solutions\n ]\n\n return GoemansWilliamsonOptimizationResult(\n x=samples[0].x,\n fval=samples[0].fval,\n variables=problem.variables,\n status=OptimizationResultStatus.SUCCESS,\n samples=samples,\n sdp_solution=chi,\n )\n\n def _get_unique_cuts(\n self, solutions: List[Tuple[np.ndarray, float]]\n ) -> List[Tuple[np.ndarray, float]]:\n \"\"\"\n Returns:\n Unique Goemans-Williamson cuts.\n \"\"\"\n\n # Remove symmetry in the cuts to chose the unique ones.\n # Cuts 010 and 101 are symmetric(same cut), so we convert all cuts\n # starting from 1 to start from 0. In the next loop repetitive cuts will be removed.\n for idx, cut in enumerate(solutions):\n if cut[0][0] == 1:\n solutions[idx] = (\n np.array([0 if _ == 1 else 1 for _ in cut[0]]),\n cut[1],\n )\n\n seen_cuts = set()\n unique_cuts = []\n for cut in solutions:\n cut_str = \"\".join([str(_) for _ in cut[0]])\n if cut_str in seen_cuts:\n continue\n\n seen_cuts.add(cut_str)\n unique_cuts.append(cut)\n\n return unique_cuts\n\n @staticmethod\n def _extract_adjacency_matrix(problem: QuadraticProgram) -> np.ndarray:\n \"\"\"\n Extracts the adjacency matrix from the given quadratic program.\n\n Args:\n problem: A QuadraticProgram describing the max-cut optimization problem.\n\n Returns:\n adjacency matrix of the graph.\n \"\"\"\n adj_matrix = -problem.objective.quadratic.coefficients.toarray()\n adj_matrix = (adj_matrix + adj_matrix.T) / 2\n\n return adj_matrix\n\n def _solve_max_cut_sdp(self, adj_matrix: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculates the maximum weight cut by generating |V| vectors with a vector program,\n then generating a random plane that cuts the vertices. 
This is the Goemans-Williamson\n algorithm that gives a .878-approximation.\n\n Returns:\n chi: a list of length |V| where the i-th element is +1 or -1, representing which\n set the it-h vertex is in. Returns None if an error occurs.\n \"\"\"\n num_vertices = len(adj_matrix)\n constraints, expr = [], 0\n\n # variables\n x = cvx.Variable((num_vertices, num_vertices), PSD=True)\n\n # constraints\n for i in range(num_vertices):\n constraints.append(x[i, i] == 1)\n\n # objective function\n expr = cvx.sum(cvx.multiply(adj_matrix, (np.ones((num_vertices, num_vertices)) - x)))\n\n # solve\n problem = cvx.Problem(cvx.Maximize(expr), constraints)\n problem.solve()\n\n return x.value\n\n def _generate_random_cuts(self, chi: np.ndarray, num_vertices: int) -> np.ndarray:\n \"\"\"\n Random hyperplane partitions vertices.\n\n Args:\n chi: a list of length |V| where the i-th element is +1 or -1, representing\n which set the i-th vertex is in.\n num_vertices: the number of vertices in the graph\n\n Returns:\n An array of random cuts.\n \"\"\"\n eigenvalues = np.linalg.eigh(chi)[0]\n if min(eigenvalues) < 0:\n chi = chi + (1.001 * abs(min(eigenvalues)) * np.identity(num_vertices))\n elif min(eigenvalues) == 0:\n chi = chi + 0.00001 * np.identity(num_vertices)\n x = np.linalg.cholesky(chi).T\n\n r = np.random.normal(size=(self._num_cuts, num_vertices))\n\n return (np.dot(r, x) > 0) + 0\n\n @staticmethod\n def max_cut_value(x: np.ndarray, adj_matrix: np.ndarray):\n \"\"\"Compute the value of a cut from an adjacency matrix and a list of binary values.\n\n Args:\n x: a list of binary value in numpy array.\n adj_matrix: adjacency matrix.\n\n Returns:\n float: value of the cut.\n \"\"\"\n cut_matrix = np.outer(x, (1 - x))\n return np.sum(adj_matrix * cut_matrix)\n"
] | [
[
"numpy.dot",
"numpy.random.seed",
"numpy.ones",
"numpy.random.normal",
"numpy.linalg.eigh",
"numpy.identity",
"numpy.linalg.cholesky",
"numpy.outer",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
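A hedged sketch of driving the GoemansWilliamsonOptimizer above on a tiny max-cut instance; it assumes the upstream qiskit-optimization package layout together with networkx and cvxpy being available, and the 5-node cycle graph is an arbitrary example:

import networkx as nx
from qiskit_optimization.applications import Maxcut
from qiskit_optimization.algorithms import GoemansWilliamsonOptimizer

graph = nx.cycle_graph(5)                       # unweighted ring; its maximum cut has value 4
problem = Maxcut(graph).to_quadratic_program()  # binary max-cut QuadraticProgram
result = GoemansWilliamsonOptimizer(num_cuts=10).solve(problem)
print(result.fval, result.x)                    # best sampled cut value and its assignment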
Neharika2089/minerva_analysis | [
"25215a17219070b1433b58439040e2f4fdb715b4"
] | [
"minerva_analysis/server/utils/pre_normalization.py"
] | [
"import numpy as np\n\n\ndef preNormalize(input_csvPath, output_csvPath, skip_columns=[]):\n RAW_DATA = np.genfromtxt(input_csvPath, names=True, dtype=float, delimiter=',')\n marker_list = RAW_DATA.dtype.names\n norm_data = RAW_DATA.view((np.float, len(marker_list)))\n\n # A list of markers to skip normalization\n # markers_notToNorm = ['Field_Row', 'Field_Col', 'CellID', 'X_position','Y_position','Percent_Touching','Number_Neighbors','Neighbor_1','Neighbor_2','Neighbor_3','Neighbor_4','Neighbor_5', 'Eccentricity',\t'Solidity',\t'Extent',\t'EulerNumber',\t'Perimeter',\t'MajorAxisLength',\t'MinorAxisLength',\t'Orientation',\t'X_position',\t'Y_position']\n # A list of markers to skip log10 transform\n # markers_notToLog = ['DAPI1', 'A488b1', 'A555b1'] # skip 'DAPI1', 'A488b1', 'A555b1'\n markers_notToLog = [] # nothing to skip\n\n for marker_id in range(norm_data.shape[1]):\n if marker_list[marker_id] in skip_columns:\n continue\n if marker_list[marker_id] in markers_notToLog:\n # Log10 transform\n norm_data[:, marker_id] = np.log10(norm_data[:, marker_id] + 1)\n print(marker_list[marker_id], 'with log10 transform')\n else:\n print(marker_list[marker_id], 'without log10 transform')\n # Percentile normalization by mapping [0.1%, 99.9%] into [0, 1]\n min_tile, max_tile = np.percentile(norm_data[:, marker_id], [0.1, 99.9])\n\n # norm_data[:, marker_id] =(norm_data[:, marker_id] - min_tile) * (max_tile - min_tile) / (max_tile - min_tile) + min_tile\n norm_data[:, marker_id] = (norm_data[:, marker_id] - min_tile) / (max_tile - min_tile)\n norm_data[:, marker_id] = np.minimum(norm_data[:, marker_id], 1)\n norm_data[:, marker_id] = np.maximum(norm_data[:, marker_id], 0)\n\n with open(output_csvPath, 'w') as f:\n for marker_id, marker_name in enumerate(marker_list):\n f.write(marker_name)\n if marker_id != (len(marker_list) - 1):\n f.write(',')\n f.write('\\n')\n for norm_row in norm_data:\n for elem_id, norm_elem in enumerate(norm_row):\n f.write(str(norm_elem))\n if elem_id != (norm_row.shape[0] - 1):\n f.write(',')\n f.write('\\n')\n\n# input_csvPath = 'Sample_23.csv'\n# output_csvPath = 'Sample_23_norm2.csv'\n# preNormalize(input_csvPath, output_csvPath)\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.percentile",
"numpy.genfromtxt",
"numpy.log10"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
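The commented-out lines at the end of the record above already hint at the intended call; a hedged, slightly fuller sketch with placeholder CSV paths and skip columns:

from minerva_analysis.server.utils.pre_normalization import preNormalize  # assumed package layout

preNormalize(
    "Sample_23.csv",        # raw per-cell measurements with a header row
    "Sample_23_norm.csv",   # percentile-normalized copy written here
    skip_columns=["CellID", "X_position", "Y_position"],  # identifier columns left untouched
)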
TheoViel/kaggle_birdcall_identification | [
"2de708b9871cf388f91b9b0a33e738a24cca565d"
] | [
"src/model_zoo/models.py"
] | [
"import torch\nimport torchvision\nimport torch.nn as nn\nimport pretrainedmodels\nimport resnest.torch as resnest_torch\n\nfrom efficientnet_pytorch import EfficientNet\n\nfrom params import DEVICE\n\n\ndef get_model(name, num_classes=1):\n \"\"\"\n Loads a pretrained model. \n Supports ResNest, ResNext-wsl, EfficientNet, ResNext and ResNet.\n\n Arguments:\n name {str} -- Name of the model to load\n\n Keyword Arguments:\n num_classes {int} -- Number of classes to use (default: {1})\n\n Returns:\n torch model -- Pretrained model\n \"\"\"\n if \"resnest\" in name:\n model = getattr(resnest_torch, name)(pretrained=True)\n elif \"wsl\" in name:\n model = torch.hub.load(\"facebookresearch/WSL-Images\", name)\n elif \"resnext\" in name or \"resnet\" in name:\n model = torch.hub.load(\"pytorch/vision:v0.6.0\", name, pretrained=True)\n elif \"efficientnet\" in name:\n model = EfficientNet.from_pretrained(name)\n else:\n raise NotImplementedError\n\n if \"efficientnet\" not in name and \"se\" not in name:\n nb_ft = model.fc.in_features\n del model.fc\n model.fc = nn.Linear(nb_ft, num_classes)\n else:\n nb_ft = model._fc.in_features\n del model._fc\n model._fc = nn.Linear(nb_ft, num_classes)\n\n return model\n"
] | [
[
"torch.nn.Linear",
"torch.hub.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
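A hedged sketch of calling the get_model loader above; the architecture name and class count are illustrative, the import path is inferred from src/model_zoo/models.py, and the module's own dependencies (params, pretrainedmodels, resnest, efficientnet_pytorch) are assumed to be installed:

from model_zoo.models import get_model  # assumed package layout

model = get_model("resnet50", num_classes=264)  # other ResNet/ResNeXt names take the same torch.hub path
model.eval()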
wbrandenburger/MTPIA | [
"02c773ce60b7efd5b15f270f047a6da5a8f00b7e"
] | [
"dl_multi/archive/train_single_task_classification.py"
] | [
"# ===========================================================================\n# train.py ----------------------------------------------------------------\n# ===========================================================================\n\n# import ------------------------------------------------------------------\n# ---------------------------------------------------------------------------\nfrom dl_multi.__init__ import _logger \nimport dl_multi.tftools.tfrecord\nimport dl_multi.tftools.augmentation\n\nimport os\nimport tensorflow as tf\n\n# function ----------------------------------------------------------------\n# ---------------------------------------------------------------------------\ndef train(\n param_log,\n param_batch,\n param_save, \n param_train\n ): \n \n _logger.debug(\"Start training multi task classification and regression model with settings:\\n'param_log':\\t'{}'\\n'param_batch':\\t'{}',\\n'param_save':\\t'{}',\\n'param_train':\\t'{}'\".format(param_log, param_batch, param_save,param_train))\n\n # settings ------------------------------------------------------------\n # -----------------------------------------------------------------------\n\n # Create the log and checkpoint folders if they do not exist\n folder = dl_multi.utils.general.Folder()\n checkpoint = folder.set_folder(**param_train[\"checkpoint\"])\n log_dir = folder.set_folder(**param_log)\n\n img, PLACEHOLDER, truth = dl_multi.tftools.tfrecord.read_tfrecord_queue(tf.train.string_input_producer([param_train[\"tfrecords\"]]))\n\n img = dl_multi.plugin.get_module_task(\"tftools\", param_train[\"input\"][\"method\"], \"tfnormalization\")(img, **param_train[\"input\"][\"param\"])\n truth = dl_multi.plugin.get_module_task(\"tftools\", param_train[\"output\"][\"method\"], \"tfnormalization\")(PLACEHOLDER, **param_train[\"output\"][\"param\"])\n\n img, _, truth = dl_multi.tftools.augmentation.rnd_crop_rotate_90_with_flips_height(img, PLACEHOLDER, truth + 1, param_train[\"image-size\"], 0.95, 1.1)\n\n # Create batches by randomly shuffling tensors. 
The capacity specifies the maximum of elements in the queue\n img_batch,truth_batch = tf.train.shuffle_batch(\n [img, truth], **param_batch)\n\n # execution -----------------------------------------------------------\n # ----------------------------------------------------------------------- \n with tf.variable_scope(\"net\"):\n pred, argmax = dl_multi.plugin.get_module_task(\"models\", *param_train[\"model\"])(img_batch)\n\n mask = tf.to_float(tf.squeeze(tf.greater(truth_batch, 0.)))\n truth = tf.to_int32(tf.squeeze(tf.maximum(truth_batch-1, 0), axis=3))\n\n loss = tf.reduce_mean(\n tf.losses.compute_weighted_loss(\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=truth, \n logits=pred\n ),\n weights = mask\n )\n )\n\n acc = 1 - ( tf.count_nonzero((tf.to_float(argmax)-tf.to_float(truth)), dtype=tf.float32)\n / (param_train[\"image-size\"][0] * param_train[\"image-size\"][1] * param_batch[\"batch_size\"]))\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_step_both = tf.contrib.opt.AdamWOptimizer(0).minimize(loss)\n \n tf.summary.scalar('loss', loss)\n tf.summary.scalar('accuracy', acc)\n merged_summary_op = tf.summary.merge_all()\n summary_string_writer = tf.summary.FileWriter(log_dir)\n\n # tfsession -----------------------------------------------------------\n # -----------------------------------------------------------------------\n # The op for initializing the variables.\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()) \n saver = dl_multi.tftools.tfsaver.Saver(tf.train.Saver(), **param_save, logger=_logger)\n with tf.Session() as sess:\n sess.run(init_op)\n \n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n \n loss_v = 0\n acc_v = 0\n loss_v_r = 0\n\n # iterate epochs\n for epoch in saver:\n loss_v, acc_v, summary_string, _ = sess.run([loss, acc, merged_summary_op, train_step_both])\n \n summary_string_writer.add_summary(summary_string, epoch._index)\n \n print(\"Step: {}, Loss: {:.3f}, Accuracy: {:.3f}\".format(epoch._index, loss_v, acc_v))\n saver.save(sess, checkpoint, step=True)\n\n coord.request_stop()\n coord.join(threads)\n saver.save(sess, checkpoint)\n # tfsession -----------------------------------------------------------\n # -----------------------------------------------------------------------\n summary_string_writer.close()"
] | [
[
"tensorflow.control_dependencies",
"tensorflow.summary.scalar",
"tensorflow.greater",
"tensorflow.get_collection",
"tensorflow.to_float",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.train.shuffle_batch",
"tensorflow.train.Coordinator",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.opt.AdamWOptimizer",
"tensorflow.summary.merge_all",
"tensorflow.train.string_input_producer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.summary.FileWriter",
"tensorflow.local_variables_initializer",
"tensorflow.train.start_queue_runners",
"tensorflow.maximum",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
HuthLab/ContactPose | [
"722c755fb66032875fe08a3a3fd5fd78fbb69073"
] | [
"utilities/dataset.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# Code by Samarth Brahmbhatt\n\"\"\"\nContactPose dataset loading utilities\n\"\"\"\nimport os\nimport json\nimport numpy as np\nimport pickle\n\nfrom . import misc as mutils\n\nosp = os.path\n\n\ndef get_object_names(p_num, intent, ignore_hp=True):\n \"\"\"\n returns list of objects grasped by this participant with this intent\n \"\"\"\n sess_dir = 'full{:d}_{:s}'.format(p_num, intent)\n sess_dir = osp.join(osp.dirname(__file__), '..', 'data', 'contactpose_data', sess_dir)\n ignored_objects = ('hands', 'palm_print') if ignore_hp else ()\n return [o for o in next(os.walk(sess_dir))[1] if o not in ignored_objects]\n\n\ndef get_intents(p_num, object_name):\n \"\"\"\n returns list of intents with which this participant grasped object\n \"\"\"\n out = []\n for ins in ('use', 'handoff'):\n sess_dir = 'full{:d}_{:s}'.format(p_num, ins)\n sess_dir = osp.join(osp.dirname(__file__), '..', 'data', 'contactpose_data', sess_dir, object_name)\n if osp.isdir(sess_dir):\n out.append(ins)\n return out\n\n \ndef get_p_nums(object_name, intent):\n \"\"\"\n returns list of participants who grasped this object with this intent\n \"\"\"\n out = []\n for p_num in range(1, 51):\n sess_dir = 'full{:d}_{:s}'.format(p_num, intent)\n sess_dir = osp.join(osp.dirname(__file__), '..', 'data', 'contactpose_data', sess_dir, object_name)\n if osp.isdir(sess_dir):\n out.append(p_num)\n return out\n\n\nclass ContactPose(object):\n \"\"\"\n Base class for accessing the ContactPose dataset\n \"\"\"\n _mano_dicts = None # class variable so that large data is not loaded repeatedly\n def __init__(self, p_num, intent, object_name, mano_pose_params=15,\n load_mano=True):\n \"\"\"\n load_mano: Flag can be used to prevent loading MANO hand models, which is\n time consuming\n \"\"\"\n if (object_name == 'palm_print') or (object_name == 'hands'):\n print('This class is not meant to be used with palm_print or hands')\n raise ValueError\n self.p_num = p_num\n self.intent = intent\n self.object_name = object_name\n self._mano_pose_params = mano_pose_params\n \n p_id = 'full{:d}_{:s}'.format(p_num, intent)\n self.data_dir = osp.join(osp.dirname(__file__), '..', 'data', 'contactpose_data', p_id, object_name)\n assert(osp.isdir(self.data_dir))\n \n # read grasp data\n with open(self.annotation_filename, 'r') as f:\n ann = json.load(f)\n self._n_frames = len(ann['frames'])\n self._valid_cameras = [cn for cn,cv in ann['cameras'].items() if cv['valid']]\n self._is_object_pose_optimized = [f['object_pose_optimized'] for\n f in ann['frames']]\n self._valid_hands = [hand_idx for hand_idx, hand in enumerate(ann['hands'])\n if hand['valid']]\n\n im_filenames = {}\n for camera_name in self.valid_cameras:\n im_dir = osp.join(self.data_dir, 'images_full', camera_name, '{:s}')\n im_filenames[camera_name] = [\n osp.join(im_dir, 'frame{:03d}.png'.format(i)) for i in range(len(self))]\n self._im_filenames = [{k: v for k,v in zip(im_filenames.keys(), vv)} for\n vv in zip(*im_filenames.values())]\n\n oX = [] # 3D joints w.r.t. object\n all_oTh = []\n for hand_idx, hand in enumerate(ann['hands']):\n if hand['valid']:\n hX = np.asarray(hand['joints']) # hand joints w.r.t. hand root\n if hand['moving']:\n # object pose w.r.t. 
hand\n oThs = [np.linalg.inv(mutils.pose_matrix(f['hTo'][hand_idx])) for f\n in ann['frames']]\n all_oTh.append(oThs)\n oX.append([mutils.tform_points(oTh, hX) for oTh in oThs])\n else:\n oX.append([hX for _ in range(len(self))])\n all_oTh.append([np.eye(4) for _ in range(len(self))])\n else:\n oX.append([None for _ in range(len(self))])\n all_oTh.append([np.eye(4) for _ in range(len(self))])\n self._oX = list(map(tuple, zip(*oX)))\n self._oTh = list(map(tuple, zip(*all_oTh)))\n\n # world pose w.r.t. object\n oTws = [mutils.pose_matrix(f['oTw']) for f in ann['frames']]\n self._cTo = {} # object pose w.r.t. camera\n self._K = {} # camera intrinsics\n for camera_name in self.valid_cameras:\n cam = ann['cameras'][camera_name]\n self._K[camera_name] = np.array([[cam['K']['fx'], 0, cam['K']['cx']],\n [0, cam['K']['fy'], cam['K']['cy']],\n [0, 0, 1]])\n # camera pose w.r.t. world\n wTc = mutils.pose_matrix(cam['wTc'])\n self._cTo[camera_name] = [np.linalg.inv(oTw @ wTc) for oTw in oTws]\n\n # projections\n self._ox = [] # joint projections\n self._om = [] # marker projections\n # 3D marker locations w.r.t. object\n oM = np.loadtxt(osp.join(osp.dirname(__file__), '..', 'data',\n 'object_marker_locations',\n '{:s}_final_marker_locations.txt'.\n format(object_name)))[:, :3]\n for frame_idx in range(len(self)):\n this_ox = {}\n this_om = {}\n for camera_name in self.valid_cameras:\n this_om[camera_name] = mutils.project(self.P(camera_name, frame_idx),\n oM)\n x = []\n for hand_idx in range(2):\n if hand_idx not in self._valid_hands:\n x.append(None)\n else:\n x.append(mutils.project(self.P(camera_name, frame_idx),\n self._oX[frame_idx][hand_idx]))\n this_ox[camera_name] = tuple(x)\n self._ox.append(this_ox)\n self._om.append(this_om)\n\n\n # check if MANO code and models are present\n if mutils.MANO_PRESENT and load_mano:\n # load MANO data for the class\n if ContactPose._mano_dicts is not None:\n return\n ContactPose._mano_dicts = []\n for hand_name in ('LEFT', 'RIGHT'):\n filename = osp.join(osp.dirname(__file__), '..', 'thirdparty',\n 'mano', 'models',\n 'MANO_{:s}.pkl'.format(hand_name))\n with open(filename, 'rb') as f:\n ContactPose._mano_dicts.append(pickle.load(f, encoding='latin1'))\n elif load_mano:\n print('MANO code was not detected, please follow steps in README.md. '\n 'mano_meshes() will return (None, None)')\n\n\n def __len__(self):\n \"\"\"\n Number of RGB-D time frames\n \"\"\"\n return self._n_frames\n\n def __repr__(self):\n hand_names = ['left', 'right']\n hand_str = ' '.join([hand_names[i] for i in self._valid_hands])\n return 'Participant {:d}, intent {:s}, object {:s}\\n'.format(self.p_num,\n self.intent,\n self.object_name) +\\\n '{:d} frames\\n'.format(len(self)) +\\\n 'Cameras present: {:s}\\n'.format(' '.join(self.valid_cameras)) +\\\n 'Hands present: {:s}'.format(hand_str)\n\n @property\n def contactmap_filename(self):\n return osp.join(self.data_dir, '{:s}.ply'.format(self.object_name))\n\n @property\n def annotation_filename(self):\n return osp.join(self.data_dir, 'annotations.json')\n\n @property\n def mano_filename(self):\n \"\"\"\n return name of file containing MANO fit params\n \"\"\"\n return osp.join(self.data_dir,\n 'mano_fits_{:d}.json'.format(self._mano_pose_params))\n\n @property\n def valid_cameras(self):\n \"\"\"\n return list of cameras valid for this grasp\n \"\"\"\n return self._valid_cameras \n\n @property\n def mano_params(self):\n \"\"\"\n List of 2 [left, right]. 
Each element is None or a dict containing\n 'pose' (PCA pose space of dim self._mano_pose_params),\n 'betas' (PCA shape space), and root transform 'hTm'\n \"\"\"\n with open(self.mano_filename, 'r') as f:\n params = json.load(f)\n out = []\n for p in params:\n if not p['valid']:\n out.append(None)\n continue\n \n # MANO root pose w.r.t. hand\n hTm = np.linalg.inv(mutils.pose_matrix(p['mTc']))\n out.append({\n 'pose': p['pose'],\n 'betas': p['betas'],\n 'hTm': hTm,\n })\n return out\n \n def im_size(self, camera_name):\n \"\"\"\n (width, height) in pixels\n \"\"\"\n return (960, 540) if camera_name == 'kinect2_middle' else (540, 960)\n \n def image_filenames(self, mode, frame_idx):\n \"\"\"\n return dict with full image filenames for all valid cameras\n mode = color or depth\n \"\"\"\n return {k: v.format(mode) for k,v in self._im_filenames[frame_idx].items()}\n\n def hand_joints(self, frame_idx=None):\n \"\"\"\n 3D hand joints w.r.t. object\n randomly sampled time frame if frame_idx is None\n tuple of length 2, 21 joints per hand, None if hand is not present\n \"\"\"\n if frame_idx is None:\n frame_idx = np.random.choice(len(self))\n return self._oX[frame_idx]\n\n def K(self, camera_name):\n \"\"\"\n Camera intrinsics 3x3\n You will almost never need this. Use self.P() for projection\n \"\"\"\n return self._K[camera_name]\n\n def A(self, camera_name):\n \"\"\"\n Affine transform to be applied to 2D points after projection\n Included in self.P\n \"\"\"\n return mutils.get_A(camera_name, 960, 540)\n\n def P(self, camera_name, frame_idx):\n \"\"\"\n 3x4 3D -> 2D projection matrix\n Use this for all projection operations, not self.K\n \"\"\"\n P = self.K(camera_name) @ self.object_pose(camera_name, frame_idx)[:3]\n P = self.A(camera_name) @ P\n return P\n\n def object_pose(self, camera_name, frame_idx):\n \"\"\"\n Pose of object w.r.t. camera at frame frame_idx\n 4x4 homogeneous matrix\n \"\"\"\n return self._cTo[camera_name][frame_idx]\n\n def projected_hand_joints(self, camera_name, frame_idx):\n \"\"\"\n hand joints projected into camera image\n tuple of length 2\n 21x2 or None based on if hand is present in this grasp\n \"\"\"\n return self._ox[frame_idx][camera_name]\n\n def projected_object_markers(self, camera_name, frame_idx):\n \"\"\"\n object markers projected into camera image\n Nx2 where N in [5, 10]\n \"\"\"\n return self._om[frame_idx][camera_name]\n\n def mano_meshes(self, frame_idx=None):\n \"\"\"\n return list of 2 dicts. Element is None if that hand is absent,\n or contains 'vertices', 'faces', and 'joints'\n \"\"\"\n if frame_idx is None:\n frame_idx = np.random.choice(len(self))\n return mutils.load_mano_meshes(self.mano_params, ContactPose._mano_dicts,\n self._oTh[frame_idx])\n"
] | [
[
"numpy.asarray",
"numpy.eye",
"numpy.array",
"numpy.linalg.inv"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
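The ContactPose cell above builds its 3x4 projection as K @ cTo[:3] (plus an affine correction A) and moves joints between frames with 4x4 pose matrices and numpy.linalg.inv. A minimal stand-alone sketch of that projection pattern, with invented intrinsics and pose values and the A() step omitted:

import numpy as np

# Hypothetical intrinsics and object-to-camera pose (values are illustrative only).
K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])
cTo = np.eye(4)
cTo[:3, 3] = [0.0, 0.0, 0.5]                    # object 0.5 m in front of the camera

P = K @ cTo[:3]                                 # 3x4 projection, as in ContactPose.P()

oX = np.array([[0.0, 0.0, 0.0],                 # 3D points w.r.t. the object
               [0.1, 0.0, 0.0]])
oX_h = np.hstack([oX, np.ones((len(oX), 1))])   # Nx4 homogeneous coordinates
x = (P @ oX_h.T).T
x = x[:, :2] / x[:, 2:]                         # pixel coordinates
print(x)                                        # [[320. 240.] [440. 240.]]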
AguaClara/AguaClara-Design-code | [
"cc40815dfed5c4043daede0aabb23a1c13f6dca1"
] | [
"aguaclara/design/sed_tank.py"
] | [
"\"\"\" A sedimentation tank of an AguaClara water treatment plant\n\nExample:\n >>> from aguaclara.design.sed_tank import *\n >>> sed_tank = SedimentationTank(q = 60 * u.L / u.s)\n >>> round(sed_tank.diffuser_hl, 5)\n <Quantity(0.00926, 'centimeter')>\n\"\"\"\nfrom aguaclara.core.units import u\nimport aguaclara.core.constants as con\nimport aguaclara.core.materials as mat\nimport aguaclara.core.pipes as pipe\nfrom aguaclara.core import drills\nimport aguaclara.core.utility as ut\nfrom aguaclara.design.component import Component\nimport aguaclara.core.physchem as pc\nimport aguaclara.core.head_loss as hl\n\nimport numpy as np\nimport math\n\n\nclass SedimentationTank(Component):\n \"\"\"Design an AguaClara plant's sedimentation tank.\n\n An sedimentation tank's design relies on the sedimentation channel's design\n in the same plant, but assumed/default values may be used to design an\n sedimentation tank by itself. To design these components in tandem, use\n :class:`aguaclara.design.sed.Sedimentor`.\n\n Constants:\n - ``INLET_MAN_Q_RATIO (float)``: The ratio of the flow in the inlet\n manifold.\n - ``OUTLET_MAN_HL (float * u.cm)``: The headloss of the outlet manifold\n - ``JET_REVERSER_ND (float * u.inch)``: The nominal diameter of the jet\n reverser.\n - ``JET_PLANE_RATIO (float)``: The ratio for the jet plane\n - ``JET_REVERSER_TO_DIFFUSERS_H (float * u.cm)``: The height between\n the jet reverser and diffusers.\n - ``WALL_THICKNESS (float * u.m)``: The thickness of the sed tank walls\n - ``DIFFUSER_L (float * u.cm)``: The length of a diffuser.\n\n Design Inputs:\n - ``q (float * u.L / u.s)``: Plant flow rate\n (recommended, defaults to 20L/s)\n - ``temp (float * u.degC)``: Water temperature (recommended, defaults to\n 20°C)\n - ``vel_upflow (float * u.mm / u.s)``: Upflow velocity\n (optional, defaults to 1mm/s)\n - ``l_inner (float * u.m)``: The inner length\n (optional, defaults to 5.8m)\n - ``w_inner (float * u.inch)``: The inner width\n (optional, defaults to 42in.)\n - ``diffuser_vel_max (float * u.cm / u.s)``: The max velocity of a\n diffuser (optional, defaults to 44.29 cm/s)\n - ``diffuser_n (int)``:The nunber of diffusers\n (optional, defaults to 108)\n - ``diffuser_wall_thickness (float * u.inch)``: The thickness of the\n wall of a diffuser (optional, defaults to 1.17in.)\n - ``diffuser_sdr (int)``: The standard dimension ratio of a diffuser\n (optional, defaults to 41)\n - ``inlet_man_hl (float * u.cm)``: The headloss of the inlet manifold\n (optional, defaults to 1cm)\n - ``inlet_man_sdr (float)``: The standard dimension ratio of the inlet\n manifold (optional, defaults to 41)\n - ``jet_reverser_sdr (int)``: The standard dimension ratio of the jet\n reverser (optional, defaults to 26)\n - ``plate_settler_angle (float * u.deg)``: The angle of the plate\n settler (optional, defaults to 60°)\n - ``plate_settler_s (float * u.cm)``: Spacing in between plate settlers\n (optional, defaults to 2.5cm)\n - ``plate_settler_thickness (float * u.mm)``: Thickness of a plate\n settler (optional, defaults to 2mm)\n - ``plate_settler_cantilever_l_max (float * u.cm)``: The max length of\n the plate settler cantilever (optional, defaults to 20cm)\n - ``plate_settler_vel_capture (float * u.mm / u.s)``: The capture\n velocity of a plate settler (optional, defaults to 0.12mm/s)\n - ``outlet_man_orifice_hl (float * u.cm)``: The headloss of the\n orifices in the outlet manifold (optional, defaults to 4cm)\n - ``outlet_man_orifice_q_ratio_max (float)``: The max ratio of the flow\n rate for the 
orifices of the outlet manifold (optional, defaults to 0.8)\n - ``outlet_man_orifice_n_est (int)``: The estimated number of orifices\n for the outlet manifold (optional, defaults to 58)\n - ``outlet_man_sdr (int)``: The standard dimension ratio of the outlet\n manifold (optional, defaults to 41)\n - ``slope_angle (float * u.deg)``: The angle at the bottom of the sed tank\n (optional, defaults to 50°)\n - ``side_slope_to_floc_weir_h_min (float * u.cm)``: The minimum height\n between the side slope and the floc weir. (optional, defaults to 5cm)\n - ``sed_chan_w_outer (float * u.cm)``: The outer width of the\n sedimentation channel (optional, defaults to 60cm)\n - ``sed_chan_weir_thickness (float * u.cm)``: The thickness of the\n sedimentation channel weir (optional, defaults to 5cm)\n - ``floc_weir_to_plate_frame_h (float * u.cm)``: The height from the\n top of the floc weir to the plate settler frame (optional, defaults\n to 10cm)\n - ``hopper_slope_vertical_angle (float * u.deg)``: The angle of the\n hopper wall slopes to vertical (optional, defaults to 60°)\n\n \"\"\"\n INLET_MAN_Q_RATIO = 0.8\n OUTLET_MAN_HL = 4. * u.cm\n JET_REVERSER_ND = 3. * u.inch\n JET_PLANE_RATIO = 0.0124\n JET_REVERSER_TO_DIFFUSERS_H = 3.0* u.cm\n WALL_THICKNESS = 0.15 * u.m\n DIFFUSER_L = 15.0 * u.cm\n\n def __init__(self, **kwargs):\n self.vel_upflow=1.0 * u.mm / u.s\n self.l_inner=5.8 * u.m\n self.w_inner=42.0 * u.inch\n\n self.diffuser_vel_max=44.29 * u.cm / u.s\n self.diffuser_n=108\n self.diffuser_wall_thickness=1.17 * u.inch\n self.diffuser_sdr=41\n\n self.inlet_man_hl=1. * u.cm\n self.inlet_man_sdr = 41\n self.jet_reverser_sdr = 26\n\n self.plate_settler_angle=60.0 * u.deg\n self.plate_settler_s=2.5 * u.cm\n self.plate_settler_thickness=2.0 * u.mm\n self.plate_settler_cantilever_l_max=20.0 * u.cm\n self.plate_settler_vel_capture=0.12 * u.mm / u.s\n\n self.outlet_man_orifice_hl=4.0 * u.cm\n self.outlet_man_orifice_q_ratio_max=0.8\n self.outlet_man_orifice_n_est = 58\n self.outlet_man_sdr=41\n\n self.slope_angle=50. 
* u.deg\n self.side_slope_to_floc_weir_h_min = 5.0 * u.cm\n self.sed_chan_w_outer = 60.0 * u.cm\n self.sed_chan_weir_thickness = 5.0 * u.cm\n self.floc_weir_to_plate_frame_h = 10.0 * u.cm\n self.hopper_slope_vertical_angle = 60.0 * u.deg\n\n super().__init__(**kwargs)\n\n @property\n def q_tank(self):\n \"\"\"The flow rate present in the tank.\"\"\"\n q_tank = self.l_inner * self.w_inner * self.vel_upflow\n return q_tank.to(u.L / u.s)\n\n @property\n def diffuser_hl(self):\n \"\"\"The headloss of the diffuser.\"\"\"\n return self.inlet_man_hl / self.diffuser_n\n\n @property\n def diffuser_vel(self):\n \"\"\"The velocity of the diffuser\"\"\"\n diffuser_vel = np.sqrt(2 * con.GRAVITY * self.diffuser_hl)\n return diffuser_vel.to(u.mm / u.s)\n\n @property\n def diffuser_w_inner(self):\n \"\"\"The inner width(neglecting walls) of the diffuser.\"\"\"\n diffuser_w_inner = self.w_inner * self.vel_upflow / self.diffuser_vel\n return diffuser_w_inner.to(u.cm)\n\n @property\n def diffuser_a(self):\n \"\"\"The area of the diffuser\"\"\"\n diffuser_a = self.q_tank / (self.diffuser_vel * self.diffuser_n)\n return diffuser_a.to(u.cm ** 2)\n\n @property\n def inlet_man_v_max(self):\n \"\"\"The maximumum velocity in the inlet manifold.\"\"\"\n vel_manifold_max = np.sqrt(4 * con.GRAVITY * self.diffuser_hl *\n (1 - self.INLET_MAN_Q_RATIO ** 2) /\n (self.INLET_MAN_Q_RATIO ** 2 + 1)\n )\n return vel_manifold_max.to(u.m / u.s)\n\n @property\n def inlet_man_nd(self):\n \"\"\"The nominal diameter of the inlet manifold\"\"\"\n diam_inner = np.sqrt(4 * self.q_tank / (np.pi * self.inlet_man_v_max))\n inlet_man_nd = pipe.ND_SDR_available(diam_inner, self.inlet_man_sdr)\n return inlet_man_nd.to(u.cm)\n\n @property\n def outlet_man_nd(self):\n \"\"\"The nominal diameter of the outlet manifold.\"\"\"\n outlet_man_nd = pc.manifold_nd(\n self.q_tank,\n self.OUTLET_MAN_HL,\n self.l_inner,\n self.outlet_man_orifice_q_ratio_max,\n pc.viscosity_kinematic_water(self.temp),\n mat.PVC_PIPE_ROUGH.to(u.m),\n hl.PIPE_EXIT_K_MINOR,\n self.outlet_man_orifice_n_est,\n self.outlet_man_sdr\n )\n return outlet_man_nd\n\n @property\n def outlet_man_orifice_d(self):\n \"\"\"The diameter of the orifices in the outlet manifold.\"\"\"\n Q_orifice = self.q_tank / self.outlet_man_orifice_n_est\n D_orifice = pc.diam_circle(Q_orifice/(con.VC_ORIFICE_RATIO * \\\n np.sqrt(2 * con.GRAVITY* self.outlet_man_orifice_hl)))\n return ut.ceil_nearest(D_orifice, drills.DRILL_BITS_D_METRIC)\n\n @property\n def plate_l(self):\n \"\"\"The length of a plate in the plate settlers.\"\"\"\n L_sed_plate = ((self.plate_settler_s * ((self.vel_upflow / \\\n self.plate_settler_vel_capture) - 1)\n + self.plate_settler_thickness * (\n self.vel_upflow / self.plate_settler_vel_capture))\n / (np.sin(self.plate_settler_angle) * \\\n np.cos(self.plate_settler_angle))\n ).to(u.m)\n return L_sed_plate\n\n @property\n def outlet_man_orifice_q(self):\n \"\"\"The flow rate in the orifices of the outlet manifold.\"\"\"\n outlet_man_orifice_q = pc.flow_orifice_vert(\n self.outlet_man_orifice_d,\n self.outlet_man_orifice_hl,\n con.VC_ORIFICE_RATIO\n )\n return outlet_man_orifice_q.to(u.L / u.s)\n\n @property\n def outlet_man_orifice_spacing(self):\n \"\"\"The spacing between orifices on the outlet manifold.\"\"\"\n outlet_man_orifice_spacing = (\n self.l_inner -\n pipe.socket_depth(self.outlet_man_nd) -\n pipe.cap_thickness(self.outlet_man_nd) -\n self.outlet_man_orifice_d\n ) / ((self.q_tank / self.outlet_man_orifice_q) - 1)\n return outlet_man_orifice_spacing\n\n @property\n def 
outlet_man_orifice_n(self):\n \"\"\"The number of orifices on the outlet manifold.\"\"\"\n outlet_orifice_n = math.floor(\n (\n self.l_inner -\n pipe.socket_depth(self.outlet_man_nd) -\n pipe.cap_thickness(self.outlet_man_nd) -\n self.outlet_man_orifice_d\n ) / self.outlet_man_orifice_spacing\n ) + 1\n return outlet_orifice_n\n\n @property\n def outlet_orifice_hl(self):\n \"\"\"The headloss for the orifices of the outlet\"\"\"\n outlet_orifice_hl = pc.head_orifice(\n self.outlet_man_nd,\n con.VC_ORIFICE_RATIO,\n self.q_tank / self.outlet_man_orifice_n\n )\n return outlet_orifice_hl.to(u.mm)\n\n @property\n def side_slopes_w(self):\n \"\"\"The width of the side slopes.\"\"\"\n side_slopes_w = (\n self.w_inner -\n pipe.ID_SDR(self.JET_REVERSER_ND, self.jet_reverser_sdr)\n ) / 2\n return side_slopes_w.to(u.m)\n\n @property\n def side_slopes_h(self):\n \"\"\"The height of the side slopes.\"\"\"\n side_slopes_h = np.tan(self.slope_angle) * self.side_slopes_w\n return side_slopes_h.to(u.m)\n\n @property\n def inlet_man_h(self):\n \"\"\"The height of the inlet manifold height.\"\"\"\n inlet_man_h = self.JET_REVERSER_TO_DIFFUSERS_H + self.DIFFUSER_L + \\\n ( pipe.OD(self.inlet_man_nd)/ 2 )\n return inlet_man_h\n\n @property\n def floc_weir_h(self):\n \"\"\"The height of the floc weir.\"\"\"\n floc_weir_h = max(\n self.inlet_man_h + (pipe.OD(self.inlet_man_nd) / 2) + \\\n mat.CONCRETE_THICKNESS_MIN,\n self.side_slopes_h + self.side_slope_to_floc_weir_h_min\n )\n return floc_weir_h\n"
] | [
[
"numpy.tan",
"numpy.cos",
"numpy.sqrt",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
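The SedimentationTank cell above derives its diffuser velocity from the orifice relation v = sqrt(2 * g * h_L), with the inlet-manifold headloss split evenly across the diffusers. A rough numeric check using plain floats instead of the pint quantities the aguaclara code uses (the gravity constant below is assumed, not read from aguaclara.core.constants):

import numpy as np

GRAVITY = 9.81          # m/s^2 (assumed value)
inlet_man_hl = 0.01     # default inlet manifold headloss: 1 cm, in metres
diffuser_n = 108        # default diffuser count

diffuser_hl = inlet_man_hl / diffuser_n
diffuser_vel = np.sqrt(2 * GRAVITY * diffuser_hl)
print(round(diffuser_vel * 1000, 1), "mm/s")    # roughly 42.6 mm/s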
qenett/pandas-intro-clone | [
"b79d5383f37faf797eb443165fd4e3f3546f41a8"
] | [
"lekce01_zakladni_dotazy/05_dotazy_jako_v_sql.py"
] | [
"import pandas\n\n# ## 5. Dotazy jako v SQL\n\n# Srovnáním DataFrame s tabulkou podobnost s databázemi nekončí. Pandas umožňují dotazovat se nad daty podobným způsobem jako SQL.\n#\n# Vrátíme názvy měst jako index pro lepší srozumitelnost.\n\nmesta = pandas.read_csv(\"mesta.csv\", index_col=\"mesto\", encoding=\"utf-8\")\nprint(mesta)\n\n# ### Výběr sloupečků\n\n# **SQL:** `SELECT linky, obyvatel FROM mesta;`\n\nprint(mesta[[\"linky\", \"obyvatel\"]])\n\n# ### Podmínky\n\n# **SQL:** `SELECT * FROM mesta WHERE linky > 10;`\n\nprint(mesta[mesta[\"linky\"] > 10])\n\n# **SQL:** `SELECT kraj, vymera FROM mesta WHERE vymera >= 100 AND vymera <= 200;`\n\nprint(mesta[(mesta[\"vymera\"] >= 100) & (mesta[\"vymera\"] <= 200)][[\"kraj\", \"vymera\"]])\n\nprint(mesta.loc[(mesta['vymera'] >= 100) & (mesta['vymera'] <= 200), [\"kraj\", \"vymera\"]])\n\n# ### Logické operátory v podmínkách\n\n# **SQL:** `SELECT linky FROM mesta WHERE kraj = 'JHM' OR kraj = 'OLK';`\n\nprint(mesta[(mesta['kraj'] == 'JHM') | (mesta['kraj'] == 'OLK')][['linky']])\n\n# **SQL:** `SELECT linky FROM mesta WHERE kraj IN ('JHM', 'ULK', 'OLK');`\n\nprint(mesta[mesta['kraj'].isin(['JHM', 'ULK', 'OLK'])][['linky']])\n\n# **SQL:** `SELECT linky FROM mesta WHERE kraj NOT IN ('JHM', 'ULK', 'OLK');`\n\nprint(mesta[~mesta['kraj'].isin(['JHM', 'ULK', 'OLK'])][['linky']])\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
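The pandas cell above translates SQL SELECT/WHERE clauses into boolean masks, .loc, and .isin(). The same idioms on a tiny inline DataFrame, so it runs without mesta.csv (the city names and numbers here are invented):

import pandas as pd

mesta = pd.DataFrame({
    "mesto": ["Brno", "Olomouc", "Usti nad Labem", "Praha"],
    "kraj": ["JHM", "OLK", "ULK", "PHA"],
    "linky": [11, 9, 13, 140],
    "vymera": [230.2, 103.4, 93.9, 496.2],
}).set_index("mesto")

# SELECT kraj, vymera FROM mesta WHERE vymera >= 100 AND vymera <= 200;
print(mesta.loc[(mesta["vymera"] >= 100) & (mesta["vymera"] <= 200), ["kraj", "vymera"]])

# SELECT linky FROM mesta WHERE kraj NOT IN ('JHM', 'ULK', 'OLK');
print(mesta[~mesta["kraj"].isin(["JHM", "ULK", "OLK"])][["linky"]])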
Yotsuyubi/drumgan | [
"eb6a9aa8b5c0d64bad65e4dbd14d444b7a859a29"
] | [
"tests/test_basic.py"
] | [
"# -*- coding: utf-8 -*-\nimport unittest\nimport numpy as np\nfrom .context import drumgan\ngan = drumgan.DrumGAN()\n\n\nclass BasicTestSuite(unittest.TestCase):\n\n def test_generate(self):\n z = np.random.rand(1, 128)\n sample, z_out = gan.generate(z)\n self.assertEqual(sample.shape, (16384,))\n self.assertEqual(z_out.shape, (1, 128))\n\n def test_random_generate(self):\n sample, z_out = gan.random_generate()\n self.assertEqual(sample.shape, (16384,))\n self.assertEqual(z_out.shape, (1, 128))\n\n def test_optim_feature(self):\n y = np.random.rand(1, 1, 16384)\n z = gan.optim_feature(y, iteration=1)\n self.assertEqual(z.shape, (1, 128))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
camilosalazar98/sunpy | [
"085093e2407874d41604b870627873bbba0c5f8d"
] | [
"examples/maps/map_data_histogram.py"
] | [
"\"\"\"\n=============\nMap Histogram\n=============\n\nHow to inspect the histogram of the data of a map.\n\"\"\"\nfrom __future__ import print_function, division\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.map\nfrom astropy.coordinates import SkyCoord\nfrom sunpy.data.sample import AIA_171_IMAGE\n\n###############################################################################\n# We first create the Map using the sample data and we will create a submap\n# of a quiet region.\naia = sunpy.map.Map(AIA_171_IMAGE)\nbl = SkyCoord(-400 * u.arcsec, 0 * u.arcsec, frame=aia.coordinate_frame)\ntr = SkyCoord(0 * u.arcsec, 400 * u.arcsec, frame=aia.coordinate_frame)\naia_smap = aia.submap(bl, tr)\n\n###############################################################################\n# We now create a histogram of the data in this region.\ndmin = aia_smap.min()\ndmax = aia_smap.max()\nnum_bins = 50\nhist, bins = np.histogram(aia_smap.data, bins=np.linspace(dmin, dmax, num_bins))\nwidth = 0.7 * (bins[1] - bins[0])\nx = (bins[:-1] + bins[1:]) / 2\n\n###############################################################################\n# Let's plot the histogram as well as some standard values such as mean\n# upper, and lower value and the one-sigma range.\nplt.figure()\nplt.bar(x, hist, align='center', width=width, label='Histogram')\nplt.xlabel('Intensity')\nplt.axvline(dmin, label='Data min={:.2f}'.format(dmin), color='black')\nplt.axvline(dmax, label='Data max={:.2f}'.format(dmax), color='black')\nplt.axvline(aia_smap.data.mean(),\n label='mean={:.2f}'.format(aia_smap.data.mean()), color='green')\none_sigma = np.array([aia_smap.data.mean() - aia_smap.data.std(),\n aia_smap.data.mean() + aia_smap.data.std()])\nplt.axvspan(one_sigma[0], one_sigma[1], alpha=0.3, color='green',\n label='mean +/- std = [{0:.2f}, {1:.2f}]'.format(\n one_sigma[0], one_sigma[1]))\nplt.axvline(one_sigma[0], color='green')\nplt.axvline(one_sigma[1], color='red')\nplt.legend()\nplt.show()\n\n###############################################################################\n# Finally let's overplot what the one-sigma range means on the map\nfig = plt.figure()\nfig.add_subplot(projection=aia_smap)\naia_smap.plot()\nlevels = one_sigma / dmax * u.percent * 100\naia_smap.draw_contours(levels=levels, colors=['red', 'green'])\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axvline",
"numpy.linspace",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
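The sunpy example above histograms the map data over explicit bin edges from numpy.linspace and marks the mean plus or minus one standard deviation. The same binning pattern on synthetic data, so it runs without the sunpy sample files (the array values are random, not AIA data):

import numpy as np

data = np.random.default_rng(0).normal(loc=100.0, scale=15.0, size=(128, 128))
dmin, dmax = data.min(), data.max()
hist, bins = np.histogram(data, bins=np.linspace(dmin, dmax, 50))
centers = (bins[:-1] + bins[1:]) / 2            # bar positions, as in the example
width = 0.7 * (bins[1] - bins[0])               # bar width, as in the example
one_sigma = np.array([data.mean() - data.std(), data.mean() + data.std()])
print(hist.sum(), one_sigma)                    # 16384 samples, roughly [85, 115]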
Lee-Gihun/MixCo-Mixup-Contrast | [
"1d8eacce057d385f187ee07b0365c9e4e1670981"
] | [
"simclr/data_aug/data_loader.py"
] | [
"import os\n\nimport torch\nimport torchvision.transforms as transforms\n\nfrom torchvision.datasets import CIFAR10, CIFAR100, ImageFolder\nfrom .tinyimagenet import TinyImageNet\nfrom .augmentation import *\n\n# Data loader\nDATASETS = {'cifar10': CIFAR10, 'cifar100': CIFAR100, 'tiny-imagenet': TinyImageNet, 'imagenet': None}\nMEAN = {'cifar10': [0.4914, 0.4822, 0.4465], 'cifar100': [0.5071, 0.4867, 0.4408], 'tiny-imagenet': [0.485, 0.456, 0.406], 'imagenet': [0.485, 0.456, 0.406]}\nSTD = {'cifar10': [0.2023, 0.1994, 0.2010], 'cifar100':[0.2675, 0.2565, 0.2761], 'tiny-imagenet': [0.229, 0.224, 0.225], 'imagenet': [0.229, 0.224, 0.225]}\n\n__all__ = ['data_loader']\n\n \ndef data_loader(dataset, data_path, batch_size, num_workers, download=False, distributed=True, supervised=False):\n # for self-supervised learning (pretrain)\n if not supervised:\n s = 1\n \n normalize = transforms.Normalize(MEAN[dataset], STD[dataset])\n \n # get a set of data augmentation transformations as described in the SimCLR paper.\n color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)\n augmentation = [transforms.RandomHorizontalFlip(),\n transforms.RandomApply([color_jitter], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n GaussianBlur(kernel_size=int(0.1 * 224)),\n transforms.ToTensor(),\n normalize]\n augmentation.insert(0, transforms.RandomResizedCrop(224, scale=(0.2, 1.)))\n \n train_transform = SimCLRDataTransform(transforms.Compose(augmentation))\n\n if dataset == 'imagenet':\n traindir = os.path.join(data_path, 'train')\n train_dataset = ImageFolder(traindir, transform=train_transform)\n else:\n train_dataset = DATASETS[dataset](data_path, train=True, download=download, transform=train_transform)\n\n # for distributed learning\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if distributed else None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, shuffle=(train_sampler is None),\n num_workers=num_workers, pin_memory=True, sampler=train_sampler, drop_last=True)\n\n return train_loader, train_sampler\n \n # for supervised learning (lincls)\n else:\n normalize = transforms.Normalize(MEAN[dataset], STD[dataset])\n \n train_transform = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\n \n test_transform=transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize])\n \n if dataset == 'imagenet':\n traindir = os.path.join(data_path, 'train')\n testdir = os.path.join(data_path, 'val')\n train_dataset = ImageFolder(traindir, transform=train_transform)\n test_dataset = ImageFolder(testdir, transform=test_transform)\n else:\n train_dataset = DATASETS[dataset](data_path, train=True, download=download, transform=train_transform)\n test_dataset = DATASETS[dataset](data_path, train=False, download=download, transform=test_transform)\n\n # for distributed learning\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if distributed else None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, shuffle=(train_sampler is None),\n num_workers=num_workers, pin_memory=True, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=batch_size, shuffle=False,\n num_workers=num_workers, pin_memory=True)\n\n return train_loader, val_loader, train_sampler\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
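The MixCo data_loader cell above wires an optional DistributedSampler into a DataLoader and falls back to shuffling when no sampler is given. A minimal single-process sketch of that wiring on a TensorDataset, so it runs without the repo's datasets (the distributed branch is shown but left disabled because it needs torch.distributed.init_process_group first):

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(256, 3, 32, 32), torch.randint(0, 10, (256,)))

distributed = False   # set True only inside an initialized process group
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if distributed else None

loader = DataLoader(dataset, batch_size=32, shuffle=(sampler is None),
                    num_workers=0, pin_memory=True, sampler=sampler, drop_last=True)

images, labels = next(iter(loader))
print(images.shape, labels.shape)   # torch.Size([32, 3, 32, 32]) torch.Size([32])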