repo_name (string, length 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---|
issamhammad/conv3d-video-action-recognition | [
"362087449c7df68b3c43d87149afff14bad792c2"
] | [
"python/pca_svm.py"
] | [
"from sklearn.decomposition import PCA\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import classification_report\nimport numpy as np\n\n\ndef run_pca(X_train, X_test, n_components):\n \"\"\"\n Apply PCA on features\n \n Parameters\n ----------\n X_train : array\n Array of train features vector\n X_test : array\n Array of test features vector\n n_components : int\n Target dimension of features vector\n \n Returns\n -------\n array, array\n Returns train and test array of features vectors after applying PCA transformation\n \"\"\"\n pca = PCA(n_components=n_components) \n X_train_pca = pca.fit_transform(X_train) \n X_test_pca = pca.transform(X_test)\n print(\"Train/test shapes after PCA: %s / %s\" % (X_train_pca.shape, X_test_pca.shape)) \n return X_train_pca, X_test_pca\n\n\ndef run_svm(X_train, Y_train, X_test, Y_test, labels=None):\n \"\"\"\n Trains a simple linear SVM and prints out classification report\n \n Parameters\n ----------\n X_train : array\n Array of train features vector\n Y_train : array\n Array of train labels\n X_test : array\n Array of test features vector\n Y_test : array\n Array of test labels\n labels : list, optional\n List of label names used when printing classification report, by default None\n \n Returns\n -------\n array\n Array of predicted labels\n \"\"\"\n clf = LinearSVC(random_state=0)\n clf.fit(X_train, Y_train) \n \n Y_test_pred = clf.predict(X_test)\n count_correct = np.sum(Y_test == Y_test_pred)\n \n # Printing classification report\n print(classification_report(list(Y_test), list(Y_test_pred), target_names=labels))\n return Y_test_pred\n\n\ndef run_pca_svm(X_train, Y_train, X_test, Y_test, n_components, labels=None):\n \"\"\"\n Complete function that runs through PCA followed by SVM training\n \n Parameters\n ----------\n X_train : array\n Array of train features vector\n Y_train : array\n Array of train labels\n X_test : array\n Array of test features vector\n Y_test : array\n Array of test labels\n n_components : int\n Target dimension of features vector\n labels : list, optional\n List of label names used when printing classification report, by default None\n \n Returns\n -------\n array\n Array of predicted labels\n \"\"\"\n X_train_pca, X_test_pca = run_pca(X_train, X_test, n_components)\n return run_svm(X_train_pca, Y_train, X_test_pca, Y_test, labels=labels) "
] | [
[
"numpy.sum",
"sklearn.decomposition.PCA",
"sklearn.svm.LinearSVC"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fabioc-ms/driverlessai-recipes | [
"5d9f9a7d44a666e735740ae986901742eec9c50a"
] | [
"data/databases/create_dataset_from_mongodb_collection.py"
] | [
"\"\"\"Create dataset from MonogDB\"\"\"\n\n# Author: Nicholas Png\n# Created: 31/01/2020\n# Last Updated: 20/02/2020\n\nimport datatable as dt\nimport pandas as pd\nfrom h2oaicore.data import CustomData\n\n\n_global_modules_needed_by_name = [\"pymongo\", \"dnspython\"]\n\n# Please fill before usage\n# Note that this information is logged in Driverless AI logs.\nMONGO_CONNECTION_STRING = \"mongodb+srv://<username>:<password>@host[/[database][?options]]\"\nMONGO_DB = \"sample_mflix\"\nMONGO_COLLECTION = \"theaters\"\nDATASET_NAME = \"sample_mflix.theaters\"\n\n\nclass MongoDbData(CustomData):\n\n _modules_needed_by_name = [\"pymongo\", \"dnspython\"]\n\n @staticmethod\n def create_data(X: dt.Frame = None):\n from pymongo import MongoClient\n\n # Initialize MongoDB python client \n client = MongoClient(MONGO_CONNECTION_STRING)\n\n # Use MongoDB python client to obtain list of all documents in a specific database + collection\n db = client.get_database(MONGO_DB)\n coll = db.get_collection(MONGO_COLLECTION)\n docs = coll.find()\n\n # Convert MongoDB documents cursor to pandas dataframe\n df = pd.DataFrame.from_dict(docs)\n\n # Cast all object columns as string since datatable cannot accept arbitrary objects\n object_cols = df.select_dtypes(include=['object']).columns\n df[object_cols] = df[object_cols].astype(str)\n\n # return dict where key is name of dataset and value is a datatable Frame of the data.\n return {DATASET_NAME: dt.Frame(df)}\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
chapman-phys220-2018f/cw08-forced-collaboration | [
"f21237668cc18bff47fbe506357848a47717fdb7"
] | [
"sinesum.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n########\n#Name: Conner Carnahan\n#ID: 1614309\n#Email: [email protected]\n#Class: PHYS220\n#Date: Oct 16, 2018\n########\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\[email protected]\ndef Sn(T,t,n):\n \"\"\"Sn(float T,float t,int n): Returns a value computed by a sum of sin functions in accordance with the Fourier series expansion of sign(x)\"\"\"\n if (abs(t) > T/2):\n print(\"t should be within the range -T/2, T/2\")\n pass\n K = np.arange(1,n+1)\n K = np.divide(4,np.pi*(2*K-1))\n SinPeriods = np.divide(2*np.pi*(2*np.arange(1,n+1)-1),T)\n value = np.sum(K*np.sin(SinPeriods*t))\n return value\n\[email protected]\ndef f(T,t):\n \"\"\"f(float T, float t): Returns a value that is the sign of the value inputed\n if t < 0 sign(t) = -1\n if t > 0 sign(t) = 1\n if t = 0 sign(t) = 0\"\"\"\n if (abs(t) > T/2):\n print(\"t should be between T/2\")\n pass\n return np.sign(t)\n\ndef Snarray(T,n,K = 300):\n \"\"\"Snarray(T float, n int, K = 300 int): returns numpy array, Generates an array of values for K values of t in [-T/2,T/2] of the summed sines which approximate sign(x)\"\"\"\n Time = np.linspace(-T/2,T/2,K)\n return Sn(T,Time,n)\n\ndef farray(T,K = 300):\n \"\"\"farray(T float, K = 300 int): returns numpy array of values evaluated by sign(x) for equally spaced K values of x in [-T/2,T/2]\"\"\"\n Time = np.linspace(-T/2,T/2,K)\n return f(T,Time)\n\ndef timespace(T,K = 300):\n \"\"\"timespace(T float, K = 300 int), returns numpy array of K equally spaced values in [-T/2,T/2]\"\"\"\n return np.linspace(-T/2,T/2,K)\n\n### DO NOT USE (OUT OF ORDER)\ndef buildallplots(alpha):\n \"\"\"DON'T DO IT ONLY A PROTOTYPE\n args: alpha (float),\n returns null\n This is a helper function that takes in a float, alpha,\n and generates a sequence of partial Fourier Sums that approximate the sign function\"\"\"\n T = alpha*2*np.pi\n F1Array = ss.Snarray(T,1)\n F3Array = ss.Snarray(T,3)\n F5Array = ss.Snarray(T,5)\n F10Array = ss.Snarray(T,10)\n F30Array = ss.Snarray(T,30)\n F100Array = ss.Snarray(T,100)\n FuncArray = ss.farray(T)\n Time = ss.timespace(T)\n###"
] | [
[
"numpy.linspace",
"numpy.arange",
"numpy.sin",
"numpy.sign",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | [
"bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11"
] | [
"start_visualisation.py"
] | [
"\"\"\"\nDescription: \n- File to create plots\n\"\"\"\n\n\"\"\" import packages \"\"\"\nimport os\nimport pandas as pd\nfrom pathlib import Path\n\n\n\"\"\" import project libraries \"\"\"\nimport modules.data.plot as plot\nimport modules.data.datamgm as dtm\n\n\"\"\" import configurations \"\"\"\nimport configurations.settings_visualisation as config\nimport configurations.settings_global as config_global\n\n\n\"\"\" Logging \"\"\"\nlogger = dtm.initialise_logger(__name__)\n\ndef main(): \n \n \"\"\" Descriptive plots (visualising characteristics of the instance) \"\"\"\n if config.bool_plot_passenger_demand: \n \n path = Path(config_global.instances_pas)\n \n logger.debug(path)\n \n instances = [str(e).replace(\"\\\\\",\"/\") for e in path.iterdir() if e.is_dir() and config.instance in str(e)]\n \n logger.debug(instances)\n \n \n for path_inst in instances: \n instance_name = path_inst.split(\"/\")[-1]\n instance_name = instance_name.split(\"-\")\n instance_name = \"-\".join(instance_name[1:3])\n \n path_output = f\"{config_global.output_ana}/{instance_name}/plots-data/\"\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n \n passenger_file = f\"{path_inst}/0-rep\"\n logger.debug(passenger_file)\n \n diff = path_inst.split(\"/\")[-1].strip()\n diff = diff.strip(f\"pas-{instance_name}\")\n if diff: \n diff = \"-\" + diff\n logger.debug(diff)\n plot.plot_passenger_arrival_intensity(passenger_file,path=path_output,station=\"all\",name=diff,FORMAT=config.FORMAT,show=config.show)\n \n \"\"\" Simulation plots \"\"\" \n if config.bool_plot_out_sim: \n \n path = dtm.get_path(config_global.output_ana)\n \n instances = [e for e in path.iterdir() if config.instance in str(e)]\n logger.debug(instances)\n \n for ins in instances:\n \n base_name = dtm.get_instance_name(ins, cut=True) \n \n sim_folder = ins.joinpath(\"sim\")\n timestamps_sim = [e for e in sim_folder.iterdir() if config.timestamp in e.name]\n \n for ts in timestamps_sim:\n logger.debug(ts)\n \n xs = []\n data = {}\n \n \"\"\" gain data per run \"\"\"\n if config.bool_plot_basic_instance or config.bool_plot_final: \n logs_files = ts.joinpath(\"logs_per_run\")\n dfs = []\n for file in logs_files.iterdir(): \n if \"analysis_run\" in str(file):\n dfs.append(dtm.get_data_from_csv(file)) \n\n data[\"Time\"] = pd.concat(dfs)\n logger.debug(data[\"Time\"].columns) \n \n \"\"\" base case analysis \"\"\"\n if config.bool_plot_basic_instance:\n xs.append(\"Time\") \n\n \n \n \n \"\"\" sensitivity analysis \"\"\"\n \n file = ts.joinpath(\"result_from_entities_log.csv\")\n logger.debug(file)\n \n \"\"\" Simulation plots: Sensitivity analysis for cargo \"\"\"\n if config.bool_plot_sensitivity_cargo and file.exists():\n xs.append(\"numCargo\")\n data[\"numCargo\"] = file\n \n if config.bool_plot_sensitivity_passengers and file.exists():\n xs.append(\"numPassengers\")\n data[\"numPassengers\"] = file\n \n # Iterate over transporation schemes\n for scheme in config.schemes:\n \n for x in xs:\n \n # Only create plots for selection of methods \n filter_value = {\"method\": [met for met in config.methods if config.methods[met]]}\n \n # Consider the base case\n if x==\"Time\": \n if config.passenger_set: \n filter_value[\"passenger_set\"] = f\"pas-{base_name}-{config.passenger_set}\" \n if config.numCargo >= 0:\n filter_value[\"numCargo\"] = config.numCargo\n aggregate_x = False\n plot_final = False\n \n # Consider only a given passenger set\n if x==\"numCargo\":\n if config.passenger_set: \n filter_value[\"passenger_set\"] = 
f\"pas-{base_name}-{config.passenger_set}\"\n logger.debug(f\"pas-{base_name}-{config.passenger_set}\")\n aggregate_x = False \n plot_final = config.bool_plot_final\n \n # Consider only a given number of cargo\n if x==\"numPassengers\": \n if int(config.numCargo):\n filter_value[\"numCargo\"] = config.numCargo\n aggregate_x = \"passenger_set\"\n logger.debug(filter_value)\n plot_final = config.bool_plot_final\n\n \n filter_value[\"scheme\"] = scheme \n \n # Iterate over plots to create\n for p in config.plots_sensitivity:\n\n # Only create plots if configured\n if p[\"plot\"] and config.schemes[scheme]: \n \n # extract plot design and configurations\n y = p[\"y\"]\n if \"y_axis\" in p: \n y_axis = p[\"y_axis\"]\n else: \n y_axis = \"\"\n if \"pos_legend\" in p: \n pos_legend = p[\"pos_legend\"]\n else: \n pos_legend = [0.00,0.99]\n logger.debug(y)\n logger.debug(config.show)\n \n # create (and save) the plot \n # try:\n plot.create_plots(data[x],x=x,y=y,compare=\"method\",y_axis=y_axis,show=config.show, filter_value = filter_value, aggregate_x=aggregate_x,pos_legend=pos_legend, FORMAT = config.FORMAT, path=ts)\n # except: \n # logger.error(f\"Error occured while plotting with x={x}\")\n \n if plot_final and \"final\" in p:\n # create (and save) the plot \n if p[\"final\"]:\n # try:\n plot.create_plots(data[\"Time\"],x=x,y=y,compare=\"method\",y_axis=y_axis,show=config.show, filter_value = {**filter_value, **{\"Time\":config.last_period}}, aggregate_x=aggregate_x,pos_legend=pos_legend, FORMAT = config.FORMAT, path=ts)\n # except: \n # logger.error(f\"Error occured while plotting with x={x} for period {config.last_period}\")\n \n for met in config.methods: \n \n # Iterate over plots to create per method\n for p in config.plots_sensitivity_per_method:\n\n # Only create plots if configured (per methods)\n if p[\"plot\"] and config.schemes[scheme] and config.methods[met]: \n \n filter_value[\"method\"] = met\n y = p[\"y\"]\n if \"y_axis\" in p: \n y_axis = p[\"y_axis\"]\n else: \n y_axis = \"\"\n if \"pos_legend\" in p: \n pos_legend = p[\"pos_legend\"]\n else: \n pos_legend = [0.00,0.99] \n logger.debug(filter_value)\n try:\n plot.create_plots(data[x],x=x,y=y,compare=\"method\",y_axis=y_axis,show=config.show, filter_value = filter_value, aggregate_x=aggregate_x,pos_legend=pos_legend, FORMAT = config.FORMAT,path=ts)\n except: \n logger.error(f\"Error occured while plotting with x={x}\") \n\n if plot_final and \"final\" in p:\n # create (and save) the plot \n if p[\"final\"]:\n try:\n plot.create_plots(data[\"Time\"],x=x,y=y,compare=\"method\",y_axis=y_axis,show=config.show, filter_value = {**filter_value, **{\"Time\":config.last_period}}, aggregate_x=aggregate_x,pos_legend=pos_legend, FORMAT = config.FORMAT, path=ts)\n except: \n logger.error(f\"Error occured while plotting with x={x} for period {config.last_period}\") \n \n \n # \"\"\" Create descriptive plots \"\"\"\n # if config.save_descriptive_plots:\n \n # if config.save_plots_time_space_network: \n # plot_time_space_network(entities_log, path)\n \n # if config.save_plots_time_series:\n # plot_time_series(entities_log,\"Tram\",\"Passengers\",path=path) \n # plot_time_series(entities_log,\"Tram\",\"Cargo\",path=path)\n # plot_time_series(entities_log,\"Stop\",\"Cargo\",path=path)\n # plot_time_series(entities_log,\"Stop\",\"Passengers\",path=path) \n \nif __name__=='__main__':\n main()"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
arnavdas88/QuTIpy | [
"0cd8263f7f6153b0de86e87479946e5479abc168"
] | [
"qutipy/Weyl/__init__.py"
] | [
"# This file is part of the QuTIpy package.\n# https://github.com/sumeetkhatri/QuTIpy\n#\n# Copyright (c) 2022 Sumeet Khatri.\n# --.- ..- - .. .--. -.--\n#\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, version 3.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport itertools\n\nimport numpy as np\nfrom numpy.linalg import matrix_power\n\nfrom qutipy.general_functions import ( # NOTE: What does the tensor function does?\n Tr,\n dag,\n ket,\n tensor,\n)\n\n\ndef discrete_Weyl_X(d):\n \"\"\"\n Generates the X shift operators.\n \"\"\"\n\n X = ket(d, 1) @ dag(ket(d, 0))\n\n for i in range(1, d):\n X = X + ket(d, (i + 1) % d) @ dag(ket(d, i))\n\n return X\n\n\ndef discrete_Weyl_Z(d):\n \"\"\"\n Generates the Z phase operators.\n \"\"\"\n\n w = np.exp(2 * np.pi * 1j / d)\n\n Z = ket(d, 0) @ dag(ket(d, 0))\n\n for i in range(1, d):\n Z = Z + w**i * ket(d, i) @ dag(ket(d, i))\n\n return Z\n\n\ndef discrete_Weyl(d, a, b):\n \"\"\"\n Generates the discrete Weyl operator X^aZ^b.\n \"\"\"\n\n return matrix_power(discrete_Weyl_X(d), a) @ matrix_power(discrete_Weyl_Z(d), b)\n\n\ndef generate_nQudit_X(d, indices):\n \"\"\"\n Generates a tensor product of discrete Weyl-X operators. indices is a\n list of dits (i.e., each element of the list is a number between 0 and\n d-1).\n \"\"\"\n\n X = discrete_Weyl_X(d)\n\n out = 1\n\n for index in indices:\n out = tensor(out, matrix_power(X, index))\n\n return out\n\n\ndef generate_nQudit_Z(d, indices):\n \"\"\"\n Generates a tensor product of discrete Weyl-Z operators. indices is a\n list of dits (i.e., each element of the list is a number between 0 and\n d-1).\n \"\"\"\n\n Z = discrete_Weyl_Z(d)\n\n out = 1\n\n for index in indices:\n out = tensor(out, matrix_power(Z, index))\n\n return out\n\n\ndef nQudit_cov_matrix(X, d, n):\n \"\"\"\n Generates the matrix of second moments (aka covariance matrix) of an\n n-qudit operator X.\n \"\"\"\n\n S = nQudit_quadratures(d, n)\n\n V = np.array(np.zeros((2 * n, 2 * n)), dtype=np.complex128)\n\n for i in range(2 * n):\n for j in range(2 * n):\n V[i, j] = Tr(X @ S[i + 1] @ dag(S[j + 1]))\n\n return V\n\n\ndef nQudit_quadratures(d, n):\n \"\"\"\n Returns the list of n-qudit \"quadrature\" operators, which are defined as\n (for two qudits)\n\n S[0]=X(0) ⊗ Id\n S[1]=Z(0) ⊗ Id\n S[2]=Id ⊗ X(0)\n S[3]=Id ⊗ Z(0)\n\n In general, for n qubits:\n\n S[0]=X(0) ⊗ Id ⊗ ... ⊗ Id\n S[1]=Z(0) ⊗ Id ⊗ ... ⊗ Id\n S[2]=Id ⊗ X(0) ⊗ ... ⊗ Id\n S[3]=Id ⊗ Z(0) ⊗ ... ⊗ Id\n .\n .\n .\n S[2n-2]=Id ⊗ Id ⊗ ... ⊗ X(0)\n S[2n-1]=Id ⊗ Id ⊗ ... 
⊗ Z(0)\n \"\"\"\n\n S = {}\n\n count = 0\n\n for i in range(1, 2 * n + 1, 2):\n v = list(np.array(dag(ket(n, count)), dtype=int).flatten())\n S[i] = generate_nQudit_X(d, v)\n S[i + 1] = generate_nQudit_Z(d, v)\n count += 1\n\n return S\n\n\ndef nQudit_Weyl_coeff(X, d, n):\n \"\"\"\n Generates the coefficients of the operator X acting on n qudit\n systems.\n \"\"\"\n\n C = {}\n\n S = list(itertools.product(*[range(0, d)] * n))\n\n for s in S:\n s = list(s)\n for t in S:\n t = list(t)\n G = generate_nQudit_X(d, s) @ generate_nQudit_Z(d, t)\n C[(str(s), str(t))] = np.around(Tr(dag(X) @ G), 10)\n\n return C\n"
] | [
[
"numpy.linalg.matrix_power",
"numpy.exp",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
undeadinu/datasets | [
"a6f1bce86404d534b7343fb90f0ebfd6d098c346"
] | [
"tensorflow_datasets/core/utils/py_utils.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Some python utils function and classes.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport hashlib\nimport io\nimport itertools\nimport os\nimport sys\nimport uuid\n\nimport tensorflow as tf\nfrom tensorflow_datasets.core import constants\n\n\n# pylint: disable=g-import-not-at-top\nif sys.version_info[0] > 2:\n import functools\nelse:\n import functools32 as functools\n# pylint: enable=g-import-not-at-top\n\n\n# NOTE: When used on an instance method, the cache is shared across all\n# instances and IS NOT per-instance.\n# See\n# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance\n# For @property methods, use @memoized_property below.\n# TODO(rsepassi): Write a @memoize decorator that does the right thing for\n# instance methods.\nmemoize = functools.lru_cache\n\n\[email protected]\ndef temporary_assignment(obj, attr, value):\n \"\"\"Temporarily assign obj.attr to value.\"\"\"\n original = getattr(obj, attr, None)\n setattr(obj, attr, value)\n yield\n setattr(obj, attr, original)\n\n\ndef zip_dict(*dicts):\n \"\"\"Iterate over items of dictionaries grouped by their keys.\"\"\"\n for key in set(itertools.chain(*dicts)): # set merge all keys\n # Will raise KeyError if the dict don't have the same keys\n yield key, tuple(d[key] for d in dicts)\n\n\nclass NonMutableDict(dict):\n \"\"\"Dict where keys can only be added but not modified.\n\n Will raise an error if the user try to overwrite one key. The error message\n can be customized during construction. It will be formatted using {key} for\n the overwritten key.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._error_msg = kwargs.pop(\n \"error_msg\",\n \"Try to overwrite existing key: {key}\",\n )\n if kwargs:\n raise ValueError(\"NonMutableDict cannot be initialized with kwargs.\")\n super(NonMutableDict, self).__init__(*args, **kwargs)\n\n def __setitem__(self, key, value):\n if key in self:\n raise ValueError(self._error_msg.format(key=key))\n return super(NonMutableDict, self). 
__setitem__(key, value)\n\n def update(self, other):\n if any(k in self for k in other):\n raise ValueError(self._error_msg.format(key=set(self) & set(other)))\n return super(NonMutableDict, self).update(other)\n\n\nclass classproperty(property): # pylint: disable=invalid-name\n \"\"\"Descriptor to be used as decorator for @classmethods.\"\"\"\n\n def __get__(self, obj, objtype=None):\n return self.fget.__get__(None, objtype)()\n\n\nclass memoized_property(property): # pylint: disable=invalid-name\n \"\"\"Descriptor that mimics @property but caches output in member variable.\"\"\"\n\n def __get__(self, obj, objtype=None):\n # See https://docs.python.org/3/howto/descriptor.html#properties\n if obj is None:\n return self\n if self.fget is None:\n raise AttributeError(\"unreadable attribute\")\n attr = \"__cached_\" + self.fget.__name__\n cached = getattr(obj, attr, None)\n if cached is None:\n cached = self.fget(obj)\n setattr(obj, attr, cached)\n return cached\n\n\ndef map_nested(function, data_struct, dict_only=False, map_tuple=False):\n \"\"\"Apply a function recursively to each element of a nested data struct.\"\"\"\n\n # Could add support for more exotic data_struct, like OrderedDict\n if isinstance(data_struct, dict):\n return {\n k: map_nested(function, v, dict_only, map_tuple)\n for k, v in data_struct.items()\n }\n elif not dict_only:\n types = [list]\n if map_tuple:\n types.append(tuple)\n if isinstance(data_struct, tuple(types)):\n mapped = [map_nested(function, v, dict_only, map_tuple)\n for v in data_struct]\n if isinstance(data_struct, list):\n return mapped\n else:\n return tuple(mapped)\n # Singleton\n return function(data_struct)\n\n\ndef zip_nested(arg0, *args, **kwargs):\n \"\"\"Zip data struct together and return a data struct with the same shape.\"\"\"\n # Python 2 do not support kwargs only arguments\n dict_only = kwargs.pop(\"dict_only\", False)\n assert not kwargs\n\n # Could add support for more exotic data_struct, like OrderedDict\n if isinstance(arg0, dict):\n return {\n k: zip_nested(*a, dict_only=dict_only) for k, a in zip_dict(arg0, *args)\n }\n elif not dict_only:\n if isinstance(arg0, list):\n return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)]\n # Singleton\n return (arg0,) + args\n\n\ndef as_proto_cls(proto_cls):\n \"\"\"Simulate proto inheritance.\n\n By default, protobuf do not support direct inheritance, so this decorator\n simulates inheritance to the class to which it is applied.\n\n Example:\n\n ```\n @as_proto_class(proto.MyProto)\n class A(object):\n def custom_method(self):\n return self.proto_field * 10\n\n p = proto.MyProto(proto_field=123)\n\n a = A()\n a.CopyFrom(p) # a is like a proto object\n assert a.proto_field == 123\n a.custom_method() # But has additional methods\n\n ```\n\n Args:\n proto_cls: The protobuf class to inherit from\n\n Returns:\n decorated_cls: The decorated class\n \"\"\"\n\n def decorator(cls):\n \"\"\"Decorator applied to the class.\"\"\"\n\n class ProtoCls(object):\n \"\"\"Base class simulating the protobuf.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.__proto = proto_cls(*args, **kwargs)\n\n def __getattr__(self, attr_name):\n return getattr(self.__proto, attr_name)\n\n def __eq__(self, other):\n return self.__proto, other.get_proto()\n\n def get_proto(self):\n return self.__proto\n\n def __repr__(self):\n return \"<{cls_name}\\n{proto_repr}\\n>\".format(\n cls_name=cls.__name__, proto_repr=repr(self.__proto))\n\n decorator_cls = type(cls.__name__, (cls, ProtoCls), {\n \"__doc__\": 
cls.__doc__,\n })\n return decorator_cls\n return decorator\n\n\ndef tfds_dir():\n \"\"\"Path to tensorflow_datasets directory.\"\"\"\n return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\n\[email protected]\ndef atomic_write(path, mode):\n \"\"\"Writes to path atomically, by writing to temp file and renaming it.\"\"\"\n tmp_path = \"%s%s_%s\" % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex)\n with tf.gfile.Open(tmp_path, mode) as file_:\n yield file_\n tf.gfile.Rename(tmp_path, path, overwrite=True)\n\n\nclass abstractclassmethod(classmethod): # pylint: disable=invalid-name\n \"\"\"Decorate a method to mark it as an abstract @classmethod.\"\"\"\n\n __isabstractmethod__ = True\n\n def __init__(self, fn):\n fn.__isabstractmethod__ = True\n super(abstractclassmethod, self).__init__(fn)\n\n\ndef get_tfds_path(relative_path):\n \"\"\"Returns absolute path to file given path relative to tfds root.\"\"\"\n path = os.path.join(tfds_dir(), relative_path)\n return path\n\n\ndef read_checksum_digest(path, checksum_cls=hashlib.sha256):\n \"\"\"Given a hash constructor, returns checksum digest and size of file.\"\"\"\n checksum = checksum_cls()\n size = 0\n with tf.gfile.Open(path, \"rb\") as f:\n while True:\n block = f.read(io.DEFAULT_BUFFER_SIZE)\n size += len(block)\n if not block:\n break\n checksum.update(block)\n return checksum.hexdigest(), size\n"
] | [
[
"tensorflow.gfile.Rename",
"tensorflow.gfile.Open"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
edoarn/cv-models | [
"5fa7e50fd69f76b54611bb323b15610eeb1bb5cf"
] | [
"cvmodels/segmentation/deeplab.py"
] | [
"from enum import Enum\nfrom typing import Type, Tuple\nimport torch\nimport torch.nn as nn\n\nfrom cvmodels.segmentation.backbones import resnet as rn, xception as xc\n\n\nclass ASPPVariants(Enum):\n \"\"\"Enum describing the possible dilations in the Atrous spatial Pyramid Pooling block.\n There are essentially two combinations, with output stride= 16 (smaller) or 8 (wider)\n \"\"\"\n OS16 = (1, 6, 12, 18)\n OS08 = (1, 12, 24, 36)\n\n\nclass ASPPModule(nn.Module):\n \"\"\"Atrous Spatial Pyramid Pooling module: this block is responsible for the multi-scale feature extraction,\n using multiple parallel convolutional blocks (conv, bn, relu) with different dilations.\n The four feature groups are then recombined into a single tensor together with an upscaled average pooling\n (that contrasts information loss), then again processed by a 1x1 convolution + dropout\n \"\"\"\n\n def __init__(self,\n in_tensor: Tuple[int, int, int],\n variant: ASPPVariants = ASPPVariants.OS16,\n batch_norm: Type[nn.Module] = nn.BatchNorm2d):\n \"\"\"Creates a new Atrous spatial Pyramid Pooling block. This module is responsible\n for the extraction of features at different scales from the input tensor (which is\n an encoder version of the image with high depth and low height/width).\n The module combines these multi-scale features into a single tensor via 1x convolutions\n\n :param in_tensor: input dimensions in (channels, height, width), defaults to (2048, 32, 32)\n :type in_tensor: Tuple[int, int, int], optional\n :param variant: which output stride are we dealing with, defaults to ASSPVariants.OS16\n :type variant: ASSPVariants, optional\n :param batch_norm: batch normalization clas to instatiate, defaults to nn.BatchNorm2d\n :type batch_norm: Type[nn.Module], optional\n \"\"\"\n super().__init__()\n dilations = variant.value\n in_channels, h, w = in_tensor\n self.aspp1 = self.assp_block(in_channels, 256, 1, 0, dilations[0], batch_norm=batch_norm)\n self.aspp2 = self.assp_block(in_channels, 256, 3, dilations[1], dilations[1], batch_norm=batch_norm)\n self.aspp3 = self.assp_block(in_channels, 256, 3, dilations[2], dilations[2], batch_norm=batch_norm)\n self.aspp4 = self.assp_block(in_channels, 256, 3, dilations[3], dilations[3], batch_norm=batch_norm)\n # this is redoncolous, but it's described in the paper: bring it down to 1x1 tensor and upscale\n self.avgpool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(in_channels, 256, kernel_size=1, bias=False),\n batch_norm(256),\n nn.ReLU(inplace=True),\n nn.Upsample((h, w), mode=\"bilinear\", align_corners=True))\n self.merge = self.assp_block(256 * 5, 256, kernel=1, padding=0, dilation=1, batch_norm=batch_norm)\n self.dropout = nn.Dropout(p=0.5)\n\n def assp_block(self, in_channels: int, out_channels: int, kernel: int, padding: int, dilation: int,\n batch_norm: Type[nn.Module]) -> nn.Sequential:\n \"\"\"Creates a basic ASPP block, a sequential module with convolution, batch normalization and relu activation.\n\n :param in_channels: number of input channels\n :type in_channels: int\n :param out_channels: number of output channels (usually fixed to 256)\n :type out_channels: int\n :param kernel: kernel size for the convolution (usually 3)\n :type kernel: int\n :param padding: convolution padding, usually equal to the dilation, unless no dilation is applied\n :type padding: int\n :param dilation: dilation for the atrous convolution, depends on ASPPVariant\n :type dilation: int\n :param batch_norm: batch normalization class yet to be instantiated\n :type 
batch_norm: Type[nn.Module]\n :return: sequential block representing an ASPP component\n :rtype: nn.Sequential\n \"\"\"\n module = nn.Sequential(\n nn.Conv2d(in_channels,\n out_channels,\n kernel_size=kernel,\n stride=1,\n padding=padding,\n dilation=dilation,\n bias=False),\n batch_norm(out_channels),\n nn.ReLU(inplace=True))\n return module\n\n def forward(self, batch: torch.Tensor) -> torch.Tensor:\n \"\"\"Computes a forward pass on the ASPP module.\n The same input is processed five times with different dilations. Output sizes are the same,\n except for the pooled layer, which requires an upscaling.\n\n :param batch: input tensor with dimensions [batch, channels, height, width]\n :type batch: torch.Tensor\n :return: output tensor with dimensions [batch, 256, height, width]\n :rtype: torch.Tensor\n \"\"\"\n x1 = self.aspp1(batch)\n x2 = self.aspp2(batch)\n x3 = self.aspp3(batch)\n x4 = self.aspp4(batch)\n x5 = self.avgpool(batch)\n x5 = torch.cat((x1, x2, x3, x4, x5), dim=1)\n x = self.merge(x5)\n return self.dropout(x)\n\n\nclass DecoderV3(nn.Sequential):\n \"\"\"Decoder for DeepLabV3, consisting of a double convolution and a direct 16X upsampling.\n This is clearly not the best for performance, but, if memory is a problem, this can save a little space.\n \"\"\"\n\n def __init__(self,\n output_stride: int = 16,\n output_channels: int = 1,\n dropout: float = 0.1,\n batch_norm: Type[nn.Module] = nn.BatchNorm2d):\n \"\"\"Decoder output for the simpler DeepLabV3: this module simply processes the ASPP output\n and upscales it to the input size.The 3x3 convolution and the dropout do not appear in the paper,\n but they are implemented in the official release.\n\n :param output_stride: scaling factor of the backbone, defaults to 16\n :type output_stride: int, optional\n :param output_channels: number of classes in the output mask, defaults to 1\n :type output_channels: int, optional\n :param dropout: dropout probability before the final convolution, defaults to 0.1\n :type dropout: float, optional\n :param batch_norm: batch normalization class, defaults to nn.BatchNorm2d\n :type batch_norm: Type[nn.Module], optional\n \"\"\"\n super(DecoderV3, self).__init__(\n nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),\n batch_norm(256),\n nn.ReLU(inplace=True), nn.Dropout(p=dropout),\n nn.Conv2d(256, output_channels, kernel_size=1),\n nn.Upsample(scale_factor=output_stride, mode=\"bilinear\", align_corners=True))\n\n\nclass DecoderV3Plus(nn.Module):\n \"\"\"DeepLabV3+ decoder branch, with a skip branch embedding low level\n features (higher resolution) into the highly dimensional output. 
This typically\n produces much better results than a naive 16x upsampling.\n Original paper: https://arxiv.org/abs/1802.02611\n \"\"\"\n\n def __init__(self,\n low_level_channels: int,\n output_stride: int = 16,\n output_channels: int = 1,\n batch_norm: Type[nn.Module] = nn.BatchNorm2d):\n \"\"\"Returns a new Decoder for DeepLabV3+.\n The upsampling is divided into two parts: a fixed 4x from 128 to 512, and a 2x or 4x\n from 32 or 64 (when input=512x512) to 128, depending on the output stride.\n\n :param low_level_channels: how many channels on the lo-level skip branch\n :type low_level_channels: int\n :param output_stride: downscaling factor of the backbone, defaults to 16\n :type output_stride: int, optional\n :param output_channels: how many outputs, defaults to 1\n :type output_channels: int, optional\n :param batch_norm: batch normalization module, defaults to nn.BatchNorm2d\n :type batch_norm: Type[nn.Module], optional\n \"\"\"\n super().__init__()\n low_up_factor = 4\n high_up_factor = output_stride / low_up_factor\n self.low_level = nn.Sequential(\n nn.Conv2d(low_level_channels, 48, 1, bias=False),\n batch_norm(48),\n nn.ReLU(inplace=True))\n self.upsample = nn.Upsample(scale_factor=high_up_factor, mode=\"bilinear\", align_corners=True)\n\n # Table 2, best performance with two 3x3 convs\n self.output = nn.Sequential(\n nn.Conv2d(48+256, 256, 3, stride=1, padding=1, bias=False),\n batch_norm(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, 3, stride=1, padding=1, bias=False),\n batch_norm(256),\n nn.ReLU(inplace=True),\n nn.Dropout(0.1),\n nn.Conv2d(256, output_channels, 1, stride=1),\n nn.Upsample(scale_factor=low_up_factor, mode=\"bilinear\", align_corners=True)\n )\n\n def forward(self, x: torch.Tensor, skip: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass on the decoder. Low-level features 'skip' are processed and merged\n with the upsampled high-level features 'x'. 
The output then restores the tensor\n to the original height and width.\n\n :param x: high-level features, [batch, 2048, X, X], where X = input size / output stride\n :type x: torch.Tensor\n :param skip: low-level features, [batch, Y, 128, 128] where Y = 256 for ResNet, 128 for Xception\n :type skip: torch.Tensor\n :return: tensor with the final output, [batch, classes, input height, input width]\n :rtype: torch.Tensor\n \"\"\"\n skip = self.low_level(skip)\n x = self.upsample(x)\n return self.output(torch.cat((skip, x), dim=1))\n\n\nclass DeepLabVariants(Enum):\n \"\"\"Enum defining possible combinations of backbones and strides.\n Currently, only ResNet and Xception are supported as backbones.\n \"\"\"\n RESNET50_16 = (rn.ResNetVariants.RN50, rn.OutputStrides.OS16, ASPPVariants.OS16)\n RESNET50_08 = (rn.ResNetVariants.RN50, rn.OutputStrides.OS08, ASPPVariants.OS08)\n RESNET101_16 = (rn.ResNetVariants.RN101, rn.OutputStrides.OS16, ASPPVariants.OS16)\n RESNET101_08 = (rn.ResNetVariants.RN101, rn.OutputStrides.OS08, ASPPVariants.OS08)\n XCEPTION08_16 = (xc.XceptionVariants.MF08, xc.OutputStrides.OS16, ASPPVariants.OS16)\n XCEPTION08_08 = (xc.XceptionVariants.MF08, xc.OutputStrides.OS08, ASPPVariants.OS08)\n XCEPTION16_16 = (xc.XceptionVariants.MF16, xc.OutputStrides.OS16, ASPPVariants.OS16)\n XCEPTION16_08 = (xc.XceptionVariants.MF16, xc.OutputStrides.OS08, ASPPVariants.OS08)\n\n\nclass DeepLabBase(nn.Module):\n \"\"\"Generic DeepLab class that provides three inputs for the main block of the architecture,\n This provides a custom initialization when needed, otherwise it is advisable to use the specific\n V3 or V3Plus implementations.\"\"\"\n\n def __init__(self,\n in_channels: int,\n in_dimension: int,\n out_channels: int,\n pretrained: bool = False,\n variant: DeepLabVariants = DeepLabVariants.RESNET101_16,\n batch_norm: Type[nn.Module] = nn.BatchNorm2d):\n super().__init__()\n assert out_channels > 0, \"Please provide a valid number of classes!\"\n backbone_variant, output_strides, aspp_variant = variant.value\n backbone_name = variant.name.lower()\n if backbone_name.startswith(\"resnet\"):\n backbone = rn.ResNetBackbone(variant=backbone_variant,\n batch_norm=batch_norm,\n output_strides=output_strides,\n in_channels=in_channels,\n pretrained=pretrained)\n elif backbone_name.startswith(\"xception\"):\n backbone = xc.XceptionBackbone(in_channels=in_channels,\n output_strides=output_strides,\n variant=backbone_variant,\n batch_norm=batch_norm,\n pretrained=pretrained)\n output_dims = in_dimension // backbone.scaling_factor()\n features_high, _ = backbone.output_features()\n self.backbone = backbone\n self.aspp = ASPPModule(in_tensor=(features_high, output_dims, output_dims),\n variant=aspp_variant,\n batch_norm=batch_norm)\n\n\nclass DeepLabV3(DeepLabBase):\n \"\"\"Deeplab V3 implementation: considering previous iterations V3 introduces a more modular\n concept of feature encoder, called 'backbone', and improves the ASPP module with more convolutions\n and global pooling. 
The CRF is also removed from the official implementation details.\n \"\"\"\n\n def __init__(self,\n in_channels: int = 3,\n in_dimension: int = 512,\n out_channels: int = 1,\n pretrained: bool = False,\n variant: DeepLabVariants = DeepLabVariants.RESNET101_16,\n batch_norm: Type[nn.Module] = nn.BatchNorm2d):\n super().__init__(in_channels=in_channels,\n in_dimension=in_dimension,\n out_channels=out_channels,\n variant=variant,\n pretrained=pretrained,\n batch_norm=batch_norm)\n self.decoder = DecoderV3(output_stride=self.backbone.scaling_factor(),\n output_channels=out_channels,\n batch_norm=batch_norm)\n\n def forward(self, batch: torch.Tensor) -> torch.Tensor:\n \"\"\"Computes a forward pass on the whole DeepLab.\n In V3, the low-level features are not used by the other components.\n\n :param batch: input tensor, [batch, channels, height, width]\n :type batch: torch.Tensor\n :return: tensor with size [batch, classes, height, width]\n :rtype: torch.Tensor\n \"\"\"\n x, _ = self.backbone(batch)\n x = self.aspp(x)\n return self.decoder(x)\n\n\nclass DeepLabV3Plus(DeepLabBase):\n \"\"\"DeepLabV3Plus implementation, almost the same as V3, but with a much better decoding branch.\n \"\"\"\n\n def __init__(self,\n in_channels: int = 3,\n in_dimension: int = 512,\n out_channels: int = 1,\n pretrained: bool = False,\n variant: DeepLabVariants = DeepLabVariants.XCEPTION16_16,\n batch_norm: Type[nn.Module] = nn.BatchNorm2d):\n super().__init__(in_channels=in_channels,\n in_dimension=in_dimension,\n out_channels=out_channels,\n variant=variant,\n pretrained=pretrained,\n batch_norm=batch_norm)\n _, features_low = self.backbone.output_features()\n self.decoder = DecoderV3Plus(low_level_channels=features_low,\n output_stride=self.backbone.scaling_factor(),\n output_channels=out_channels,\n batch_norm=batch_norm)\n\n def forward(self, batch: torch.Tensor) -> torch.Tensor:\n \"\"\"Computes a forward pass on the whole network.\n In V3+, the low-level features are integrated for a high resolution mask.\n\n :param batch: input tensor, [batch, channels, height, width]\n :type batch: torch.Tensor\n :return: tensor with size [batch, classes, height, width]\n :rtype: torch.Tensor\n \"\"\"\n x, skip = self.backbone(batch)\n x = self.aspp(x)\n return self.decoder(x, skip)\n\n\nif __name__ == \"__main__\":\n x = torch.rand((2, 3, 480, 480))\n deeplab = DeepLabV3Plus(out_channels=10, in_dimension=480, variant=DeepLabVariants.RESNET101_16, pretrained=True)\n print(deeplab(x).size())\n"
] | [
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Upsample",
"torch.rand",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Camiloasc1/AstronomyUNAL | [
"0d533c1737e5328605c70f614024e1759e8d0962"
] | [
"CelestialMechanics/orbits/parable.py"
] | [
"from typing import Tuple\n\nimport numpy as np\nfrom astropy import units as u\n\n\ndef r(q: float, angle: float) -> float:\n \"\"\"\n r = 2 * q / (1 + np.cos(angle))\n\n :param q: pericentric distance\n :type q: float\n :param angle: theta angle\n :type angle: float\n :return: radius vector\n :rtype: float\n \"\"\"\n angle = np.deg2rad(angle)\n return 2 * q / (1 + np.cos(angle))\n\n\ndef angles(q: float, r: float) -> Tuple[float, float]:\n \"\"\"\n cos(angle) = 2 * q / r - 1\n angle = arccos(2 * q / r - 1)\n\n :param q: pericentric distance\n :type q: float\n :param r: radius vector\n :type r: float\n :return: angle\n :rtype: float\n \"\"\"\n angle = np.rad2deg(np.arccos(2 * q / r - 1))\n return angle, (360. * u.deg) - angle\n\n\ndef e() -> float:\n \"\"\"\n e = 1\n\n :return: eccentricity\n :rtype: float\n \"\"\"\n return 1.\n\n\ndef c(a: float) -> float:\n \"\"\"\n c = -\n\n :param a: semi-major axis\n :type a: float\n :return: linear eccentricity\n :rtype: float\n \"\"\"\n return None\n\n\ndef l(a: float) -> float:\n \"\"\"\n l = 2 * a\n\n :param a: semi-major axis\n :type a: float\n :return: semi-latus rectum\n :rtype: float\n \"\"\"\n return 2. * a\n\n\ndef p(a: float) -> float:\n \"\"\"\n p = 2 * a\n\n :param a: semi-major axis\n :type a: float\n :return: focal parameter\n :rtype: float\n \"\"\"\n return 2. * a\n\n\ndef E() -> float:\n \"\"\"\n E = 0\n\n :return: energy of the 2-body system\n :rtype: float\n \"\"\"\n\n return 0\n\n\ndef v(r: float, m1: float, m2: float) -> float:\n \"\"\"\n v = sqrt(2 * G * (m1 + m2) / r)\n\n :param r: radius vector\n :type r: float\n :param m1: mass 1\n :type m1: float\n :param m2: mass 2\n :type m2: float\n :return: velocity\n :rtype: float\n \"\"\"\n from CelestialMechanics.mu import mu_gm1m2\n\n v = 2 * mu_gm1m2(m1, m2) / r\n v = np.sqrt(v)\n\n return v\n\n\ndef solve_C(q: float, mu: float, t_r: float, t: float) -> float:\n \"\"\"\n C = sqrt(mu / 2 / a ** 3) * (t - t_r)\n\n :param q: pericentric distance\n :type q: float\n :param mu: G * (m1 + m2)\n :type mu: float\n :param t_r: reference time\n :type t_r: float\n :param t: time\n :type t: float\n :return: C\n :rtype: float\n \"\"\"\n return np.sqrt(mu / 2 / q ** 3) * (t - t_r)\n\n\ndef solve_S(C: float) -> float:\n \"\"\"\n S = arctan(2 / 3 / C)\n\n :param C: C\n :type C: float\n :return: S\n :rtype: float\n \"\"\"\n return np.arctan(2. / 3. / C)\n\n\ndef solve_FI(S: float) -> float:\n \"\"\"\n FI = arctan(tan(S / 2) ** (1. / 3.))\n\n :param S: S\n :type S: float\n :return: FI\n :rtype: float\n \"\"\"\n angle = np.tan(S / 2)\n angle = np.sign(angle) * np.abs(angle) ** (1 / 3) # fix the issue with negative values\n return np.arctan(angle)\n\n\ndef angle_FI(FI: float) -> float:\n \"\"\"\n theta = 2 * arctan(2 / tan(2 * FI))\n\n :param FI: FI\n :type FI: float\n :return: theta angle\n :rtype: float\n \"\"\"\n return 2 * np.arctan(2 / np.tan(2 * FI))\n\n\ndef FI_angle(angle: float) -> float:\n \"\"\"\n FI = arctan(2 / tan(angle / 2)) / 2\n\n :param angle: theta angle\n :type angle: float\n :return: FI angle\n :rtype: float\n \"\"\"\n return np.arctan(2 / np.tan(angle / 2)) / 2\n\n\ndef S_FI(FI: float) -> float:\n \"\"\"\n S = 2 * arctan(tan(FI) ** 3)\n\n :param FI: FI angle\n :type FI: float\n :return: S angle\n :rtype: float\n \"\"\"\n return 2 * np.arctan(np.tan(FI) ** 3)\n\n\ndef r1(q: float, angle: float, mu: float) -> float:\n \"\"\"\n r. 
= sqrt(mu / 2 / q) * e * sin(angle)\n\n :param q: pericentric distance\n :type q: float\n :param angle: theta angle\n :type angle: float\n :param mu: G * (m1 + m2)\n :type mu: float\n :return: r.\n :rtype: float\n \"\"\"\n r1 = mu / 2 / q\n r1 = np.sqrt(r1)\n r1 *= np.sin(angle)\n return r1\n\n\ndef r_angle1(q: float, r: float, mu: float) -> float:\n \"\"\"\n rtheta.= sqrt(2 * q * mu) / r\n\n :param q: pericentric distance\n :type q: float\n :param r: radius vector\n :type r: float\n :param mu: G * (m1 + m2)\n :type mu: float\n :return: r.\n :rtype: float\n \"\"\"\n r_angle1 = 2 * q * mu\n r_angle1 = np.sqrt(r_angle1)\n r_angle1 /= r\n return r_angle1\n\n\ndef t0(S, t_r, n):\n \"\"\"\n t0 = t_r - M_r / n\n\n :param S: S angle\n :type S: float\n :param t_r: reference time\n :type t_r: float\n :param n: mean movement\n :type n: float\n :return: t0\n :rtype: float\n \"\"\"\n return t_r - 2 / 3 * np.sqrt(2) / n / np.tan(S)\n"
] | [
[
"numpy.sqrt",
"numpy.arctan",
"numpy.abs",
"numpy.arccos",
"numpy.cos",
"numpy.sin",
"numpy.tan",
"numpy.sign",
"numpy.deg2rad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
a3sha2/aslprep | [
"b92ff4a6526351df91f37de3bf56d90d13d89286"
] | [
"aslprep/interfaces/cbf_computation.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\nimport nibabel as nb\nfrom nibabel.processing import smooth_image\nfrom scipy.stats import gmean\nfrom nipype import logging\nfrom nipype.utils.filemanip import fname_presuffix\nfrom nipype.interfaces.base import (traits, TraitedSpec, BaseInterfaceInputSpec, SimpleInterface,\n File, isdefined)\nfrom nipype.interfaces.fsl.base import (FSLCommand, FSLCommandInputSpec)\nfrom nipype.interfaces.ants import ApplyTransforms\nfrom pkg_resources import resource_filename as pkgrf\nfrom nipype.interfaces.fsl import MultiImageMaths\n\nLOGGER = logging.getLogger('nipype.interface')\n\n\nclass _refinemaskInputSpec(BaseInterfaceInputSpec):\n in_t1mask = File(exists=True, mandatory=True, desc='t1 mask')\n in_aslmask = File(exists=True, mandatory=True, desct='asl mask')\n transforms = File(exists=True, mandatory=True, desc='transfom')\n out_mask = File(exists=False, mandatory=False, desc='output mask')\n out_tmp = File(exists=False, mandatory=False, desc='tmp mask')\n\n\nclass _refinemaskOutputSpec(TraitedSpec):\n out_mask = File(exists=False, desc='output mask')\n out_tmp = File(exists=False, desc='tmp mask')\n\n\nclass refinemask(SimpleInterface):\n r\"\"\"\n the code refine the asl mask with t1w mask\n the output is refined asl mask\n\n \"\"\"\n input_spec = _refinemaskInputSpec\n output_spec = _refinemaskOutputSpec\n\n def _run_interface(self, runtime):\n self._results['out_tmp'] = fname_presuffix(self.inputs.in_aslmask,\n suffix='_tempmask', newpath=runtime.cwd)\n self._results['out_mask'] = fname_presuffix(self.inputs.in_aslmask,\n suffix='_refinemask', newpath=runtime.cwd)\n\n refine_ref_mask(t1w_mask = self.inputs.in_t1mask,\n ref_asl_mask = self.inputs.in_aslmask,\n t12ref_transform = self.inputs.transforms,\n tmp_mask = self._results['out_tmp'],\n refined_mask = self._results['out_mask'])\n \n return runtime\n\n\nclass _extractCBFInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc='raw asl file')\n asl_file = File(exists=True, mandatory=True, desc='preprocessed asl file')\n in_mask = File(exists=True, mandatory=True, desc='mask')\n dummy_vols = traits.Int(default_value=0, exit=False, mandatory=False,\n desc='remove first n volumes')\n in_metadata = traits.Dict(exists=True, mandatory=True,\n desc='metadata for asl or deltam ')\n bids_dir=traits.Str(exits=True,mandatory=True,desc=' bids directory')\n fwhm = traits.Float(default_value=5, exists=True, mandatory=False, desc='fwhm')\n out_file = File(exists=False, mandatory=False, desc='cbf timeries data')\n out_avg = File(exists=False, mandatory=False, desc='average control')\n\n\nclass _extractCBFOutputSpec(TraitedSpec):\n out_file = File(exists=False, desc='cbf timeries data')\n out_avg = File(exists=False, desc='average control')\n\n\nclass extractCBF(SimpleInterface):\n \"\"\"\n extract CBF timeseries\n by substracting label from control\n or viceversa\n\n it generate M0 maps for cbf computation\n\n \"\"\"\n\n input_spec = _extractCBFInputSpec\n output_spec = _extractCBFOutputSpec\n\n def _run_interface(self, runtime):\n file1 = os.path.abspath(self.inputs.in_file)\n # check if there is m0 file\n #m0num = 0\n m0file = []\n aslfile_linkedM0=[]\n mask = nb.load(self.inputs.in_mask).get_fdata()\n aslcontext1 = file1.replace('_asl.nii.gz', '_aslcontext.tsv')\n idasl = pd.read_csv(aslcontext1)['volume_type'].tolist()\n\n #read the data\n allasl = nb.load(self.inputs.asl_file)\n dataasl = allasl.get_fdata()\n\n # get the control,tag,moscan or label \n controllist = [i 
for i in range(0, len(idasl)) if idasl[i] == 'control']\n labellist = [i for i in range(0, len(idasl)) if idasl[i] == 'label']\n m0list = [i for i in range(0, len(idasl)) if idasl[i] == 'm0scan']\n deltamlist = [i for i in range(0, len(idasl)) if idasl[i] == 'deltam']\n cbflist = [i for i in range(0, len(idasl)) if idasl[i] == 'CBF']\n \n # extcract m0 file and register it to ASL if separate\n if self.inputs.in_metadata['M0Type'] == 'Separate':\n m0file = self.inputs.in_file.replace(\"asl.nii.gz\",\"m0scan.nii.gz\")\n m0file_metadata=readjson(m0file.replace('nii.gz','json'))\n aslfile_linkedM0 = os.path.abspath(self.inputs.bids_dir+'/'+m0file_metadata['IntendedFor'])\n if self.inputs.in_file not in aslfile_linkedM0:\n raise RuntimeError(\"there is no separate m0scan for the asl data\")\n \n newm0 = fname_presuffix(self.inputs.asl_file,\n suffix='_m0file') \n newm0 = regmotoasl(asl=self.inputs.asl_file,m0file=m0file,m02asl=newm0)\n m0data_smooth = smooth_image(nb.load(newm0), fwhm=self.inputs.fwhm).get_data()\n if len(m0data_smooth.shape) > 3 :\n m0dataf = mask*np.mean(m0data_smooth, axis=3)\n else:\n m0dataf = mask*m0data_smooth\n \n elif self.inputs.in_metadata['M0Type'] == \"Included\":\n modata2 = dataasl[:, :, :, m0list]\n con2 = nb.Nifti1Image(modata2, allasl.affine, allasl.header)\n m0data_smooth = smooth_image(con2, fwhm=self.inputs.fwhm).get_data()\n if len(m0data_smooth.shape) > 3 :\n m0dataf = mask*np.mean(m0data_smooth, axis=3)\n else:\n m0dataf = mask*m0data_smooth\n\n elif self.inputs.in_metadata[\"M0Type\"] == \"Estimate\":\n moestimate=self.inputs.in_metadata['M0Estimate']\n m0dataf = moestimate*mask\n\n elif self.inputs.in_metadata[\"M0Type\"] == \"Absent\":\n if len(controllist) > 0:\n control_img = dataasl[:, :, :, controllist]\n con = nb.Nifti1Image(control_img, allasl.affine, allasl.header)\n control_img1 = smooth_image(con, fwhm=self.inputs.fwhm).get_data()\n m0dataf = mask*np.mean(control_img1, axis=3)\n elif len(cbflist) > 0:\n m0dataf = mask\n else: \n raise RuntimeError(\"m0scan is absent\")\n else:\n raise RuntimeError(\"no pathway to m0scan\")\n \n\n if len(dataasl.shape) == 5:\n raise RuntimeError('Input image (%s) is 5D.')\n if len(deltamlist) > 0 : \n cbf_data = dataasl[:, :, :, deltamlist]\n if len(cbflist) > 0 : \n cbf_data = dataasl[:, :, :, cbflist]\n elif len(labellist) > 0 :\n control_img = dataasl[:, :, :, controllist]\n label_img = dataasl[:, :, :, labellist] \n cbf_data = np.subtract(control_img, label_img)\n else: \n raise RuntimeError('no valid asl or cbf image.')\n \n \n if self.inputs.dummy_vols != 0:\n cbf_data = np.delete(cbf_data, range(0, self.inputs.dummy_vols), axis=3)\n #control_img = np.delete(control_img, range(0, self.inputs.dummy_vols), axis=3)\n\n self._results['out_file'] = fname_presuffix(self.inputs.in_file,\n suffix='_cbftimeseries', newpath=runtime.cwd)\n self._results['out_avg'] = fname_presuffix(self.inputs.in_file,\n suffix='_m0file', newpath=runtime.cwd)\n nb.Nifti1Image(\n cbf_data, allasl.affine, allasl.header).to_filename(\n self._results['out_file'])\n nb.Nifti1Image(\n m0dataf,allasl.affine, allasl.header).to_filename(\n self._results['out_avg'])\n\n self.inputs.out_file = os.path.abspath(self._results['out_file'])\n self.inputs.out_avg = os.path.abspath(self._results['out_avg'])\n return runtime\n\n\nclass _computeCBFInputSpec(BaseInterfaceInputSpec):\n in_cbf = File(exists=True, mandatory=True, desc='cbf nifti')\n in_metadata = traits.Dict(exists=True, mandatory=True,\n desc='metadata for CBF ')\n 
in_m0scale=traits.Float(exists=True, mandatory=True,\n desc='relative scale between asl and m0')\n in_m0file = File(exists=True, mandatory=False, desc='M0 nifti file')\n in_mask = File(exists=True, mandatory=False, desc='mask')\n out_cbf = File(exists=False, mandatory=False, desc='cbf timeries data')\n out_mean = File(exists=False, mandatory=False, desc='average control')\n out_att = File(exists=False, mandatory=False, desc='Arterial Transit Time')\n\n\nclass _computeCBFOutputSpec(TraitedSpec):\n out_cbf = File(exists=False, desc='cbf timeries data')\n out_mean = File(exists=False, desc='average control')\n out_att = File(exists=False, desc='Arterial Transit Time')\n\n\nclass computeCBF(SimpleInterface):\n \"\"\"\n compute cbf pASL or pCASL\n \"\"\"\n input_spec = _computeCBFInputSpec\n output_spec = _computeCBFOutputSpec\n\n def _run_interface(self, runtime):\n cbf, meancbf, att = cbfcomputation(metadata=self.inputs.in_metadata,m0scale=self.inputs.in_m0scale,\n mask=self.inputs.in_mask, m0file=self.inputs.in_m0file,\n cbffile=self.inputs.in_cbf)\n self._results['out_cbf'] = fname_presuffix(self.inputs.in_cbf,\n suffix='_cbf', newpath=runtime.cwd)\n self._results['out_mean'] = fname_presuffix(self.inputs.in_cbf,\n suffix='_meancbf', newpath=runtime.cwd)\n samplecbf = nb.load(self.inputs.in_m0file)\n nb.Nifti1Image(\n cbf, samplecbf.affine, samplecbf.header).to_filename(\n self._results['out_cbf'])\n nb.Nifti1Image(\n meancbf, samplecbf.affine, samplecbf.header).to_filename(\n self._results['out_mean'])\n if att is not None:\n self._results['out_att'] = fname_presuffix(self.inputs.in_cbf,\n suffix='_att', newpath=runtime.cwd)\n nb.Nifti1Image(att, samplecbf.affine, samplecbf.header).to_filename(\n self._results['out_att'])\n self.inputs.out_att = os.path.abspath(self._results['out_att'])\n self.inputs.out_cbf = os.path.abspath(self._results['out_cbf'])\n self.inputs.out_mean = os.path.abspath(self._results['out_mean'])\n # we dont know why not zeros background $\n from nipype.interfaces.fsl import MultiImageMaths\n mat1 = MultiImageMaths()\n mat1.inputs.in_file = self.inputs.out_mean\n mat1.inputs.op_string = \" -mul %s \"\n mat1.inputs.operand_files = self.inputs.in_mask\n mat1.inputs.out_file = self.inputs.out_mean\n mat1.run()\n mat1 = MultiImageMaths()\n mat1.inputs.in_file = self.inputs.out_cbf\n mat1.inputs.op_string = \" -mul %s \"\n mat1.inputs.operand_files = self.inputs.in_mask\n mat1.inputs.out_file = self.inputs.out_cbf\n mat1.run()\n return runtime\n\ndef cbfcomputation(metadata, mask, m0file, cbffile, m0scale=1):\n \n \"\"\"\n compute cbf with pld and multi pld\n metadata\n cbf metadata\n mask\n asl mask in native space\n m0file\n m0scan\n cbffile\n already processed cbf after tag-control substraction\n m0scale\n relative scale between m0scan and asl, default is 1\n \"\"\"\n labeltype = metadata['ArterialSpinLabelingType']\n tau = metadata['LabelingDuration']\n plds = np.array(metadata['PostLabelingDelay'])\n #m0scale = metadata['M0']\n magstrength = metadata['MagneticFieldStrength']\n t1blood = (110*int(magstrength)+1316)/1000 # https://onlinelibrary.wiley.com/doi/pdf/10.1002/mrm.24550 \n inverstiontime = np.add(tau, plds)\n #mask = nb.load(mask).get_fdata()\n\n \n if 'LabelingEfficiency' in metadata.keys():\n labeleff = metadata['LabelingEfficiency']\n elif 'CASL' in labeltype:\n labeleff = 0.72\n elif 'PASL' in labeltype:\n labeleff = 0.8\n else:\n print('no labelelling effiecieny')\n part_coeff = 0.9 # brain partition coefficient\n\n\n\n if 'CASL' in labeltype:\n pf1 = 
(6000*part_coeff)/(2*labeleff*t1blood*(1-np.exp(-(tau/t1blood))))\n perfusion_factor = pf1*np.exp(plds/t1blood)\n elif 'PASL' in labeltype:\n pf1 = (6000*part_coeff)/(2*labeleff)\n perfusion_factor = (pf1*np.exp(inverstiontime/t1blood))/inverstiontime\n #perfusion_factor = np.array(perfusion_factor)\n #print(perfusion_factor)\n\n maskx = nb.load(mask).get_fdata()\n m0data = nb.load(m0file).get_fdata()\n m0data=m0data[maskx==1]\n # compute cbf\n cbf_data = nb.load(cbffile).get_fdata()\n cbf_data = cbf_data[maskx==1]\n cbf1 = np.zeros(cbf_data.shape)\n if len(cbf_data.shape) < 2: \n cbf1 = np.divide(cbf_data,(m0scale*m0data))\n else: \n for i in range(cbf1.shape[1]):\n cbf1[:, i] = np.divide(cbf_data[:,i], (m0scale*m0data))\n # m1=m0scale*m0_data\n # cbf1=np.divide(cbf_data,m1)\n # for compute cbf for each PLD and TI\n att = None \n if hasattr(perfusion_factor, '__len__') and cbf_data.shape[1] > 1 :\n permfactor = np.tile(perfusion_factor ,int(cbf_data.shape[1]/len(perfusion_factor)))\n cbf_data_ts = np.zeros(cbf_data.shape)\n\n #calculate cbf with multiple plds \n for i in range(cbf_data.shape[1]):\n cbf_data_ts[:, i] =np.multiply(cbf1[:, i],permfactor[i])\n cbf = np.zeros([cbf_data_ts.shape[0], int(cbf_data.shape[1]/len(perfusion_factor))])\n cbf_xx=np.split(cbf_data_ts,int(cbf_data_ts.shape[1]/len(perfusion_factor)),axis=1)\n \n # calculate weighted cbf with multiplds\n # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3791289/\n # https://pubmed.ncbi.nlm.nih.gov/22084006/\n for k in range(len(cbf_xx)):\n cbf_plds = cbf_xx[k]\n pldx = np.zeros([cbf_plds.shape[0],len(cbf_plds)])\n for j in range(cbf_plds.shape[1]):\n pldx[:,j] = np.array(np.multiply(cbf_plds[:,j],plds[j]))\n cbf[:, k] = np.divide(np.sum(pldx,axis=1),np.sum(plds))\n\n elif hasattr(perfusion_factor, '__len__') and len(cbf_data.shape) < 2 :\n cbf_ts = np.zeros(cbf.shape,len(perfusion_factor))\n for i in len(perfusion_factor):\n cbf_ts[:,i] = np.multiply(cbf1,perfusion_factor[i])\n cbf = np.divide(np.sum(cbf_ts,axis=1),np.sum(perfusion_factor))\n else:\n cbf = cbf1*np.array(perfusion_factor)\n # cbf is timeseries\n # return cbf to nifti shape\n if len(cbf.shape) < 2:\n tcbf=np.zeros(maskx.shape)\n tcbf[maskx==1]=cbf\n else:\n tcbf=np.zeros([maskx.shape[0],maskx.shape[1],maskx.shape[2],cbf.shape[1]])\n for i in range(cbf.shape[1]):\n tcbfx=np.zeros(maskx.shape) \n tcbfx[maskx==1]=cbf[:,i]\n tcbf[:,:,:,i]=tcbfx\n if len(tcbf.shape) < 4:\n meancbf = tcbf\n else:\n meancbf = np.nanmean(tcbf, axis=3)\n meancbf = np.nan_to_num(meancbf)\n tcbf = np.nan_to_num(tcbf)\n att = np.nan_to_num(att)\n return tcbf, meancbf, att\n\n# score and scrub\n\n\nclass _scorescrubCBFInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True,\n desc='computed CBF from computeCBF')\n in_greyM = File(exists=True, mandatory=True, desc='grey matter')\n in_whiteM = File(exists=True, mandatory=True, desc='white matter')\n in_mask = File(exists=True, mandatory=True, desc='mask')\n in_csf = File(exists=True, mandatory=True, desc='csf')\n in_thresh = traits.Float(default_value=0.7, exists=True,\n mandatory=False, desc='threshold of propbaility matter')\n in_wfun = traits.Str(exists=True, mandatory=False, default_value='huber',\n option=['bisquare', 'andrews', 'cauchy', 'fair', 'logistics',\n 'ols', 'talwar', 'welsch'],\n desc='wavelet fun ')\n out_score = File(exists=False, desc='score timeseries data')\n out_avgscore = File(exists=False, desc='average score')\n out_scrub = File(exists=False, desc='average scrub')\n out_scoreindex = 
File(exists=False, desc='index of volumes removed or kept by score')\n\n\nclass _scorescrubCBFOutputSpec(TraitedSpec):\n    out_score = File(exists=False, mandatory=False, desc='score timeseries data')\n    out_avgscore = File(exists=False, mandatory=False, desc='average score')\n    out_scrub = File(exists=False, mandatory=False, desc='average scrub')\n    out_scoreindex = File(exists=False, mandatory=False, desc='index of volumes removed by score')\n\n\nclass scorescrubCBF(SimpleInterface):\n    \"\"\"\n    compute score and scrub\n    \"\"\"\n    input_spec = _scorescrubCBFInputSpec\n    output_spec = _scorescrubCBFOutputSpec\n\n    def _run_interface(self, runtime):\n        cbf_ts = nb.load(self.inputs.in_file).get_fdata()\n        mask = nb.load(self.inputs.in_mask).get_fdata()\n        greym = nb.load(self.inputs.in_greyM).get_fdata()\n        whitem = nb.load(self.inputs.in_whiteM).get_fdata()\n        csf = nb.load(self.inputs.in_csf).get_fdata()\n        if len(cbf_ts.shape) > 3:\n            cbf_scorets, index_score = _getcbfscore(cbfts=cbf_ts, wm=whitem,\n                                                    gm=greym, csf=csf, mask=mask,\n                                                    thresh=self.inputs.in_thresh)\n            cbfscrub = _scrubcbf(cbf_ts=cbf_scorets, gm=greym, wm=whitem, csf=csf,\n                                 mask=mask, wfun=self.inputs.in_wfun, thresh=self.inputs.in_thresh)\n            avgscore = np.mean(cbf_scorets, axis=3)\n        else:\n            cbf_scorets = cbf_ts\n            index_score = np.array([0])\n            cbfscrub = cbf_ts\n            avgscore = cbf_ts\n        \n        self._results['out_score'] = fname_presuffix(self.inputs.in_file,\n                                                     suffix='_cbfscorets', newpath=runtime.cwd)\n        self._results['out_avgscore'] = fname_presuffix(self.inputs.in_file,\n                                                        suffix='_meancbfscore',\n                                                        newpath=runtime.cwd)\n        self._results['out_scrub'] = fname_presuffix(self.inputs.in_file,\n                                                     suffix='_cbfscrub', newpath=runtime.cwd)\n        self._results['out_scoreindex'] = fname_presuffix(self.inputs.in_file,\n                                                          suffix='_scoreindex.txt',\n                                                          newpath=runtime.cwd, use_ext=False)\n        samplecbf = nb.load(self.inputs.in_mask)\n\n        nb.Nifti1Image(dataobj=cbf_scorets, affine=samplecbf.affine, header=samplecbf.header).to_filename(self._results['out_score'])\n        nb.Nifti1Image(\n            dataobj=avgscore, affine=samplecbf.affine, header=samplecbf.header).to_filename(\n            self._results['out_avgscore'])\n        nb.Nifti1Image(\n            dataobj=cbfscrub, affine=samplecbf.affine, header=samplecbf.header).to_filename(\n            self._results['out_scrub'])\n\n        np.savetxt(self._results['out_scoreindex'], index_score, delimiter=',')\n\n        self.inputs.out_score = os.path.abspath(self._results['out_score'])\n        self.inputs.out_avgscore = os.path.abspath(self._results['out_avgscore'])\n        self.inputs.out_scrub = os.path.abspath(self._results['out_scrub'])\n        self.inputs.out_scoreindex = os.path.abspath(self._results['out_scoreindex'])\n        return runtime\n\n\ndef _weightfun(x, wfun='huber'):\n    \"\"\"\n    get the weights and tuning constant for a robust weight function\n\n    \"\"\"\n    if wfun == 'andrews':\n        tuner = 1.339\n        weight = (np.abs(x) < np.pi)*np.sin(x)\n    elif wfun == 'bisquare':\n        tuner = 4.685\n        weight = (np.abs(x) < 1)*np.power((1-np.power(x, 2)), 2)\n    elif wfun == 'cauchy':\n        tuner = 2.385\n        weight = 1/(1+np.power(x, 2))\n    elif wfun == 'logistic':\n        tuner = 1.205\n        weight = np.tanh(x)/x\n    elif wfun == 'ols':\n        tuner = 1\n        weight = np.repeat(1, len(x))\n    elif wfun == 'talwar':\n        tuner = 2.795\n        weight = 1*(np.abs(x) < 1)\n    elif wfun == 'welsch':\n        tuner = 2.985\n        weight = np.exp(-(np.power(x, 2)))\n    else:\n        tuner = 1.345\n        weight = 1/np.abs(x)\n    return weight, tuner\n\n\ndef _tune(wfun='huber'):\n    \"\"\"\n    get the tuning constant for a robust weight function\n\n    \"\"\"\n    if wfun == 'andrews':\n        tuner = 1.339\n    elif wfun == 'bisquare':\n        tuner = 4.685\n    elif wfun == 'cauchy':\n        tuner = 2.385\n    
elif wfun == 'logistic':\n tuner = 1.205\n elif wfun == 'ols':\n tuner = 1\n elif wfun == 'talwar':\n tuner = 2.795\n elif wfun == 'welsch':\n tuner = 2.985\n else:\n tuner = 1.345\n return tuner\n\n\ndef _getchisquare(n):\n a = [0.000000, 15.484663, 8.886835, 7.224733, 5.901333, 5.126189, 4.683238, 4.272937, 4.079918,\n 3.731612, 3.515615, 3.459711, 3.280471, 3.078046, 3.037280, 2.990761, 2.837119, 2.795526,\n 2.785189, 2.649955, 2.637642, 2.532700, 2.505253, 2.469810, 2.496135, 2.342210, 2.384975,\n 2.275019, 2.244482, 2.249109, 2.271968, 2.210340, 2.179537, 2.133762, 2.174928, 2.150072,\n 2.142526, 2.071512, 2.091061, 2.039329, 2.053183, 2.066396, 1.998564, 1.993568, 1.991905,\n 1.981837, 1.950225, 1.938580, 1.937753, 1.882911, 1.892665, 1.960767, 1.915530, 1.847124,\n 1.947374, 1.872383, 1.852023, 1.861169, 1.843109, 1.823870, 1.809643, 1.815038, 1.848064,\n 1.791687, 1.768343, 1.778231, 1.779046, 1.759597, 1.774383, 1.774876, 1.751232, 1.755293,\n 1.757028, 1.751388, 1.739384, 1.716395, 1.730631, 1.718389, 1.693839, 1.696862, 1.691245,\n 1.682541, 1.702515, 1.700991, 1.674607, 1.669986, 1.688864, 1.653713, 1.641309, 1.648462,\n 1.630380, 1.634156, 1.660821, 1.625298, 1.643779, 1.631554, 1.643987, 1.624604, 1.606314,\n 1.609462]\n b = [0, 2.177715, 1.446966, 1.272340, 1.190646, 1.151953, 1.122953, 1.103451, 1.089395,\n 1.079783, 1.071751, 1.063096, 1.058524, 1.054137, 1.049783, 1.046265, 1.043192,\n 1.039536, 1.038500, 1.037296, 1.033765, 1.032317, 1.031334, 1.029551, 1.028829,\n 1.027734, 1.024896, 1.024860, 1.025207, 1.024154, 1.022032, 1.021962, 1.021514,\n 1.020388, 1.019238, 1.020381, 1.019068, 1.018729, 1.018395, 1.017134, 1.016539,\n 1.015676, 1.015641, 1.015398, 1.015481, 1.015566, 1.014620, 1.014342, 1.013901,\n 1.013867, 1.013838, 1.013602, 1.013322, 1.012083, 1.013168, 1.012667, 1.011087,\n 1.011959, 1.011670, 1.011494, 1.010463, 1.010269, 1.010393, 1.010004, 1.010775,\n 1.009399, 1.011000, 1.010364, 1.009831, 1.009563, 1.010085, 1.009149, 1.008444,\n 1.009455, 1.009705, 1.008597, 1.008644, 1.008051, 1.008085, 1.008550, 1.008265,\n 1.009141, 1.008235, 1.008002, 1.008007, 1.007660, 1.007993, 1.007184, 1.008093,\n 1.007816, 1.007770, 1.007932, 1.007819, 1.007063, 1.006712, 1.006752, 1.006703,\n 1.006650, 1.006743, 1.007087]\n return a[n-1], b[n-1]\n\n\ndef _getcbfscore(cbfts, wm, gm, csf, mask, thresh=0.7):\n \"\"\" \n score algorithm by Sudipto\n removing noisy cbf volume\n cbf_ts\n nd array of 3D or 4D computed cbf\n gm,wm,csf \n numpy array of grey matter, whitematter, and csf\n mask \n numpy array of mask \n\n reference:\n\n \"\"\"\n gm[gm < thresh] = 0\n gm[gm > 0] = 1\n wm[wm < thresh] = 0\n wm[wm > 0] = 1\n csf[csf < thresh] = 0\n csf[csf > 0] = 1\n # get the total number of voxle within csf,gm and wm\n nogm = np.sum(gm == 1) - 1\n nowm = np.sum(wm == 1) - 1\n nocf = np.sum(csf == 1) - 1\n mask1 = gm + wm + csf\n # msk=sum(mask>0)\n # mean of times series cbf within greymatter\n mgmts = np.squeeze(np.mean(cbfts[gm == 1, :], axis=0))\n # robiust mean and meadian\n from scipy.stats import median_absolute_deviation\n medmngm = np.median(mgmts)\n sdmngm = median_absolute_deviation(mgmts)/0.675\n indx = 1*(np.abs(mgmts-medmngm) > (2.5*sdmngm))\n R = np.mean(cbfts[:, :, :, indx == 0], axis=3)\n V = nogm*np.var(R[gm == 1]) + nowm*np.var(R[wm == 1]) + nocf*np.var(R[csf == 1])\n V1 = V+1\n while V < V1:\n V1 = V\n CC = np.zeros(cbfts.shape[3])*(-2)\n for s in range(cbfts.shape[3]):\n if indx[s] != 0:\n break\n else:\n tmp1 = cbfts[:, :, :, s]\n CC[s] = np.corrcoef(R[mask1 > 0], 
tmp1[mask1 > 0])[0][1]\n inx = np.argmax(CC)\n indx[inx] = 2\n R = np.mean(cbfts[:, :, :, indx == 0], axis=3)\n V = nogm*np.var(R[gm == 1]) + nowm*np.var(R[wm == 1]) + nocf*np.var(R[csf == 1])\n cbfts_recon = cbfts[:, :, :, indx == 0]\n cbfts_recon1 = np.zeros_like(cbfts_recon)\n for i in range(cbfts_recon.shape[3]):\n cbfts_recon1[:, :, :, i] = cbfts_recon[:, :, :, i]*mask\n cbfts_recon1 = np.nan_to_num(cbfts_recon1)\n return cbfts_recon1, indx\n\n\ndef _roubustfit(Y, mu, Globalprior, modrobprior, lmd=0, localprior=0, wfun='huber', tune=1.345,\n flagstd=1, flagmodrobust=1, flagprior=1, thresh=0.7):\n \"\"\"\n robust fit \n \"\"\"\n dimcbf = Y.shape\n priow = np.ones([dimcbf[0], dimcbf[1]])\n sw = 1\n X = priow\n b = (np.sum(X*Y, axis=0)+mu*Globalprior+lmd*localprior)/(np.sum(X*X, axis=0)+mu+lmd)\n b0 = np.repeat(0, len(b))\n h1 = X/np.power(np.tile(np.sqrt(np.sum(X*X, axis=0)), (dimcbf[0], 1)), 2)\n h0 = 0.9999*np.ones([dimcbf[0], dimcbf[1]])\n h = np.minimum(h0, h1)\n adjfactor = 1/(np.sqrt(1-h/priow))\n tiny_s = (1e-6)*(np.std(h, axis=0))\n tiny_s[tiny_s == 0] = 1\n D = np.sqrt(np.finfo(float).eps)\n iter = 0\n interlim = 10\n while iter < interlim:\n print('iteration ', iter, \"\\n\")\n iter = iter + 1\n check1 = np.subtract(np.abs(b-b0), (D*np.maximum(np.abs(b), np.abs(b0))))\n check1[check1 > 0] = 0\n if any(check1):\n print(' \\n converged after ', iter, \"iterations\\n\")\n break\n r = Y - X*(np.tile(b, (dimcbf[0], 1)))\n radj = r * adjfactor/sw\n if flagstd == 1:\n s = np.sqrt(np.mean(np.power(radj, 2), axis=0))\n else:\n rs = np.sort(np.abs(radj), axis=0)\n s = np.median(rs, axis=0)/0.6745\n rx1 = radj*(1-flagmodrobust*np.exp(-np.tile(modrobprior, (dimcbf[0], 1))))\n rx2 = np.tile(np.maximum(s, tiny_s)*tune, (dimcbf[0], 1))\n r1 = rx1 / rx2\n w, _ = _weightfun(r1, wfun)\n b0 = b\n z = np.sqrt(w)\n x = X*z\n yz = Y*z\n b = (np.sum(x*yz, axis=0)+mu*Globalprior+lmd*localprior)/(np.sum(x*x, axis=0)+mu+lmd)\n b = np.nan_to_num(b)\n return b\n\n\ndef _scrubcbf(cbf_ts, gm, wm, csf, mask, wfun='huber', thresh=0.7):\n \n \"\"\" \n scrub algorithms by Sudipto\n cbf_ts\n nd array of 3D or 4D computed cbf\n gm,wm,csf \n numpy array of grey matter, whitematter, and csf\n mask \n numpy array of mask \n \n wf \n wave function\n\n reference:\n\n \"\"\"\n\n gm = mask*gm\n wm = mask*wm\n csf = csf*mask\n gmidx = gm[mask == 1]\n gmidx[gmidx < thresh] = 0\n gmidx[gmidx > 0] = 1\n wmidx = wm[mask == 1]\n wmidx[wmidx < thresh] = 0\n wmidx[wmidx > 0] = 1\n csfidx = csf[mask == 1]\n csfidx[csfidx < thresh] = 0\n csfidx[csfidx > 0] = 1\n # midx = mask[mask==1]\n meancbf = np.mean(cbf_ts, axis=3)\n y = np.transpose(cbf_ts[mask == 1, :, ])\n VV = np.var(y, axis=0)\n thresh1, thresh3 = _getchisquare(y.shape[0])\n mu1 = VV/(np.median(VV[gmidx == 1])*thresh3)\n mu = ((mu1 > thresh1) & (mu1 < 10*thresh1))*(mu1-thresh1) \\\n + (mu1 >= 10*thresh1)*(1/(2*thresh1*10) * np.power(mu1, 2))+(thresh1*10/2 - thresh1)\n M = meancbf*mask\n M[mask == 1] = mu\n modrobprior = mu/10\n gmidx2 = 1*([gm.flatten() > thresh] and [M.flatten() == 0]\n and [wm.flatten() > csf.flatten()])[0]\n wmidx2 = 1*([wm.flatten() > thresh] and [M.flatten() == 0]\n and [gm.flatten() > csf.flatten()])[0]\n if np.sum(gmidx2) == 0 or np.sum(wmidx2) == 0:\n gmidx2 = 1*(gm.flatten() > thresh)\n wmidx2 = 1*(wm.flatten() > thresh)\n idxx = gmidx2 + wmidx2\n idxx[idxx > 0] = 1\n X = np.zeros([len(idxx), 2])\n X[:, 0] = gm.flatten()[gm.flatten() >= (0)]*idxx\n X[:, 1] = wm.flatten()[wm.flatten() >= (0)]*idxx\n A = (meancbf.flatten()[idxx >= 0])*idxx\n c = 
np.linalg.lstsq(X, A, rcond=None)[0]\n    Globalpriorfull = c[0]*gm.flatten() + c[1]*wm.flatten()\n    Globalprior = Globalpriorfull[mask.flatten() == 1]\n    localprior = 0\n    lmd = 0\n    tune = _tune(wfun=wfun)\n    bb = _roubustfit(Y=y, mu=mu, Globalprior=Globalprior, modrobprior=modrobprior,\n                     lmd=lmd, localprior=localprior, wfun=wfun, tune=tune, flagstd=1,\n                     flagmodrobust=1, flagprior=1, thresh=0.7)\n    newcbf = meancbf*mask\n    newcbf[mask == 1] = bb\n    newcbf = np.nan_to_num(newcbf)\n    return newcbf\n\n# basil and pvcorr\n\n\nclass _BASILCBFInputSpec(FSLCommandInputSpec):\n    # We use position args here as list indices - so a negative number\n    # will put something on the end\n    in_file = File(\n        exists=True,\n        desc=\"input file cbf after subtracting tag-control or control-tag\",\n        argstr=\" -i %s\",\n        position=0,\n        mandatory=True,\n    )\n    mask = File(exists=True, argstr=\" -m %s \", desc=\"mask in the same space as in_file\",\n                mandatory=True,)\n    mzero = File(exists=True, argstr=\" -c %s \", desc='m0 scan', mandatory=False)\n    m0scale = traits.Float(desc='calibration gain for asl', argstr=\" --cgain %.2f \", mandatory=True)\n    m0tr = traits.Float(desc='Mzero TR', argstr=\" --tr %.2f \", mandatory=True,)\n    tis = traits.Str(desc='recovery time = plds+bolus', argstr=\" --tis %s \", mandatory=True,)\n    pcasl = traits.Bool(desc='label type: default is PASL', argstr=\" --casl \",\n                        mandatory=False, default_value=False)\n    bolus = traits.Float(desc='bolus or tau: label duration', argstr=\" --bolus %.2f \",\n                         mandatory=True)\n    pvc = traits.Bool(desc='partial volume correction', mandatory=False, argstr=\" --pvcorr \",\n                      default_value=True)\n    pvgm = File(exists=True, mandatory=False, desc='grey matter probability map',\n                argstr=\" --pvgm %s \")\n    pvwm = File(exists=True, mandatory=False, desc='white matter probability map',\n                argstr=\" --pvwm %s \")\n    out_basename = File(desc=\"base name of output files\", argstr=\" -o %s \", mandatory=True)\n    out_cbfb = File(exists=False, desc='cbf with spatial correction')\n    out_cbfpv = File(exists=False, desc='cbf with partial volume correction')\n    out_att = File(exists=False, desc='arterial transit time')\n    # environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}\n\n\nclass _BASILCBFOutputSpec(TraitedSpec):\n    out_cbfb = File(exists=False, desc='cbf with spatial correction')\n    out_cbfpv = File(exists=False, desc='cbf with partial volume correction')\n    out_att = File(exists=False, desc='arterial transit time')\n\n\nclass BASILCBF(FSLCommand):\n    r\"\"\"\n    oxford_asl\n    https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BASIL\n    \"\"\"\n    _cmd = \" oxford_asl \"\n    input_spec = _BASILCBFInputSpec\n    output_spec = _BASILCBFOutputSpec\n\n    def _run_interface(self, runtime):\n        runtime = super(BASILCBF, self)._run_interface(runtime)\n        return runtime\n\n    def _gen_outfilename(self, suffix):\n        if isdefined(self.inputs.in_file):\n            out_file = self._gen_fname(self.inputs.in_file, suffix=suffix)\n        return os.path.abspath(out_file)\n\n    def _list_outputs(self):\n        outputs = self.output_spec().get()\n        # outputs[\"out_cbfb\"]=self.inputs.out_basename+'/basilcbf.nii.gz'\n        outputs[\"out_cbfb\"] = fname_presuffix(self.inputs.mask, suffix='_cbfbasil')\n        from shutil import copyfile\n        copyfile(self.inputs.out_basename+'/native_space/perfusion_calib.nii.gz',\n                 outputs[\"out_cbfb\"])\n        \n        \n        # outputs[\"out_att\"]=self.inputs.out_basename+'/arrivaltime.nii.gz'\n        outputs[\"out_att\"] = fname_presuffix(self.inputs.mask, suffix='_arrivaltime')\n        copyfile(self.inputs.out_basename+'/native_space/arrival.nii.gz', outputs[\"out_att\"])\n        self.inputs.out_att = 
os.path.abspath(outputs[\"out_att\"])\n \n # outputs[\"out_cbfpv\"]=self.inputs.out_basename+'/basilcbfpv.nii.gz'\n outputs[\"out_cbfpv\"] = fname_presuffix(self.inputs.mask, suffix='_cbfbasilpv')\n copyfile(self.inputs.out_basename+'/native_space/pvcorr/perfusion_calib.nii.gz',\n outputs[\"out_cbfpv\"])\n self.inputs.out_cbfb = os.path.abspath(outputs[\"out_cbfb\"])\n self.inputs.out_cbfpv = os.path.abspath(outputs[\"out_cbfpv\"])\n return outputs\n\n\nclass _qccbfInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc='original asl_file')\n in_meancbf = File(exists=True, mandatory=True, desc='cbf img')\n in_avgscore = File(exists=True, mandatory=False, desc='cbf img')\n in_scrub = File(exists=True, mandatory=False, desc='cbf img')\n in_basil = File(exists=True, mandatory=False, desc='cbf img')\n in_pvc = File(exists=True, mandatory=False, desc='cbf img')\n in_greyM = File(exists=True, mandatory=True, desc='grey matter')\n in_whiteM = File(exists=True, mandatory=True, desc='white matter')\n in_csf = File(exists=True, mandatory=True, desc='csf')\n in_confmat = File(exists=True, mandatory=False, desc=' cofnound matrix')\n in_aslmask = File(exists=True, mandatory=True, desc='asl mask in native space')\n in_t1mask = File(exists=True, mandatory=True, desc='t1wmask in native space ')\n in_aslmaskstd = File(exists=True, mandatory=False, desc='asl mask in native space')\n in_templatemask = File(exists=True, mandatory=False, desc='template mask or image')\n qc_file = File(exists=False, mandatory=False, desc='qc file ')\n\n\nclass _qccbfOutputSpec(TraitedSpec):\n qc_file = File(exists=False, desc='qc file ')\n\n\nclass qccbf(SimpleInterface):\n r\"\"\"\"\n compute qc from confound regressors \n and cbf maps, \n coregistration and regsitration indexes\n\n \"\"\"\n\n input_spec = _qccbfInputSpec\n output_spec = _qccbfOutputSpec\n\n def _run_interface(self, runtime):\n\n time1 = pd.read_csv(self.inputs.in_confmat, sep='\\t')\n time1.fillna(0, inplace=True)\n fd = np.mean(time1['framewise_displacement'])\n rms = time1[['rot_x', 'rot_y', 'rot_z']]\n rms1 = rms.pow(2)\n rms = np.mean(np.sqrt(rms1.sum(axis=1)/3))\n regDC = dc(self.inputs.in_aslmask, self.inputs.in_t1mask)\n regJC = jc(self.inputs.in_aslmask, self.inputs.in_t1mask)\n regCC = crosscorr(self.inputs.in_aslmask, self.inputs.in_t1mask)\n regCov = coverage(self.inputs.in_aslmask, self.inputs.in_t1mask)\n\n if self.inputs.in_aslmaskstd and self.inputs.in_templatemask:\n normDC = dc(self.inputs.in_aslmaskstd, self.inputs.in_templatemask)\n normJC = jc(self.inputs.in_aslmaskstd, self.inputs.in_templatemask)\n normCC = crosscorr(self.inputs.in_aslmaskstd, self.inputs.in_templatemask)\n normCov = coverage(self.inputs.in_aslmaskstd, self.inputs.in_templatemask)\n\n meancbf_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_meancbf, thresh=0.7)\n meancbf = globalcbf(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, cbf=self.inputs.in_meancbf, thresh=0.7)\n\n if self.inputs.in_avgscore:\n scorecbf_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_avgscore, thresh=0.7)\n scrub_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_scrub, thresh=0.7)\n negscore = negativevoxel(cbf=self.inputs.in_avgscore, gm=self.inputs.in_greyM, thresh=0.7)\n negscrub = negativevoxel(cbf=self.inputs.in_scrub, gm=self.inputs.in_greyM, 
thresh=0.7)\n else:\n scorecbf_qei = 0\n scrub_qei = 0 \n negscore = 0\n negscrub = 0\n\n if self.inputs.in_basil:\n basilcbf_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_basil, thresh=0.7)\n pvcbf_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_pvc, thresh=0.7)\n negbasil = negativevoxel(cbf=self.inputs.in_basil, gm=self.inputs.in_greyM, thresh=0.7)\n negpvc = negativevoxel(cbf=self.inputs.in_pvc, gm=self.inputs.in_greyM, thresh=0.7)\n else:\n basilcbf_qei = 0\n pvcbf_qei = 0 \n negbasil = 0\n negpvc = 0\n \n \n gwratio = np.divide(meancbf[0], meancbf[1])\n negcbf = negativevoxel(cbf=self.inputs.in_meancbf, gm=self.inputs.in_greyM, thresh=0.7)\n \n \n\n if self.inputs.in_aslmaskstd and self.inputs.in_templatemask:\n dict1 = {'FD': [fd], 'relRMS': [rms], 'coregDC': [regDC], 'coregJC': [regJC],\n 'coregCC': [regCC], 'coregCOV': [regCov], 'normDC': [normDC],\n 'normJC': [normJC], 'normCC': [normCC], 'normCOV': [normCov],\n 'cbfQEI': [meancbf_qei], 'scoreQEI': [scorecbf_qei], 'scrubQEI': [scrub_qei],\n 'basilQEI': [basilcbf_qei], 'pvcQEI': [pvcbf_qei], 'GMmeanCBF': [meancbf[0]],\n 'WMmeanCBF': [meancbf[1]], 'Gm_Wm_CBF_ratio': [gwratio],\n 'NEG_CBF_PERC': [negcbf], 'NEG_SCORE_PERC': [negscore],\n 'NEG_SCRUB_PERC': [negscrub], 'NEG_BASIL_PERC': [negbasil],\n 'NEG_PVC_PERC': [negpvc]}\n else:\n dict1 = {'FD': [fd], 'relRMS': [rms], 'coregDC': [regDC], 'coregJC': [regJC],\n 'coregCC': [regCC], 'coregCOV': [regCov],\n 'cbfQEI': [meancbf_qei], 'scoreQEI': [scorecbf_qei], 'scrubQEI': [scrub_qei],\n 'basilQEI': [basilcbf_qei], 'pvcQEI': [pvcbf_qei], 'GMmeanCBF': [meancbf[0]],\n 'WMmeanCBF': [meancbf[1]], 'Gm_Wm_CBF_ratio': [gwratio],\n 'NEG_CBF_PERC': [negcbf], 'NEG_SCORE_PERC': [negscore],\n 'NEG_SCRUB_PERC': [negscrub], 'NEG_BASIL_PERC': [negbasil],\n 'NEG_PVC_PERC': [negpvc]}\n _, file1 = os.path.split(self.inputs.in_file)\n bb = file1.split('_')\n dict2 = {}\n for i in range(len(bb)-1):\n dict2.update({bb[i].split('-')[0]: bb[i].split('-')[1]})\n dict2.update(dict1)\n\n df = pd.DataFrame(dict2)\n\n self._results['qc_file'] = fname_presuffix(self.inputs.in_meancbf, suffix='qc_cbf.csv',\n newpath=runtime.cwd, use_ext=False)\n df.to_csv(self._results['qc_file'], index=False, header=True)\n\n self.inputs.qc_file = os.path.abspath(self._results['qc_file'])\n return runtime\n \n\nclass _qccbfgeInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc='original asl_file')\n in_meancbf = File(exists=True, mandatory=True, desc='cbf img')\n in_avgscore = File(exists=True, mandatory=False, desc='cbf img')\n in_scrub = File(exists=True, mandatory=False, desc='cbf img')\n in_basil = File(exists=True, mandatory=False, desc='cbf img')\n in_pvc = File(exists=True, mandatory=False, desc='cbf img')\n in_greyM = File(exists=True, mandatory=True, desc='grey matter')\n in_whiteM = File(exists=True, mandatory=True, desc='white matter')\n in_csf = File(exists=True, mandatory=True, desc='csf')\n in_aslmask = File(exists=True, mandatory=True, desc='asl mask in native space')\n in_t1mask = File(exists=True, mandatory=True, desc='t1wmask in native space ')\n in_aslmaskstd = File(exists=True, mandatory=False, desc='asl mask in native space')\n in_templatemask = File(exists=True, mandatory=False, desc='template mask or image')\n qc_file = File(exists=False, mandatory=False, desc='qc file ')\n\n\nclass _qccbfgeOutputSpec(TraitedSpec):\n qc_file = File(exists=False, desc='qc 
file ')\n\n\nclass qccbfge(SimpleInterface):\n r\"\"\"\"\n compute qc from confound regressors \n and cbf maps, \n coregistration and regsitration indexes\n\n \"\"\"\n\n input_spec = _qccbfInputSpec\n output_spec = _qccbfOutputSpec\n\n def _run_interface(self, runtime):\n regDC = dc(self.inputs.in_aslmask, self.inputs.in_t1mask)\n regJC = jc(self.inputs.in_aslmask, self.inputs.in_t1mask)\n regCC = crosscorr(self.inputs.in_aslmask, self.inputs.in_t1mask)\n regCov = coverage(self.inputs.in_aslmask, self.inputs.in_t1mask)\n\n if self.inputs.in_aslmaskstd and self.inputs.in_templatemask:\n normDC = dc(self.inputs.in_aslmaskstd, self.inputs.in_templatemask)\n normJC = jc(self.inputs.in_aslmaskstd, self.inputs.in_templatemask)\n normCC = crosscorr(self.inputs.in_aslmaskstd, self.inputs.in_templatemask)\n normCov = coverage(self.inputs.in_aslmaskstd, self.inputs.in_templatemask)\n\n meancbf_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_meancbf, thresh=0.8)\n meancbf = globalcbf(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, cbf=self.inputs.in_meancbf, thresh=0.8)\n \n\n if self.inputs.in_avgscore:\n scorecbf_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_avgscore, thresh=0.8)\n scrub_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_scrub, thresh=0.8)\n negscore = negativevoxel(cbf=self.inputs.in_avgscore, gm=self.inputs.in_greyM, thresh=0.8)\n negscrub = negativevoxel(cbf=self.inputs.in_scrub, gm=self.inputs.in_greyM, thresh=0.8)\n else:\n scorecbf_qei = 0\n scrub_qei = 0 \n negscore = 0\n negscrub = 0\n\n if self.inputs.in_basil:\n basilcbf_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_basil, thresh=0.8)\n pvcbf_qei = cbf_qei(gm=self.inputs.in_greyM, wm=self.inputs.in_whiteM,\n csf=self.inputs.in_csf, img=self.inputs.in_pvc, thresh=0.8)\n negbasil = negativevoxel(cbf=self.inputs.in_basil, gm=self.inputs.in_greyM, thresh=0.8)\n negpvc = negativevoxel(cbf=self.inputs.in_pvc, gm=self.inputs.in_greyM, thresh=0.8)\n else:\n basilcbf_qei = 0\n pvcbf_qei = 0 \n negbasil = 0\n negpvc = 0\n gwratio = np.divide(meancbf[0], meancbf[1])\n negcbf = negativevoxel(cbf=self.inputs.in_meancbf, gm=self.inputs.in_greyM, thresh=0.8)\n\n if self.inputs.in_aslmaskstd and self.inputs.in_templatemask:\n dict1 = {'FD': 0, 'relRMS': 0, 'coregDC': [regDC], 'coregJC': [regJC],\n 'coregCC': [regCC], 'coregCOV': [regCov], 'normDC': [normDC],\n 'normJC': [normJC], 'normCC': [normCC], 'normCOV': [normCov],\n 'cbfQEI': [meancbf_qei], 'scoreQEI': [scorecbf_qei], 'scrubQEI': [scrub_qei],\n 'basilQEI': [basilcbf_qei], 'pvcQEI': [pvcbf_qei], 'GMmeanCBF': [meancbf[0]],\n 'WMmeanCBF': [meancbf[1]], 'Gm_Wm_CBF_ratio': [gwratio],\n 'NEG_CBF_PERC': [negcbf], 'NEG_SCORE_PERC': [negscore],\n 'NEG_SCRUB_PERC': [negscrub], 'NEG_BASIL_PERC': [negbasil],\n 'NEG_PVC_PERC': [negpvc]}\n else:\n dict1 = {'FD': 0, 'relRMS': 0, 'coregDC': [regDC], 'coregJC': [regJC],\n 'coregCC': [regCC], 'coregCOV': [regCov],\n 'cbfQEI': [meancbf_qei], 'scoreQEI': [scorecbf_qei], 'scrubQEI': [scrub_qei],\n 'basilQEI': [basilcbf_qei], 'pvcQEI': [pvcbf_qei], 'GMmeanCBF': [meancbf[0]],\n 'WMmeanCBF': [meancbf[1]], 'Gm_Wm_CBF_ratio': [gwratio],\n 'NEG_CBF_PERC': [negcbf], 'NEG_SCORE_PERC': [negscore],\n 'NEG_SCRUB_PERC': [negscrub], 'NEG_BASIL_PERC': [negbasil],\n 'NEG_PVC_PERC': [negpvc]}\n 
_, file1 = os.path.split(self.inputs.in_file)\n        bb = file1.split('_')\n        dict2 = {}\n        for i in range(len(bb)-1):\n            dict2.update({bb[i].split('-')[0]: bb[i].split('-')[1]})\n        dict2.update(dict1)\n\n        df = pd.DataFrame(dict2)\n\n        self._results['qc_file'] = fname_presuffix(self.inputs.in_meancbf, suffix='qc_cbf.csv',\n                                                   newpath=runtime.cwd, use_ext=False)\n        df.to_csv(self._results['qc_file'], index=False, header=True)\n\n        self.inputs.qc_file = os.path.abspath(self._results['qc_file'])\n        return runtime\n\ndef dc(input1, input2):\n    r\"\"\"\n    Dice coefficient\n    Computes the Dice coefficient (also known as Sorensen index) between the binary\n    objects in two images.\n    The metric is defined as\n    .. math::\n        DC=\\frac{2|A\\cap B|}{|A|+|B|}\n    , where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).\n    Parameters\n    ----------\n    input1 : array_like\n        Input data containing objects. Can be any type but will be converted\n        into binary: background where 0, object everywhere else.\n    input2 : array_like\n        Input data containing objects. Can be any type but will be converted\n        into binary: background where 0, object everywhere else.\n    Returns\n    -------\n    dc : float\n        The Dice coefficient between the object(s) in `input1` and the\n        object(s) in `input2`. It ranges from 0 (no overlap) to 1 (perfect overlap).\n    Notes\n    -----\n    This is a real metric.\n    \"\"\"\n    input1 = nb.load(input1).get_fdata()\n    input2 = nb.load(input2).get_fdata()\n    input1 = np.atleast_1d(input1.astype(bool))\n    input2 = np.atleast_1d(input2.astype(bool))\n\n    intersection = np.count_nonzero(input1 & input2)\n\n    size_i1 = np.count_nonzero(input1)\n    size_i2 = np.count_nonzero(input2)\n\n    try:\n        dc = 2. * intersection / float(size_i1 + size_i2)\n    except ZeroDivisionError:\n        dc = 0.0\n\n    return dc\n\n\ndef jc(input1, input2):\n    r\"\"\"\n    Jaccard coefficient\n    Computes the Jaccard coefficient between the binary objects in two images.\n    Parameters\n    ----------\n    input1: array_like\n        Input data containing objects. Can be any type but will be converted\n        into binary: background where 0, object everywhere else.\n    input2: array_like\n        Input data containing objects. Can be any type but will be converted\n        into binary: background where 0, object everywhere else.\n    Returns\n    -------\n    jc: float\n        The Jaccard coefficient between the object(s) in `input1` and the\n        object(s) in `input2`. It ranges from 0 (no overlap) to 1 (perfect overlap).\n    Notes\n    -----\n    This is a real metric.\n    \"\"\"\n    input1 = nb.load(input1).get_fdata()\n    input2 = nb.load(input2).get_fdata()\n    input1 = np.atleast_1d(input1.astype(bool))\n    input2 = np.atleast_1d(input2.astype(bool))\n\n    intersection = np.count_nonzero(input1 & input2)\n    union = np.count_nonzero(input1 | input2)\n\n    jc = float(intersection) / float(union)\n\n    return jc\n\n\ndef crosscorr(input1, input2):\n    r\"\"\"\n    cross correlation\n    compute the cross correlation between two input masks\n    \"\"\"\n    input1 = nb.load(input1).get_fdata()\n    input2 = nb.load(input2).get_fdata()\n    input1 = np.atleast_1d(input1.astype(bool)).flatten()\n    input2 = np.atleast_1d(input2.astype(bool)).flatten()\n    cc = np.corrcoef(input1, input2)[0][1]\n    return cc\n\n\ndef coverage(input1, input2):\n    \"\"\"\n    estimate the coverage between two masks\n    \"\"\"\n    input1 = nb.load(input1).get_fdata()\n    input2 = nb.load(input2).get_fdata()\n    input1 = np.atleast_1d(input1.astype(bool))\n    input2 = np.atleast_1d(input2.astype(bool))\n    intsec = np.count_nonzero(input1 & input2)\n    if np.sum(input1) > np.sum(input2):\n        smallv = np.sum(input2)\n    else:\n        smallv = np.sum(input1)\n    cov = float(intsec)/float(smallv)\n    return cov\n\n\ndef globalcbf(cbf, gm, wm, csf, thresh=0.7):\n    cbf = nb.load(cbf).get_fdata()\n    gm = nb.load(gm).get_fdata()\n    wm = nb.load(wm).get_fdata()\n    csf = nb.load(csf).get_fdata()\n    b1 = [gm < thresh]\n    gm[b1] = 0\n    bx = cbf[gm > 0]\n    b2 = [wm < thresh]\n    wm[b2] = 0\n    by = cbf[wm > 0]\n    b3 = [csf < thresh]\n    csf[b3] = 0\n    bz = cbf[csf > 0]\n    return np.mean(bx), np.mean(by), np.mean(bz)\n\n\ndef cbf_qei(gm, wm, csf, img, thresh=0.8):\n    \"\"\"\n    Quality evaluation index of CBF based on Sudipto Dolui's work\n    Dolui S., Wolf R., Nabavizadeh S., Wolk D., Detre J. (2017).\n    Automated Quality Evaluation Index for 2D ASL CBF Maps. 
ISMRM 2017\n\n    \"\"\"\n    def fun1(x, xdata):\n        d1 = np.exp(-(x[0])*np.power(xdata, x[1]))\n        return d1\n\n    def fun2(x, xdata):\n        d1 = 1-np.exp(-(x[0])*np.power(xdata, x[1]))\n        return d1\n\n    x1 = [0.054, 0.9272]\n    x2 = [2.8478, 0.5196]\n    x4 = [3.0126, 2.4419]\n    scbf = smooth_image(nb.load(img), fwhm=5).get_fdata()\n    if len(scbf.shape) > 3:\n        scbf = scbf[:, :, :, 0]\n    # load prob maps\n    gmm = nb.load(gm).get_fdata()\n    wmm = nb.load(wm).get_fdata()\n    ccf = nb.load(csf).get_fdata()\n    if len(gmm.shape) > 3:\n        gmm = gmm[:, :, :, 0]\n        wmm = wmm[:, :, :, 0]\n        ccf = ccf[:, :, :, 0]\n    pbcf = 2.5*gmm+wmm  # gmm is 2.5 times wm\n    msk = np.array((scbf != 0) & ~np.isnan(scbf) & ~np.isnan(pbcf)).astype(int)\n\n    gm1 = np.array(gmm > thresh)\n    wm1 = np.array(wmm > thresh)\n    cc1 = np.array(ccf > thresh)\n    r1 = np.array([0, np.corrcoef(scbf[msk == 1], pbcf[msk == 1])[1, 0]]).max()\n\n    V = ((np.sum(gm1)-1)*np.var(scbf[gm1 > 0])+(np.sum(wm1)-1)*np.var(scbf[wm1 > 0])\n         + (np.sum(cc1)-1) * np.var(scbf[cc1 > 0]))/(np.sum(gm1 > 0)+np.sum(wm1 > 0)\n         + np.sum(cc1 > 0)-3)\n\n    negGM = np.sum(scbf[gm1] < 0)/(np.sum(gm1))\n    GMCBF = np.mean(scbf[gm1])\n    CV = V/np.abs(GMCBF)\n    Q = [fun1(x1, CV), fun1(x2, negGM), fun2(x4, r1)]\n    return gmean(Q)\n\n\ndef negativevoxel(cbf, gm, thresh=0.7):\n    \"\"\"\n    percentage of negative voxels within\n    grey matter\n    \"\"\"\n    gm = nb.load(gm).get_fdata()\n    cbf = nb.load(cbf).get_fdata()\n    gm1 = np.array(gm > thresh)\n    gm1[gm1 > 0] = 1\n    npgm = np.sum(gm1)\n    cbfgm = np.array(cbf < 0)\n    cbfgm[cbfgm > 0] = 1\n    ncbfgm = np.sum(np.multiply(cbfgm, gm1))\n    pernegcbf = np.multiply(np.divide(ncbfgm, npgm), 100)\n    return pernegcbf\n\n\ndef get_atlas(atlasname):\n    if atlasname == 'HarvardOxford':\n        atlasfile = pkgrf('aslprep', 'data/atlas/HarvardOxford/HarvardOxfordMNI.nii.gz')\n        atlasdata = pkgrf('aslprep', 'data/atlas/HarvardOxford/HarvardOxfordNodeNames.txt')\n        atlaslabel = pkgrf('aslprep', 'data/atlas/HarvardOxford/HarvardOxfordNodeIndex.1D')\n    elif atlasname == 'schaefer200x7':\n        atlasfile = pkgrf('aslprep', 'data/atlas/schaefer200x7/schaefer200x7MNI.nii.gz')\n        atlasdata = pkgrf('aslprep', 'data/atlas/schaefer200x7/schaefer200x7NodeNames.txt')\n        atlaslabel = pkgrf('aslprep', 'data/atlas/schaefer200x7/schaefer200x7NodeIndex.1D')\n    elif atlasname == 'schaefer200x17':\n        atlasfile = pkgrf('aslprep', 'data/atlas/schaefer200x17/schaefer200x17MNI.nii.gz')\n        atlasdata = pkgrf('aslprep', 'data/atlas/schaefer200x17/schaefer200x17NodeNames.txt')\n        atlaslabel = pkgrf('aslprep', 'data/atlas/schaefer200x17/schaefer200x17NodeIndex.1D')\n    elif atlasname == 'schaefer400x7':\n        atlasfile = pkgrf('aslprep', 'data/atlas/schaefer400x7/schaefer400x7MNI.nii.gz')\n        atlasdata = pkgrf('aslprep', 'data/atlas/schaefer400x7/schaefer400x7NodeNames.txt')\n        atlaslabel = pkgrf('aslprep', 'data/atlas/schaefer400x7/schaefer400x7NodeIndex.1D')\n    elif atlasname == 'schaefer400x17':\n        atlasfile = pkgrf('aslprep', 'data/atlas/schaefer400x17/schaefer400x17MNI.nii.gz')\n        atlasdata = pkgrf('aslprep', 'data/atlas/schaefer400x17/schaefer400x17NodeNames.txt')\n        atlaslabel = pkgrf('aslprep', 'data/atlas/schaefer400x17/schaefer400x17NodeIndex.1D')\n    else:\n        raise RuntimeError('atlas not available')\n    return atlasfile, atlasdata, atlaslabel\n\n\ndef cbfroiquant(roi_file, roi_label, cbfmap):\n    data = nb.load(cbfmap).get_fdata()\n    roi = nb.load(roi_file).get_fdata()\n    roi_labels = np.loadtxt(roi_label)\n    if (data.shape != roi.shape):\n        raise ValueError(\"Image-shapes do not match\")\n    # if roi_labels is None:\n    #   roi_labels = 
np.unique(roi)\n    mean_vals = []\n    for roi_label in roi_labels:\n        mean_vals.append(np.mean(data[roi == roi_label]))\n    return mean_vals\n\n\nclass _cbfroiquantInputSpec(BaseInterfaceInputSpec):\n    in_cbf = File(exists=True, mandatory=True, desc='cbf img')\n    atlasfile = File(exists=True, mandatory=True, desc='data')\n    atlasdata = File(exists=True, mandatory=True, desc='data')\n    atlaslabel = File(exists=True, mandatory=True, desc='data')\n    atlascsv = File(exists=False, mandatory=False, desc='atlas output csv')\n\n\nclass _cbfroiquantOutputSpec(TraitedSpec):\n    atlascsv = File(exists=False, desc='atlas output csv')\n\n\nclass cbfqroiquant(SimpleInterface):\n    input_spec = _cbfroiquantInputSpec\n    output_spec = _cbfroiquantOutputSpec\n\n    def _run_interface(self, runtime):\n\n        self._results['atlascsv'] = fname_presuffix(self.inputs.in_cbf, suffix='atlas.csv',\n                                                    newpath=runtime.cwd, use_ext=False)\n        roiquant = cbfroiquant(roi_label=self.inputs.atlaslabel, roi_file=self.inputs.atlasfile,\n                               cbfmap=self.inputs.in_cbf)\n        data1 = pd.read_table(self.inputs.atlasdata, header=None, index_col=None, sep='\\t')\n        bb = list(data1.values.tolist())\n        flattened = [val for sublist in bb for val in sublist]\n        datat = pd.DataFrame([flattened, roiquant])\n        datat.to_csv(self._results['atlascsv'], header=None, index=None)\n        return runtime\n\n\n\n\nclass _extractCBInputSpec(BaseInterfaceInputSpec):\n    in_asl = File(exists=True, mandatory=True, desc='raw asl file')\n    in_aslmask = File(exists=True, mandatory=True, desc='asl mask')\n    file_type = traits.Str(desc='file type, c for cbf, d for deltam', mandatory=True)\n    out_file = File(exists=False, mandatory=False, desc='cbf or deltam')\n\n\nclass _extractCBOutputSpec(TraitedSpec):\n    out_file = File(exists=False, desc='cbf or deltam')\n    \n\n\nclass extractCB(SimpleInterface):\n    r\"\"\"\n    extract the cbf or deltam (control-label difference) volumes from the raw asl file\n    using the accompanying aslcontext tsv; the output is the cbf or deltam timeseries\n\n    \"\"\"\n    input_spec = _extractCBInputSpec\n    output_spec = _extractCBOutputSpec\n\n    def _run_interface(self, runtime):\n        self._results['out_file'] = fname_presuffix(self.inputs.in_aslmask,\n                                                    suffix='_cbfdeltam', newpath=runtime.cwd)\n        filex = self.inputs.in_asl\n        aslcontext = pd.read_csv(filex.replace('_asl.nii.gz', '_aslcontext.tsv'))\n        idasl = aslcontext['volume_type'].tolist()\n        fdata = nb.load(filex).get_fdata()\n        img = nb.load(filex)\n\n        controllist = [i for i in range(0, len(idasl)) if idasl[i] == 'control']\n        labelist = [i for i in range(0, len(idasl)) if idasl[i] == 'label']\n        \n\n        if self.inputs.file_type == 'd':\n            if len(controllist) > 0:\n                ffdata = fdata[:, :, :, controllist] - fdata[:, :, :, labelist]\n                newdata = nb.Nifti1Image(dataobj=ffdata, affine=img.affine, header=img.header)\n            else:\n                dlist = [i for i in range(0, len(idasl)) if idasl[i] == 'deltam']\n                if len(fdata.shape) < 4:\n                    newdata = nb.Nifti1Image(dataobj=fdata, affine=img.affine, header=img.header)\n                else:\n                    ffdata = fdata[:, :, :, dlist]\n                    newdata = nb.Nifti1Image(dataobj=ffdata, affine=img.affine, header=img.header)\n        elif self.inputs.file_type == 'c':\n            dlist = [i for i in range(0, len(idasl)) if idasl[i] == 'CBF']\n            if len(fdata.shape) < 4:\n                newdata = nb.Nifti1Image(dataobj=fdata, affine=img.affine, header=img.header)\n            else:\n                ffdata = fdata[:, :, :, dlist]\n                newdata = nb.Nifti1Image(dataobj=ffdata, affine=img.affine, header=img.header)\n        \n        newdata.to_filename(self._results['out_file'])\n\n        return runtime\n\n\ndef regmotoasl(asl, m0file, m02asl):\n    from nipype.interfaces import fsl\n    meanasl = fsl.MeanImage()\n    meanasl.inputs.in_file = asl\n    meanasl.inputs.out_file = fname_presuffix(asl, suffix='_meanasl')\n    meanasl.run()\n    meanm0 = fsl.MeanImage()\n    meanm0.inputs.in_file = m0file\n    meanm0.inputs.out_file = fname_presuffix(asl, suffix='_meanm0')\n    meanm0.run()\n    flt = fsl.FLIRT(bins=640, cost_func='mutualinfo')\n    flt.inputs.in_file = meanm0.inputs.out_file\n    flt.inputs.reference = meanasl.inputs.out_file\n    flt.inputs.out_file = m02asl\n    flt.run()\n    return m02asl\n\n\ndef readjson(jsonfile):\n    import json\n    with open(jsonfile) as f:\n        data = json.load(f)\n    return data\n\ndef refine_ref_mask(t1w_mask, ref_asl_mask,\n                    t12ref_transform, tmp_mask,\n                    refined_mask):\n\n    b1 = ApplyTransforms()\n    b1.inputs.dimension = 3\n    b1.inputs.float = True\n    b1.inputs.input_image = t1w_mask\n    b1.inputs.interpolation = 'NearestNeighbor'\n    b1.inputs.reference_image = ref_asl_mask\n    b1.inputs.transforms = t12ref_transform\n    b1.inputs.input_image_type = 3\n    b1.inputs.output_image = tmp_mask\n    b1.run()\n    \n    mat1 = MultiImageMaths()\n    mat1.inputs.in_file = tmp_mask\n    mat1.inputs.op_string = \" -mul %s -bin\"\n    mat1.inputs.operand_files = ref_asl_mask\n    mat1.inputs.out_file = refined_mask\n    mat1.run()\n    \n    return refined_mask\n\n\n\n"
] | [
[
    "numpy.isnan",
"numpy.minimum",
"numpy.sqrt",
"numpy.nan_to_num",
"pandas.DataFrame",
"numpy.mean",
"numpy.zeros_like",
"numpy.nanmean",
"numpy.var",
"numpy.exp",
"numpy.divide",
"pandas.read_csv",
"numpy.subtract",
"numpy.sin",
"numpy.finfo",
"numpy.std",
"numpy.argmax",
"numpy.count_nonzero",
"numpy.zeros",
"scipy.stats.gmean",
"numpy.multiply",
"numpy.power",
"numpy.median",
"numpy.linalg.lstsq",
"pandas.read_table",
"numpy.transpose",
"numpy.savetxt",
"numpy.corrcoef",
"numpy.array",
"numpy.tanh",
"numpy.sum",
"numpy.maximum",
"numpy.abs",
"numpy.tile",
"scipy.stats.median_absolute_deviation",
"numpy.ones",
"numpy.add",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"1.6",
"1.4",
"1.5",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
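A minimal, self-contained sketch of the single-PLD PCASL quantification that cbfcomputation in the aslprep entry above reduces to. The function name and the toy inputs are ours; the field-strength-based T1 of blood, the 0.72 labeling efficiency, and the 0.9 partition coefficient are the defaults taken from the code above.

import numpy as np

def pcasl_cbf(deltam, m0, tau=1.8, pld=1.8, magstrength=3,
              labeleff=0.72, part_coeff=0.9, m0scale=1.0):
    # T1 of blood from field strength, same linear model as cbfcomputation
    t1blood = (110 * int(magstrength) + 1316) / 1000
    # single-PLD PCASL scaling factor, yielding CBF in mL/100 g/min
    factor = (6000 * part_coeff * np.exp(pld / t1blood)) / (
        2 * labeleff * t1blood * (1 - np.exp(-tau / t1blood)))
    return factor * deltam / (m0scale * m0)

# toy call: a deltam/m0 ratio of 0.01 lands in a physiologically plausible range
print(pcasl_cbf(deltam=np.array([0.01]), m0=np.array([1.0])))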
GuptaVishu2002/IRNET | [
"a430d17df3ececfe6cfd8ab469fff070e1c262e7"
] | [
"IRNET17_NEW.py"
] | [
"# Larger CNN for the MNIST Dataset\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.layers import add\nfrom collections import Counter\nfrom tensorflow.keras.layers import Input\nimport re, os, csv, math, operator\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.callbacks import EarlyStopping\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_absolute_error\n\n#Contains 86 elements (Without Noble elements as it does not forms compounds in normal condition)\nelements = ['H','Li','Be', 'B', 'C', 'N', 'O', 'F', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl',\n 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe','Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge',\n 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd',\n 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd',\n 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er','Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', \n 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu']\n\n# import training data \ndef load_data(csvname):\n # load in data\n data = np.asarray(pd.read_csv(csvname))\n\n # import data and reshape appropriately\n X = data[:,0:-1]\n y = data[:,-1]\n y.shape = (len(y),1)\n \n return X,y\n\ndef convert(lst): \n res_dct = {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)} \n return res_dct \n\nseparate = re.compile('[A-Z][a-z]?|\\d+\\.\\d')\n\ndef correction(x_train):\n new_x = []\n for i in range (0,x_train.shape[0]):\n new_x.append(separate.findall(x_train[i][0])) \n new_x = np.asarray(new_x)\n new_x.shape = (len(new_x),1) \n dict_x = convert(new_x[0][0])\n input_x = []\n for i in range (0,new_x.shape[0]):\n input_x.append(convert(new_x[i][0]))\n \n in_elements = np.zeros(shape=(len(input_x), len(elements)))\n comp_no = 0\n\n for compound in input_x:\n keys = compound.keys()\n for key in keys:\n in_elements[comp_no][elements.index(key)] = compound[key]\n comp_no+=1 \n\n data = in_elements \n \n return data\n\n# load data\nx_train, y_train = load_data('dataset/train_set.csv')\nx_test, y_test = load_data('dataset/test_set.csv')\n\nnew_x_train = correction(x_train)\nnew_x_test = correction(x_test)\n\nnew_y_train = y_train\nnew_y_test = y_test\n\nnew_y_train.shape = (len(new_y_train),)\nnew_y_test.shape = (len(new_y_test),)\n\nbatch_size1 = new_x_train.shape[0]\nnum_input1 = new_x_train.shape[1]\n\n#in_chem = Input(shape=(num_input,))\n\n# create model\n\nin_layer = Input(shape=(86,))\n\nlayer_1 = Dense(1024)(in_layer)\nlayer_1 = BatchNormalization()(layer_1)\nlayer_1 = Activation('relu')(layer_1)\n\n#gsk_1 = concatenate([in_layer, layer_1])\n\nlayer_2 = Dense(1024)(layer_1)\nlayer_2 = BatchNormalization()(layer_2)\nlayer_2 = Activation('relu')(layer_2)\n\ngsk_2 = add([layer_1, layer_2])\n\nlayer_3 = Dense(1024)(gsk_2)\nlayer_3 = BatchNormalization()(layer_3)\nlayer_3 = Activation('relu')(layer_3)\n\ngsk_3 = add([gsk_2, layer_3])\n\nlayer_4 = Dense(1024)(gsk_3)\nlayer_4 = BatchNormalization()(layer_4)\nlayer_4 = Activation('relu')(layer_4)\n\ngsk_4 = add([gsk_3, layer_4])\n\nlayer_5 = Dense(512)(gsk_4)\nlayer_5 = 
BatchNormalization()(layer_5)\nlayer_5 = Activation('relu')(layer_5)\n\n#gsk_5 = concatenate([gsk_4, layer_5])\n\nlayer_6 = Dense(512)(layer_5)\nlayer_6 = BatchNormalization()(layer_6)\nlayer_6 = Activation('relu')(layer_6)\n\ngsk_6 = add([layer_5, layer_6])\n\nlayer_7 = Dense(512)(gsk_6)\nlayer_7 = BatchNormalization()(layer_7)\nlayer_7 = Activation('relu')(layer_7)\n\ngsk_7 = add([gsk_6, layer_7])\n\nlayer_8 = Dense(256)(gsk_7)\nlayer_8 = BatchNormalization()(layer_8)\nlayer_8 = Activation('relu')(layer_8)\n\n#gsk_8 = concatenate([gsk_7, layer_8])\n\nlayer_9 = Dense(256)(layer_8)\nlayer_9 = BatchNormalization()(layer_9)\nlayer_9 = Activation('relu')(layer_9)\n\ngsk_9 = add([layer_8, layer_9])\n\nlayer_10 = Dense(256)(gsk_9)\nlayer_10 = BatchNormalization()(layer_10)\nlayer_10 = Activation('relu')(layer_10)\n\ngsk_10 = add([gsk_9, layer_10])\n\nlayer_11 = Dense(128)(gsk_10)\nlayer_11 = BatchNormalization()(layer_11)\nlayer_11 = Activation('relu')(layer_11)\n\n#gsk_11 = concatenate([gsk_10, layer_11])\n\nlayer_12 = Dense(128)(layer_11)\nlayer_12 = BatchNormalization()(layer_12)\nlayer_12 = Activation('relu')(layer_12)\n\ngsk_12 = add([layer_11, layer_12])\n\nlayer_13 = Dense(128)(gsk_12)\nlayer_13 = BatchNormalization()(layer_13)\nlayer_13 = Activation('relu')(layer_13)\n\ngsk_13 = concatenate([gsk_12, layer_13])\n\nlayer_14 = Dense(64)(gsk_13)\nlayer_14 = BatchNormalization()(layer_14)\nlayer_14 = Activation('relu')(layer_14)\n\n#gsk_14 = concatenate([gsk_13, layer_14])\n\nlayer_15 = Dense(64)(layer_14)\nlayer_15 = BatchNormalization()(layer_15)\nlayer_15 = Activation('relu')(layer_15)\n\n#gsk_15 = concatenate([gsk_14, layer_15])\n\nlayer_16 = Dense(32)(layer_15)\nlayer_16 = BatchNormalization()(layer_16)\nlayer_16 = Activation('relu')(layer_16)\n\n#gsk_16 = concatenate([gsk_15, layer_16])\n\nout_layer = Dense(1)(layer_16)\n\nmodel = Model(inputs=in_layer, outputs=out_layer)\n\n# Compile model\nadam = optimizers.Adam(learning_rate=0.0001)\nmodel.compile(loss=tf.keras.losses.mean_absolute_error, optimizer=adam, metrics=['mean_absolute_error'])\n\nes = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100)\n# Fit the model\nmodel.fit(new_x_train, new_y_train, verbose=2, validation_data=(new_x_test, new_y_test), epochs=1000, batch_size=32, callbacks=[es])\ny_predict = model.predict(new_x_test)\nnp.savetxt('resultIR17.txt', y_predict)\nmodel.save_weights(\"modelIR17.h5\")"
] | [
[
    "numpy.savetxt",
"pandas.read_csv",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"numpy.asarray",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.add",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
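The IRNET script above hand-writes seventeen Dense -> BatchNormalization -> ReLU blocks, joining equal-width pairs with additive skips. A sketch of that motif factored into helpers follows; the helper names and the loop are ours, so this is not a one-to-one reproduction of the script's topology, only of its building block.

from tensorflow.keras.layers import Input, Dense, BatchNormalization, Activation, add
from tensorflow.keras.models import Model

def dense_bn_relu(x, units):
    # the repeated Dense -> BatchNorm -> ReLU motif from the script
    x = Dense(units)(x)
    x = BatchNormalization()(x)
    return Activation('relu')(x)

def residual_pair(x, units):
    # two blocks joined by an identity skip; add() needs matching widths
    y = dense_bn_relu(x, units)
    return add([y, dense_bn_relu(y, units)])

inp = Input(shape=(86,))          # 86 elemental fractions, as in the script
h = dense_bn_relu(inp, 1024)
for width in (1024, 512, 256, 128):
    h = residual_pair(h, width)
out = Dense(1)(h)                 # single regression target
model = Model(inputs=inp, outputs=out)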
rlnsanz/inspectional-rara-parakeet | [
"2c7919ed432616ec016a5afcd6718d16fa65e8af"
] | [
"examples/housing/housing_price_sliced.py"
] | [
"import gadget as ln\n\nwith ln.tracking(\"housing_price\"):\n\n import dbfread\n import pandas as pd\n\n from sklearn.ensemble import RandomForestClassifier\n\n ln.importing(\n dbfread, module=\"dbfread\", name=\"dbfread\", line_no=2\n ) ###############################\n ln.importing(\n pd, module=\"pandas\", name=\"pd\", line_no=3\n ) ##########################################\n\n ln.importing(\n RandomForestClassifier,\n module=\"sklearn.ensemble.RandomForestClassifier\",\n name=\"RandomForestClassifier\",\n line_no=5,\n ) ##############################\n\n dbf = ln.call(\n dbfread.DBF(\n \"../property_example/L3_SHP_M114_Greenfield/M114Assess_CY20_FY20.dbf\"\n ),\n text='dbfread.DBF(\"./L3_SHP_M114_Greenfield/M114Assess_CY20_FY20.dbf\")',\n line_no=8,\n ).assign(\n target=\"dbf\"\n ) ################################################################################################################\n dbf = ln.call(iter(dbf), text=\"iter(dbf)\", line_no=9).assign(\n target=\"dbf\"\n ) #####################################################\n assets = ln.call(pd.DataFrame(dbf), text=\"pd.DataFrame(dbf)\", line_no=10).assign(\n target=\"assets\"\n ) ##############################\n\n \"\"\"\n Just make sure that it is not ambiguous which of the possibly many functions is running\n assuming there could be name colisions.\n \"\"\"\n\n def get_threshold(): ###############################################################################################\n with ln.func(name=\"get_threshold\", ret_text=\"1970\", line_no=12):\n return 1970\n\n ln.assign(get_threshold, \"get_threshold\", \"get_threshold\", line_no=21)\n\n def is_new(\n col,\n ): ###################################################################################################\n with ln.func(name=\"is_new\", ret_text=\"col > get_threshold()\", line_no=13):\n return col > ln.call(get_threshold(), text=\"get_threshold()\", line_no=14)\n\n ln.assign(is_new, \"is_new\", \"is_new\", line_no=22)\n\n assets[\"is_new\"] = ln.call(\n is_new(assets[\"YEAR_BUILT\"]), text=\"is_new(assets['YEAR_BUILT'])\", line_no=15\n ).assign(\n target=\"assets\"\n ) ###############################################################\n\n clf = ln.call(\n RandomForestClassifier(random_state=0),\n text=\"RandomForestClassifier(random_state=0)\",\n line_no=16,\n ).assign(\n target=\"clf\"\n ) ###############################################################################\n # assign(e, target, text, line_no, mod=None):\n y = ln.assign(\n assets[\"is_new\"], text=\"assets\", target=\"y\", line_no=17\n ) #############################################\n x = ln.assign(\n assets[[\"BLDG_VAL\", \"LOT_SIZE\", \"NUM_ROOMS\"]],\n text=\"assets\",\n line_no=18,\n target=\"x\",\n ) #############################################\n\n ln.call(\n clf.fit(x, y), text=\"clf.fit(x, y)\", line_no=19\n ) ################################\n\n p = ln.call(\n clf.predict([[100 * 1000, 10, 4]]),\n text=\"clf.predict([[100 * 1000, 10, 4]])\",\n line_no=20,\n ).assign(\n target=\"p\"\n ) ###################################################################################\n"
] | [
[
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
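The housing_price example above is ordinary scikit-learn code wrapped in ln.* provenance-tracking calls; stripped of the tracking, it reduces to the plain script below (the DBF path is the one hard-coded in the example, and 1970 is its get_threshold value).

import dbfread
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

# load the assessor table and derive the binary target
assets = pd.DataFrame(iter(dbfread.DBF(
    "../property_example/L3_SHP_M114_Greenfield/M114Assess_CY20_FY20.dbf")))
y = assets["YEAR_BUILT"] > 1970   # is_new
x = assets[["BLDG_VAL", "LOT_SIZE", "NUM_ROOMS"]]

clf = RandomForestClassifier(random_state=0).fit(x, y)
p = clf.predict([[100 * 1000, 10, 4]])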
jvfe/BioProv | [
"fe19342bec91931f226726a7c867210dc703040e"
] | [
"bioprov/workflows/workflows.py"
] | [
"__author__ = \"Vini Salazar\"\n__license__ = \"MIT\"\n__maintainer__ = \"Vini Salazar\"\n__url__ = \"https://github.com/vinisalazar/bioprov\"\n__version__ = \"0.1.22\"\n\n\"\"\"\nModule containing preset workflows created with the Workflow class.\n\"\"\"\n\nfrom bioprov.programs import blastn, prodigal\nfrom bioprov.src.workflow import Workflow, Step\n\n# Kaiju WF imports. These will be removed later\nimport argparse\nimport logging\nfrom os import path, getcwd, mkdir\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom bioprov import config, from_df, Sample\nfrom bioprov.programs import kaiju, kaiju2table\nfrom bioprov.utils import Warnings, tax_ranks\n\n\ndef blastn_alignment(**kwargs):\n\n _blastn_alignment = Workflow(\n name=\"blastn\",\n description=\"Align nucleotide data to a reference database with BLASTN.\",\n input_type=\"dataframe\",\n index_col=\"sample-id\",\n file_columns=\"query\",\n **kwargs,\n )\n\n try:\n _blastn_alignment.db = kwargs[\"db\"]\n except KeyError:\n _blastn_alignment.db = None\n\n blastn_preset = blastn(db=_blastn_alignment.db)\n\n _blastn_alignment.add_step(Step(blastn_preset, default=True))\n\n # Workflow specific arguments must be added AFTER the steps.\n # That is because adding a Step updates the parser with the default arguments\n # of the Workflow class.\n\n _blastn_alignment.parser.add_argument(\n \"-db\",\n \"--database\",\n help=\"BLASTn reference database. Must be a valid BLAST database created with the `makeblastdb` command.\",\n required=True,\n )\n\n return _blastn_alignment\n\n\ndef genome_annotation(**kwargs):\n _genome_annotation = Workflow(\n name=\"genome_annotation\",\n description=\"Genome annotation with Prodigal, Prokka and the COG database.\",\n input_type=\"dataframe\",\n index_col=\"sample-id\",\n file_columns=\"assembly\",\n **kwargs,\n )\n\n # Create steps from preset programs.\n prodigal_preset, prokka_preset = (prodigal(), None) # prokka()\n steps = [\n Step(prodigal_preset, default=True),\n # Step(prokka_preset, default=False),\n ]\n\n # Add steps to parser\n for _step in steps:\n _genome_annotation.add_step(_step)\n\n return _genome_annotation\n\n\nclass KaijuWorkflow:\n \"\"\"\n Class holding the KaijuWorkflow main function and parser.\n \"\"\"\n\n description = (\n \"Run Kaiju on metagenomic data and create reports for taxonomic ranks.\"\n )\n\n def __init__(self):\n pass\n\n @staticmethod\n def main(\n input_file,\n output_path=None,\n kaijudb=None,\n nodes=None,\n names=None,\n threads=config.threads,\n _tag=None,\n verbose=False,\n resume=True,\n kaiju_params=\"\",\n kaiju2table_params=\"\",\n ):\n \"\"\"\n Main function to run the Kaiju workflow.\n\n :param input_file: Input tab delimited file with the columns: 'sample-id', 'R1', 'R2'\n :param output_path: Directory to create Kaiju output files.\n :param kaijudb: Kaiju database file.\n :param nodes: Kaiju nodes file.\n :param names: Kaiju names file.\n :param threads: Number of threads to use with Kaiju.\n :param _tag: Tag for Project.\n :param verbose: Verbose output.\n :param resume: Check for existing files and skip running Kaiju for them.\n :param kaiju_params: Parameter string to add to Kaiju command.\n :param kaiju2table_params: Parameter string to add to kaiju2table command.\n :return:\n \"\"\"\n # Asserting files exist\n for file_ in (input_file, kaijudb, nodes, names):\n assert path.isfile(file_), Warnings()[\"not_exist\"](file_)\n\n # Asserting columns are correct\n df = pd.read_csv(input_file, sep=\"\\t\")\n for column in (\"sample-id\", \"R1\", \"R2\"):\n 
assert (\n column in df.columns\n ), f\"Column '{column}' not present in {input_file}.\"\n\n # Assert all files exist\n for ix, row in df[[\"R1\", \"R2\"]].iterrows():\n for column in (\"R1\", \"R2\"):\n file_ = row[column]\n assert path.isfile(\n file_\n ), f\"File '{file_}' was not found! Make sure all file paths are correct in input file.\"\n\n logging.warning(Warnings()[\"sample_loading\"](len(df)))\n\n # Create BioProv Project\n ss = from_df(df, index_col=\"sample-id\", file_cols=(\"R1\", \"R2\"), tag=_tag)\n\n success, skip = 0, 0\n\n sample: Sample\n for k, sample in tqdm(ss.items()):\n kaiju_ = kaiju(\n sample,\n output_path=output_path,\n kaijudb=kaijudb,\n nodes=nodes,\n threads=threads,\n add_param_str=kaiju_params,\n )\n\n # If resume is 'on', will check for existing files and skip if needed.\n if resume and sample.files[\"kaiju_output\"].exists:\n skip += 1\n continue\n\n kaiju_run = kaiju_.run(sample, _print=verbose)\n if verbose:\n print(kaiju_run)\n\n # Create reports for each rank (this is much faster than running Kaiju)\n if not verbose:\n print(\"Creating Kaiju reports.\")\n for rank in tax_ranks:\n if verbose:\n print(f\"Creating report for {rank} rank.\")\n kaiju2table_ = kaiju2table(\n _sample=sample,\n rank=rank,\n nodes=nodes,\n names=names,\n add_param_str=kaiju2table_params,\n )\n kaiju2table_.run(sample)\n\n all_files_exist = False\n for k_, v in sample.files.items():\n if not path.isfile(str(v)):\n all_files_exist = False\n break\n else:\n all_files_exist = True\n\n if all_files_exist:\n success += 1\n\n ss.to_json()\n print(Warnings()[\"number_success\"](success, len(df)))\n print(Warnings()[\"number_skip\"](skip))\n\n @classmethod\n def parser(cls):\n \"\"\"\n Parser for the Kaiju workflow.\n :return: instance of argparse.ArgumentParser.\n \"\"\"\n _parser = argparse.ArgumentParser(\n \"kaiju\",\n description=KaijuWorkflow.description,\n )\n _parser.add_argument(\n \"-i\",\n \"--input\",\n help=(\n \"Input file, a tab delimited file which must contain three columns: 'sample-id', 'R1', and 'R2',\\\n containing respectively sample IDs, _path to forward reads and _path to reverse reads.\"\n ),\n required=True,\n type=str,\n )\n _parser.add_argument(\n \"-o\",\n \"--output_directory\",\n help=\"Output directory to create Kaiju files. Default is directory of input file.\",\n required=False,\n default=None,\n )\n _parser.add_argument(\n \"-db\",\n \"--kaiju_db\",\n help=\"Kaiju database file.\",\n required=True,\n )\n _parser.add_argument(\n \"-no\",\n \"--nodes\",\n help=\"NCBI Taxonomy nodes.dmp file required to run Kaiju.\",\n required=True,\n )\n _parser.add_argument(\n \"-na\",\n \"--names\",\n help=\"NCBI Taxonomy names.dmp file required to run Kaiju2Table.\",\n required=True,\n )\n _parser.add_argument(\n \"--kaiju_params\",\n help=\"Parameter string to be added to Kaiju command.\",\n required=False,\n default=\"\",\n )\n _parser.add_argument(\n \"--kaiju2table_params\",\n help=\"Parameter string to be added to Kaiju2table command.\",\n required=False,\n default=\"\",\n )\n _parser.add_argument(\n \"-t\", \"--tag\", help=\"A tag for the dataset\", required=False\n )\n _parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"More verbose output\",\n action=\"store_true\",\n default=False,\n required=False,\n )\n _parser.add_argument(\n \"-p\",\n \"--threads\",\n help=\"Number of threads. 
Default is set in BioProv config (half of the threads).\",\n default=config.threads,\n )\n return _parser\n\n\nclass WorkflowOptionsParser:\n \"\"\"\n Class for parsing command-line options.\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def _blastn_alignment(kwargs, steps):\n \"\"\"\n Runs blastn alignment workflow\n :return:\n \"\"\"\n main = blastn_alignment(**kwargs)\n main.run_steps(steps)\n\n @staticmethod\n def _genome_annotation(kwargs, steps):\n \"\"\"\n Runs genome annotation workflow\n :return:\n \"\"\"\n main = genome_annotation(**kwargs)\n main.run_steps(steps)\n\n @staticmethod\n def _kaiju_workflow(kwargs, steps):\n \"\"\"\n Runs Kaiju workflow\n :return:\n \"\"\"\n _ = steps\n KaijuWorkflow.main(**kwargs)\n\n def parse_options(self, options):\n \"\"\"\n Parses options and returns correct workflow.\n :type options: argparse.Namespace\n :param options: arguments passed by the parser.\n :return: Runs the specified subparser in options.subparser_name.\n \"\"\"\n subparsers = {\n \"genome_annotation\": lambda _options, _steps: self._genome_annotation(\n _options, _steps\n ),\n \"blastn\": lambda _options, _steps: self._blastn_alignment(_options, _steps),\n \"kaiju\": lambda _options, _steps: self._kaiju_workflow(_options, _steps),\n }\n\n # Run desired subparser\n kwargs = dict(options._get_kwargs())\n steps = kwargs.pop(\"steps\")\n subparsers[options.subparser_name](kwargs, steps)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
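The workflow in the record above is driven entirely through its argparse interface. A minimal sketch of exercising that parser outside the packaged CLI; every file path below is a hypothetical placeholder, and the result is only printed rather than handed to KaijuWorkflow.main(), whose exact signature is defined earlier in the module:

# Hedged sketch: build and inspect the Kaiju parser defined above.
parser = KaijuWorkflow.parser()
options = parser.parse_args([
    "-i", "samples.tsv",    # hypothetical tab-delimited sample sheet
    "-db", "kaiju_db.fmi",  # hypothetical Kaiju database
    "-no", "nodes.dmp",
    "-na", "names.dmp",
    "-t", "demo_run",
])
print(vars(options))        # dests: input, kaiju_db, nodes, names, ...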
TanjaBayer/craft-text-detector | [
"7daddaf5612d454e9fe570873ac21bc3f19472ac"
] | [
"craft_text_detector/predict.py"
] | [
"import os\nimport time\n\nimport cv2\nimport numpy as np\n\nimport craft_text_detector.craft_utils as craft_utils\nimport craft_text_detector.image_utils as image_utils\nimport craft_text_detector.torch_utils as torch_utils\n\n\ndef get_prediction(\n image,\n craft_net,\n refine_net=None,\n text_threshold: float = 0.7,\n link_threshold: float = 0.4,\n low_text: float = 0.4,\n cuda: bool = False,\n long_size: int = 1280,\n poly: bool = True,\n):\n \"\"\"\n Arguments:\n image: path to the image to be processed or numpy array or PIL image\n output_dir: path to the results to be exported\n craft_net: craft net model\n refine_net: refine net model\n text_threshold: text confidence threshold\n link_threshold: link confidence threshold\n low_text: text low-bound score\n cuda: Use cuda for inference\n canvas_size: image size for inference\n long_size: desired longest image size for inference\n poly: enable polygon type\n Output:\n {\"masks\": lists of predicted masks 2d as bool array,\n \"boxes\": list of coords of points of predicted boxes,\n \"boxes_as_ratios\": list of coords of points of predicted boxes as ratios of image size,\n \"polys_as_ratios\": list of coords of points of predicted polys as ratios of image size,\n \"heatmaps\": visualizations of the detected characters/links,\n \"times\": elapsed times of the sub modules, in seconds}\n \"\"\"\n t0 = time.time()\n\n # read/convert image\n image = image_utils.read_image(image)\n\n # resize\n img_resized, target_ratio, size_heatmap = image_utils.resize_aspect_ratio(\n image, long_size, interpolation=cv2.INTER_LINEAR\n )\n ratio_h = ratio_w = 1 / target_ratio\n resize_time = time.time() - t0\n t0 = time.time()\n\n # preprocessing\n x = image_utils.normalizeMeanVariance(img_resized)\n x = torch_utils.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]\n x = torch_utils.Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]\n if cuda:\n x = x.cuda()\n preprocessing_time = time.time() - t0\n t0 = time.time()\n\n # forward pass\n with torch_utils.no_grad():\n y, feature = craft_net(x)\n craftnet_time = time.time() - t0\n t0 = time.time()\n\n # make score and link map\n score_text = y[0, :, :, 0].cpu().data.numpy()\n score_link = y[0, :, :, 1].cpu().data.numpy()\n\n # refine link\n if refine_net is not None:\n with torch_utils.no_grad():\n y_refiner = refine_net(y, feature)\n score_link = y_refiner[0, :, :, 0].cpu().data.numpy()\n refinenet_time = time.time() - t0\n t0 = time.time()\n\n # Post-processing\n boxes, polys = craft_utils.getDetBoxes(\n score_text, score_link, text_threshold, link_threshold, low_text, poly\n )\n\n # coordinate adjustment\n boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)\n polys = craft_utils.adjustResultCoordinates(polys, ratio_w, ratio_h)\n for k in range(len(polys)):\n if polys[k] is None:\n polys[k] = boxes[k]\n\n # get image size\n img_height = image.shape[0]\n img_width = image.shape[1]\n\n # calculate box coords as ratios to image size\n boxes_as_ratio = []\n for box in boxes:\n boxes_as_ratio.append(box / [img_width, img_height])\n boxes_as_ratio = np.array(boxes_as_ratio)\n\n # calculate poly coords as ratios to image size\n polys_as_ratio = []\n for poly in polys:\n polys_as_ratio.append(poly / [img_width, img_height])\n polys_as_ratio = np.array(polys_as_ratio)\n\n text_score_heatmap = image_utils.cvt2HeatmapImg(score_text)\n link_score_heatmap = image_utils.cvt2HeatmapImg(score_link)\n\n postprocess_time = time.time() - t0\n\n times = {\n \"resize_time\": resize_time,\n 
\"preprocessing_time\": preprocessing_time,\n \"craftnet_time\": craftnet_time,\n \"refinenet_time\": refinenet_time,\n \"postprocess_time\": postprocess_time,\n }\n\n return {\n \"boxes\": boxes,\n \"boxes_as_ratios\": boxes_as_ratio,\n \"polys\": polys,\n \"polys_as_ratios\": polys_as_ratio,\n \"heatmaps\": {\n \"text_score_heatmap\": text_score_heatmap,\n \"link_score_heatmap\": link_score_heatmap,\n },\n \"times\": times,\n }\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
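A hedged usage sketch for get_prediction() above; craft_net is assumed to be an already-initialised CRAFT model (weight loading lives elsewhere in the package) and "page.jpg" is a hypothetical image path:

prediction = get_prediction(
    image="page.jpg",        # assumption: any readable image path
    craft_net=craft_net,     # assumption: pre-loaded CRAFT model
    refine_net=None,         # skip link refinement
    text_threshold=0.7,
    link_threshold=0.4,
    low_text=0.4,
    cuda=False,
    long_size=1280,
)
print(len(prediction["boxes"]), "text regions detected")
print(prediction["times"])   # per-stage timings in seconds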
refaev/combat_gym | [
"f02fcf98e95a1dda29cdddd4ae271de3e18ea3bf"
] | [
"gym_combat/gym_combat/envs/DQN/deeprl_prj/core_one_screen_array.py"
] | [
"\"\"\"Core classes.\"\"\"\n\nimport numpy as np\nfrom gym_combat.envs.DQN.DQN_constants import *\nfrom PIL import Image\n\nclass Sample:\n \"\"\"Represents a reinforcement learning sample.\n\n Used to store observed experience from an MDP. Represents a\n standard `(s, a, r, s', terminal)` tuple.\n\n Parameters\n ----------\n state: array-like\n Represents the state of the MDP before taking an action. In most\n cases this will be a numpy array.\n action: int, float, tuple\n For discrete action domains this will be an integer. For\n continuous action domains this will be a floating point\n number. For a parameterized action MDP this will be a tuple\n containing the action and its associated parameters.\n reward: float\n The reward received for executing the given action in the given\n state and transitioning to the resulting state.\n next_state: array-like\n This is the state the agent transitions to after executing the\n `action` in `state`. Expected to be the same type/dimensions as\n the state.\n is_terminal: boolean\n True if this action finished the episode. False otherwise.\n \"\"\"\n def __init__(self, state, action, reward, next_state, is_terminal):\n self.state = state\n self.action = action\n self.reward = reward\n self.next_state = next_state\n self.is_terminal = is_terminal\n\nclass Preprocessor:\n \"\"\"Preprocessor base class.\n\n This is a suggested interface for the preprocessing steps. \n\n Preprocessor can be used to perform some fixed operations on the\n raw state from an environment. For example, in ConvNet based\n networks which use image as the raw state, it is often useful to\n convert the image to greyscale or downsample the image.\n\n Preprocessors are implemented as class so that they can have\n internal state. This can be useful for things like the\n AtariPreproccessor which maxes over k frames.\n\n If you're using internal states, such as for keeping a sequence of\n inputs like in Atari, you should probably call reset when a new\n episode begins so that state doesn't leak in from episode to\n episode.\n \"\"\"\n\n def process_state_for_network(self, state):\n \"\"\"Preprocess the given state before giving it to the network.\n\n Should be called just before the action is selected.\n\n This is a different method from the process_state_for_memory\n because the replay memory may require a different storage\n format to reduce memory usage. For example, storing images as\n uint8 in memory is a lot more efficient thant float32, but the\n networks work better with floating point images.\n\n Parameters\n ----------\n state: np.ndarray\n Generally a numpy array. A single state from an environment.\n\n Returns\n -------\n processed_state: np.ndarray\n Generally a numpy array. The state after processing. Can be\n modified in anyway.\n \"\"\"\n return state\n\n def process_state_for_memory(self, state):\n \"\"\"Preprocess the given state before giving it to the replay memory.\n\n Should be called just before appending this to the replay memory.\n\n This is a different method from the process_state_for_network\n because the replay memory may require a different storage\n format to reduce memory usage. For example, storing images as\n uint8 in memory and the network expecting images in floating\n point.\n\n Parameters\n ----------\n state: np.ndarray\n A single state from an environmnet. Generally a numpy array.\n\n Returns\n -------\n processed_state: np.ndarray\n Generally a numpy array. The state after processing. 
Can be\n modified in any manner.\n \"\"\"\n return state\n\n def process_batch(self, samples):\n \"\"\"Process batch of samples.\n\n If your replay memory storage format is different than your\n network input, you may want to apply this function to your\n sampled batch before running it through your update function.\n\n Parameters\n ----------\n samples: list(tensorflow_rl.core.Sample)\n List of samples to process\n\n Returns\n -------\n processed_samples: list(tensorflow_rl.core.Sample)\n Samples after processing. Can be modified in any way, but\n the list length will generally stay the same.\n \"\"\"\n return samples\n\n def process_reward(self, reward):\n \"\"\"Process the reward.\n\n Useful for things like reward clipping. The Atari environments\n from DQN paper do this. Instead of taking real score, they\n take the sign of the delta of the score.\n\n Parameters\n ----------\n reward: float\n Reward to process\n\n Returns\n -------\n processed_reward: float\n The processed reward\n \"\"\"\n return reward\n\n def reset(self):\n \"\"\"Reset any internal state.\n\n Will be called at the start of every new episode. Makes it\n possible to do history snapshots.\n \"\"\"\n pass\n\nclass ReplayMemory:\n \"\"\"Interface for replay memories.\n\n Methods\n -------\n append(state, action, reward, debug_info=None)\n Add a sample to the replay memory. \n end_episode(final_state, is_terminal, debug_info=None)\n Set the final state of an episode and mark whether it was a true\n terminal state (i.e. the env returned is_terminal=True), or if it\n is an artificial terminal state (i.e. agent quit the episode\n early, but agent could have kept running episode).\n sample(batch_size, indexes=None)\n Return list of samples from the memory. Each class will\n implement a different method of choosing the\n samples. Optionally, specify the sample indexes manually.\n clear()\n Reset the memory. Deletes all references to the samples.\n \"\"\"\n def __init__(self, args):\n \"\"\"Setup memory.\n\n You should specify the maximum size of the memory. Once the\n memory fills up, the oldest values should be removed. You can try\n the collections.deque class as the underlying storage, but\n your sample method will be very slow.\n\n We recommend using a list as a ring buffer. 
Just track the\n index where the next sample should be inserted in the list.\n \"\"\"\n self.memory_size = args.replay_memory_size\n self.history_length = args.num_frames\n self.actions = np.zeros(self.memory_size, dtype = np.int8)\n # builtin float/bool: the deprecated np.float/np.bool aliases fail on modern numpy\n self.rewards = np.zeros(self.memory_size, dtype = float)\n self.screens = np.zeros((self.memory_size, args.frame_height, args.frame_width), dtype = np.uint8)\n # self.state = np.zeros((self.memory_size, args.frame_height, args.frame_width), dtype=np.uint8)\n # self.new_state = np.zeros((self.memory_size, args.frame_height, args.frame_width), dtype=np.uint8)\n self.terminals = np.zeros(self.memory_size, dtype = bool)\n self.current = 0\n\n def append(self, state, action, reward, new_state, is_terminal):\n self.actions[self.current % self.memory_size] = action\n self.rewards[self.current % self.memory_size] = reward\n self.screens[self.current % self.memory_size] = state\n # self.state[self.current % self.memory_size] = state\n #self.new_state[self.current % self.memory_size] = new_state\n self.terminals[self.current % self.memory_size] = is_terminal\n # img = Image.fromarray(state, mode = 'L')\n # path = \"./tmp/%05d-%s.png\" % (self.current, is_terminal)\n # img.save(path)\n self.current += 1\n\n def get_state(self, index):\n state = self.screens[index - self.history_length + 1:index + 1, :, :]\n # history dimension last\n return np.transpose(state, (1, 2, 0))\n\n def get_new_state(self, index):\n state = self.screens[index - self.history_length + 1:index + 1, :, :]\n # history dimension last\n return np.transpose(state, (1, 2, 0))\n\n def sample(self, batch_size):\n samples = []\n indexes = []\n # ensure enough frames to sample\n assert self.current > self.history_length\n # -1 because still need next frame\n end = min(self.current, self.memory_size) - 1\n\n\n # while len(indexes) < batch_size:\n # index = np.random.randint(1, end)\n # if self.terminals[index]==True and self.terminals[index+1] == False:\n # continue\n # if self.terminals[index]==True and self.terminals[index+1] == True:\n # if self.rewards[index]==CONST_LAST_FRAME_REWARD:\n # continue\n # indexes.append(index)\n\n # while len(indexes) < batch_size:\n # index = np.random.randint(self.history_length - 1, end)\n # rewards = np.array(self.rewards[index - self.history_length + 1: index + 1])\n # res = rewards[np.where(rewards==CONST_LAST_FRAME_REWARD)]\n # if len(res)>0:\n # continue\n # indexes.append(index)\n\n bad_indexes_terminal = []\n bad_indexes_terminal_rewards = []\n while len(indexes) < batch_size:\n index = np.random.randint(self.history_length - 1, end)\n # if self.terminals[index - self.history_length + 1: index+1].any():\n # bad_indexes_terminal.append(index)\n # if np.min(self.rewards[index - self.history_length + 1: index + 1])==CONST_LAST_FRAME_REWARD:\n # bad_indexes_terminal_rewards.append(index)\n # continue\n if np.min(self.rewards[index - self.history_length + 1: index + 1])==CONST_LAST_FRAME_REWARD:\n bad_indexes_terminal_rewards.append(index)\n if self.terminals[index - self.history_length + 1: index + 1].any():\n bad_indexes_terminal.append(index)\n continue\n indexes.append(index)\n\n\n if False:\n import matplotlib.pyplot as plt\n s = self.get_state(idx)\n fig, axs = plt.subplots(2, 4)\n axs[0, 0].matshow(s[:, :, 0:1])\n axs[0, 1].matshow(s[:, :, 1:2])\n axs[0, 2].matshow(s[:, :, 2:3])\n axs[0, 3].matshow(s)\n\n s = self.get_state(idx + 1)\n axs[1, 0].matshow(s[:, :, 0:1])\n axs[1, 1].matshow(s[:, :, 1:2])\n axs[1, 2].matshow(s[:, :, 2:3])\n axs[1, 
3].matshow(s)\n\n counter=0\n for idx in indexes:\n new_sample = Sample(self.get_state(idx), self.actions[idx],\n self.rewards[idx], self.get_state(idx+1), self.terminals[idx])\n if self.terminals[idx]:\n counter+=1\n samples.append(new_sample)\n return samples\n\n def clear(self):\n self.current = 0\n\n\n"
] | [
[
"numpy.min",
"matplotlib.pyplot.subplots",
"numpy.transpose",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
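A minimal sketch of driving the ReplayMemory ring buffer above with a tiny hypothetical configuration; sample() additionally depends on CONST_LAST_FRAME_REWARD from DQN_constants, so only the append/get_state path is shown:

from types import SimpleNamespace
import numpy as np

args = SimpleNamespace(replay_memory_size=100, num_frames=4,
                       frame_height=84, frame_width=84)
memory = ReplayMemory(args)
frame = np.zeros((84, 84), dtype=np.uint8)
for step in range(10):                       # fill a few slots
    memory.append(frame, action=0, reward=0.0,
                  new_state=frame, is_terminal=False)
state = memory.get_state(5)                  # last four frames, stacked
print(state.shape)                           # -> (84, 84, 4)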
cuongtv312/int3405-examples | [
"c9e9f9944a12e210002868fe97c80b47f1e51d10"
] | [
"week03/naive_bayes.py"
] | [
"\"\"\"\nImplement Naive Bayes model, using smoothing constant L\n\nName:\nClass:\nMSSV:\n\nYou should understand your code\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.metrics import accuracy_score, f1_score\n\n\nclass NaiveBayes:\n\n def __init__(self, L=0):\n self.L = L\n\n def fit(self, X, y):\n return None\n\n def predict(self, X):\n return [1 for _ in X]\n\n def predict_proba(self, X):\n return [[0.5, 0.5] for _ in X]\n\n\ndef read_data(input_file):\n df = pd.read_csv(input_file)\n\n feature_columns = [c for c in df.columns if c != 'PlayTennis']\n\n X = df[feature_columns].values\n\n # Convert to 0, 1\n _y = df['PlayTennis'].values\n y = np.where(_y == 'Yes', 1, 0)\n\n return X, y\n\n\nif __name__ == \"__main__\":\n np.random.seed(1)\n\n Xtrain, ytrain = read_data('./train_nb.csv')\n Xtest, ytest = read_data('./test_nb.csv')\n print(Xtrain.shape, ytrain.shape)\n\n nb = NaiveBayes()\n nb.fit(Xtrain, ytrain)\n\n output_test = nb.predict(Xtest)\n print(\"Accuracy: \", accuracy_score(ytest, output_test))\n print(\"F1 score: \", f1_score(ytest, output_test))\n"
] | [
[
"pandas.read_csv",
"numpy.random.seed",
"sklearn.metrics.f1_score",
"numpy.where",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
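The NaiveBayes class in the record above is deliberately left as a stub for students. One possible way to fill it in for categorical features with smoothing constant L, offered purely as an illustrative sketch rather than the course's reference solution:

import numpy as np

class CategoricalNB:
    """Illustrative Naive Bayes with additive (Laplace) smoothing L."""

    def __init__(self, L=1):
        self.L = L

    def fit(self, X, y):
        self.classes_ = np.unique(y)
        self.priors_ = {c: np.mean(y == c) for c in self.classes_}
        # cond_[c][j]: feature value -> smoothed P(value | class c)
        self.cond_ = {}
        n_values = [len(np.unique(X[:, j])) for j in range(X.shape[1])]
        for c in self.classes_:
            Xc = X[y == c]
            self.cond_[c] = []
            for j in range(X.shape[1]):
                vals, counts = np.unique(Xc[:, j], return_counts=True)
                denom = len(Xc) + self.L * n_values[j]
                probs = {v: (n + self.L) / denom for v, n in zip(vals, counts)}
                probs[None] = self.L / denom          # mass for unseen values
                self.cond_[c].append(probs)
        return self

    def predict(self, X):
        out = []
        for x in X:
            scores = {}
            for c in self.classes_:
                log_p = np.log(self.priors_[c])
                for j, v in enumerate(x):
                    p = self.cond_[c][j].get(v, self.cond_[c][j][None])
                    log_p += np.log(p) if p > 0 else -np.inf
                scores[c] = log_p
            out.append(max(scores, key=scores.get))
        return out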
alex-s-v/filterer | [
"c6ce02ae5a7cccceefcdb387c9be504e6cbcd6b6"
] | [
"cli.py"
] | [
"from configparser import ConfigParser\nfrom pathlib import Path\nfrom copy import deepcopy\n\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom filterer import (\n process_data,\n compare_data\n)\n\n\ndef main(config):\n \"\"\"Main algorithm function\n\n Parameters\n ----------\n config : ConfigParser\n Configuration data for the algorithm\n \"\"\"\n cfg = parse_config(config)\n size = cfg[\"General Parameters\"][\"number_of_interpolation_points\"]\n deg = cfg[\"General Parameters\"][\"trend_degree\"]\n names = [Path(cfg[\"Data Files\"][\"stand\"])]\n frs = []\n c = cfg[\"Stand Cleaning Parameters\"]\n i = 1\n while True:\n frs.append(process_data_cfg(names[-1], c, deg, size))\n try: c = cfg[f\"Filter {i} Cleaning Parameters\"]\n except: break\n names.append(Path(cfg[\"Data Files\"][f\"filter_{i}\"]))\n i += 1\n xs, ys = compare_data(frs[0], frs[1:], size)\n path = Path(cfg[\"Output Parameters\"][\"out_path\"])\n path.mkdir(parents=True, exist_ok=True)\n save_figures = cfg[\"Output Parameters\"][\"save_figures\"] == \"Y\"\n preview = cfg[\"Output Parameters\"][\"preview\"] == \"Y\"\n act_names = [x.name[:-4] for x in names]\n if save_figures or preview:\n save_plots(\n xs, ys, frs, act_names, path,\n save_figures=save_figures,\n preview=preview,\n dpi=cfg[\"Output Parameters\"][\"image_dpi\"],\n format=cfg[\"Output Parameters\"][\"image_format\"],\n size=cfg[\"Output Parameters\"][\"image_size\"],\n **cfg[\"Figure Parameters\"]\n )\n if cfg[\"Output Parameters\"][\"save_tables\"] == \"Y\":\n save_tables(\n frs, act_names, path,\n cdel=cfg[\"Output Parameters\"][\"col_delimiter\"],\n ddel=cfg[\"Output Parameters\"][\"dec_delimiter\"]\n )\n if cfg[\"Output Parameters\"][\"save_trends\"] == \"Y\":\n save_trends(frs, act_names, path)\n\n\ndef save_trends(datas, names, path):\n \"\"\"Saves calculated trend equations in text format\n\n Parameters\n ----------\n datas : list of FtlrResult\n Filtering results for the stend and the filters\n names : list of str\n List of figure names (in the same\n order as filters in `datas` parameter)\n path : pathlib.Path\n Path to the folder for saving text files\n \"\"\"\n ml = max(map(len, names))\n with (path / \"trends.txt\").open(mode=\"w\") as f:\n for d, n in zip(datas, names):\n eq = ps2eq(d.trend_coefs)\n f.write(f\"{n}{' '*(ml-len(n))} : y = {eq}\\n\")\n return None\n\n\ndef save_tables(datas, names, path, cdel, ddel):\n \"\"\"Saves calculated data in csv table format\n\n Parameters\n ----------\n datas : list of FtlrResult\n Filtering results for the stend and the filters\n names : list of str\n List of figure names (in the same\n order as filters in `datas` parameter)\n path : pathlib.Path\n Path to the folder for saving tables\n cdel : str\n String of length 1. Field delimiter for the output file\n ddel : str\n String of length 1. 
Floating point delimiter\n \"\"\"\n p = path / \"tables\"\n p.mkdir(parents=True, exist_ok=True)\n for d, n in zip(datas, names):\n df = pd.DataFrame({\"Rate [l/min]\": d.xf, \"Pd [kPa]\": d.yf})\n df.to_csv(p / f\"{n}.csv\", sep=cdel, decimal=ddel, index=False)\n return None\n\n\ndef save_plots(xs, ys, datas, names, path, **kwargs):\n \"\"\"Create plots to save or display or both\n\n Parameters\n ----------\n xs : numpy.ndarray\n X coordinates of the data points\n ys : list of numpy.ndarray\n List of arrays of Y coordinates for\n each filter (after the comparison\n to stand data)\n datas : list of FtlrResult\n Filtering results for the stand\n (always first) and the filters\n names : list of str\n List of figure names (in the same\n order as filters in `datas` parameter)\n path : pathlib.Path\n Path to the folder for saving figures\n \"\"\"\n figs = []\n figsize = (kwargs[\"size\"], kwargs[\"size\"])\n for i, d in enumerate(datas):\n fig = plt.figure(num=names[i], figsize=figsize)\n plt.scatter(d.xf, d.yf, alpha=0.1)\n plt.plot(d.xt, d.yt, c=\"k\")\n ax = plt.gca()\n ax.set_aspect(1 / ax.get_data_ratio())\n plt.xlim(left=0)\n plt.ylim(bottom=0)\n plt.xlabel(kwargs[\"xlabel\"])\n plt.ylabel(kwargs[\"ylabel\"])\n plt.tight_layout()\n figs.append(fig)\n fig = plt.figure(num=\"comparison\", figsize=figsize)\n figs.append(fig)\n for i, y in enumerate(ys, start=1):\n plt.plot(xs, y, label=kwargs[f\"filter_{i}_label\"])\n ax = plt.gca()\n ax.set_aspect(1 / ax.get_data_ratio())\n plt.xlim(left=0)\n plt.ylim(bottom=0)\n plt.xlabel(kwargs[\"xlabel\"])\n plt.ylabel(kwargs[\"ylabel\"])\n plt.legend()\n plt.tight_layout()\n # new\n fig = plt.figure(num=\"comparison2\", figsize=figsize)\n figs.append(fig)\n for i, d in enumerate(datas):\n plt.plot(d.xt, d.yt, label=kwargs[f\"filter_{i}_label\"])\n ax = plt.gca()\n ax.set_aspect(1 / ax.get_data_ratio())\n plt.xlim(left=0)\n plt.ylim(bottom=0)\n plt.xlabel(kwargs[\"xlabel\"])\n plt.ylabel(kwargs[\"ylabel\"])\n plt.legend()\n plt.tight_layout()\n # new\n if kwargs[\"preview\"]:\n plt.show()\n if kwargs[\"save_figures\"]:\n p = (path / \"plots\")\n p.mkdir(parents=True, exist_ok=True)\n for fig in figs:\n fig.savefig(\n p / f\"{fig._label}.{kwargs['format']}\",\n dpi=kwargs[\"dpi\"],\n format=kwargs[\"format\"]\n )\n return None\n\n\ndef ps2eq(ps):\n \"\"\"Convert polynomial coefficients to text\n\n Parameters\n ----------\n ps : list of float\n List of polynomial coefficients\n\n Returns\n -------\n str\n String representation of the polynomial\n \"\"\"\n n = len(ps)\n f = \"\".join(f\"{p:+} * x^{n-i}\" for i, p in enumerate(ps, start=1))\n if \"+\" == f[0]: f = f[1:]\n f = f.replace(\"-\", \" - \")\n f = f.replace(\"+\", \" + \")\n # -6 to cut the ` * x^0` at the end\n return f[:-6]\n\n\ndef process_data_cfg(filename, cfg, deg, size):\n \"\"\"Process data using specified configuration\n\n Parameters\n ----------\n filename : str or Path\n Path to a file with the data\n cfg : dict\n Configuration dictionary\n deg : int\n Degree of the fitting polynomial\n size : int\n Number of points in the calculated trend line\n coordinates\n\n Returns\n -------\n FtlrResult\n Result of the data processing:\n * Cleaned data\n * Trend line\n * Trend line equation\n \"\"\"\n res = process_data(\n filename,\n {\n \"size\": cfg[\"number_of_bins\"],\n \"fpargs\": {\n \"height\": cfg[\"height\"], \"distance\": cfg[\"distance\"]\n }\n },\n {\"alpha_low\": cfg[\"alpha_low\"], \"alpha_high\": cfg[\"alpha_high\"]},\n deg, size\n )\n return res\n\n\ndef parse_config(cfg):\n 
\"\"\"Parse configuration dictionary\n\n Parameters\n ----------\n cfg : dict\n Raw configuration dictionary\n\n Returns\n -------\n dict\n Parsed configuration file\n \"\"\"\n cfg_ = dict()\n for k1 in cfg:\n cfg_[k1] = {}\n for k2 in cfg[k1]:\n try: cfg_[k1][k2] = int(cfg[k1][k2])\n except:\n try: cfg_[k1][k2] = float(cfg[k1][k2])\n except: cfg_[k1][k2] = cfg[k1][k2]\n return cfg_\n\n\nif __name__ == \"__main__\":\n mpl.style.use(\"seaborn-whitegrid\")\n config = ConfigParser()\n config.read(\"settings.ini\")\n main(config)\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",
"matplotlib.style.use",
"matplotlib.pyplot.ylim",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
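ps2eq() above turns numpy-style polynomial coefficients (highest power first) into a readable trend equation; a small worked example, assuming the function is importable from cli.py:

print(ps2eq([2.0, -3.0, 0.5]))
# -> 2.0 * x^2 - 3.0 * x^1 + 0.5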
TillMacher/TaxonTableTools | [
"9b5c6356acd9890465d39f16b671eb346ceec25a"
] | [
"taxontabletools/check_taXon_table_format.py"
] | [
"# check the input format\ndef check_taXon_table_format(taXon_table):\n\n import PySimpleGUI as sg\n import pandas as pd\n import numpy as np\n from pathlib import Path\n\n try:\n taXon_table_df = pd.read_excel(Path(taXon_table), \"TaXon table\")\n taXon_table_df = taXon_table_df.replace(np.nan, 'nan', regex=True)\n except:\n sg.PopupError(\"Could not find the TaXon table sheet\", keep_on_top=True)\n raise\n\n\n ###################################\n # A) header prompt\n taXon_table_df_header = taXon_table_df.columns.tolist()[0:9]\n header_prompt = [\"ID\", \"Phylum\", \"Class\", \"Order\", \"Family\", \"Genus\", \"Species\", \"Similarity\", \"Status\"]\n\n if taXon_table_df_header != header_prompt:\n sg.PopupError(\"Oops! Something is wrong with the header!\", title=\"Error\", keep_on_top=True)\n raise RuntimeError(\"Oops! Something is wrong with the header!\")\n\n ###################################\n # B) OTUs prompt\n\n ## removed in 1.1.6\n\n ###################################\n # C) Species prompt\n # does not necessarily need to include both the Genus and epithet, but it is recommended\n\n species_list = taXon_table_df['Species'].values.tolist()\n epithet_only_list = []\n\n for species in species_list:\n if (species != 'nan' and len(species.split()) < 2):\n epithet_only_list.append(species)\n\n if epithet_only_list != []:\n WarningMessage = \"Warning: There are \" + str(len(epithet_only_list)) + \" species that do not fit the binomial nomenclature.\" + \"\\n\" + \"This error message will be ignored, but it's recommended to use binomial nomenclature!\"\n sg.Popup(WarningMessage, title=\"Warning\", keep_on_top=True)\n\n\n ###################################\n # D) Samples prompt\n\n samples_list = taXon_table_df.columns.tolist()[10:]\n\n for sample in samples_list:\n if \" \" in sample:\n ErrorMessage = \"Please do not use spaces in the sample names:\\n\" + sample\n sg.PopupError(ErrorMessage, title=\"Error\", keep_on_top=True)\n raise RuntimeError(ErrorMessage)\n\n ###################################\n # E) Reads prompt\n\n OTU_reads_list = taXon_table_df.values.tolist()\n\n ask = False\n answer = False\n write = False\n\n for OTU_reads in OTU_reads_list:\n read_numbers = OTU_reads[10:]\n if sum(read_numbers) == 0:\n ask = True\n for read_number in read_numbers:\n try:\n read_number = int(read_number)\n except:\n OTU = OTU_reads[0]\n ErrorMessage = \"Please check your read numbers in \" + OTU + \" -> \" + str(read_number)\n sg.PopupError(ErrorMessage, title=\"Error\", keep_on_top=True)\n raise RuntimeError(ErrorMessage)\n\n ###################################\n # F) Taxonomy consistency\n\n taxonomy_list = taXon_table_df[[\"ID\", \"Phylum\", \"Class\", \"Order\", \"Family\", \"Genus\", \"Species\"]].values.tolist()\n for entry in taxonomy_list:\n if \"nan\" in entry:\n taxonomy = entry[1:]\n OTU = entry[0]\n n_nan = taxonomy.count(\"nan\")\n if 6 - taxonomy.index(\"nan\") != n_nan:\n answer = sg.PopupOKCancel(\"Internally missing taxonomy found!\\nReplace with placeholder?\\nWarning: This will overwrite the dataframe!\")\n break\n if answer == 'OK':\n new_df_list = []\n for row in taXon_table_df.values.tolist():\n entry = row[0:7]\n if \"nan\" in entry:\n taxonomy = entry[1:]\n OTU = entry[0]\n n_nan = taxonomy.count(\"nan\")\n if 6 - taxonomy.index(\"nan\") != n_nan:\n for item in [i for i,x in enumerate(entry) if x == \"nan\"]:\n row[item] = \"FLAG\"\n row = ['' if x=='nan' else x for x in row]\n new_df_list.append(row)\n column_names = taXon_table_df.columns.tolist()\n 
taXon_table_df_new = pd.DataFrame(new_df_list, columns=column_names)\n write = True\n sg.Popup(\"Replaced missing taxonomy with a >FLAG< placeholder.\\nPlease adjust the taxonomy in Excel.\")\n\n if write == True:\n taXon_table_df_new.to_excel(taXon_table, sheet_name=\"TaXon table\", index=False)\n\n\n sg.Popup(\"Your file looks great and is ready to use!\", title=\"Taxonomy table check\", keep_on_top=True)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
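The internal-gap rule in the checker above flags taxonomies where a 'nan' appears before a non-missing rank; a standalone illustration of the same arithmetic over the six ranks Phylum..Species:

taxonomy = ["Arthropoda", "Insecta", "nan", "Ephemeroptera", "nan", "nan"]
n_nan = taxonomy.count("nan")                              # 3
internally_missing = (6 - taxonomy.index("nan")) != n_nan  # 4 != 3
print(internally_missing)   # True: 'nan' sits between assigned ranks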
alkasm/pandas | [
"966757faa28cc536b1bca4856f1dc693ec0bd2ea"
] | [
"pandas/tests/io/test_html.py"
] | [
"from functools import partial\nfrom importlib import reload\nfrom io import BytesIO, StringIO\nimport os\nimport re\nimport threading\nfrom urllib.error import URLError\n\nimport numpy as np\nfrom numpy.random import rand\nimport pytest\n\nfrom pandas.compat import is_platform_windows\nfrom pandas.errors import ParserError\nimport pandas.util._test_decorators as td\n\nfrom pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv\nimport pandas.util.testing as tm\n\nfrom pandas.io.common import file_path_to_url\nimport pandas.io.html\nfrom pandas.io.html import read_html\n\nHERE = os.path.dirname(__file__)\n\n\[email protected](\n params=[\n \"chinese_utf-16.html\",\n \"chinese_utf-32.html\",\n \"chinese_utf-8.html\",\n \"letz_latin1.html\",\n ]\n)\ndef html_encoding_file(request, datapath):\n \"\"\"Parametrized fixture for HTML encoding test filenames.\"\"\"\n return datapath(\"io\", \"data\", \"html_encoding\", request.param)\n\n\ndef assert_framelist_equal(list1, list2, *args, **kwargs):\n assert len(list1) == len(list2), (\n \"lists are not of equal size \"\n \"len(list1) == {0}, \"\n \"len(list2) == {1}\".format(len(list1), len(list2))\n )\n msg = \"not all list elements are DataFrames\"\n both_frames = all(\n map(\n lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),\n list1,\n list2,\n )\n )\n assert both_frames, msg\n for frame_i, frame_j in zip(list1, list2):\n tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)\n assert not frame_i.empty, \"frames are both empty\"\n\n\[email protected]_if_no(\"bs4\")\ndef test_bs4_version_fails(monkeypatch, datapath):\n import bs4\n\n monkeypatch.setattr(bs4, \"__version__\", \"4.2\")\n with pytest.raises(ImportError, match=\"Pandas requires version\"):\n read_html(datapath(\"io\", \"data\", \"html\", \"spam.html\"), flavor=\"bs4\")\n\n\ndef test_invalid_flavor():\n url = \"google.com\"\n flavor = \"invalid flavor\"\n msg = r\"\\{\" + flavor + r\"\\} is not a valid set of flavors\"\n\n with pytest.raises(ValueError, match=msg):\n read_html(url, \"google\", flavor=flavor)\n\n\[email protected]_if_no(\"bs4\")\[email protected]_if_no(\"lxml\")\ndef test_same_ordering(datapath):\n filename = datapath(\"io\", \"data\", \"html\", \"valid_markup.html\")\n dfs_lxml = read_html(filename, index_col=0, flavor=[\"lxml\"])\n dfs_bs4 = read_html(filename, index_col=0, flavor=[\"bs4\"])\n assert_framelist_equal(dfs_lxml, dfs_bs4)\n\n\[email protected](\n \"flavor\",\n [\n pytest.param(\"bs4\", marks=td.skip_if_no(\"lxml\")),\n pytest.param(\"lxml\", marks=td.skip_if_no(\"lxml\")),\n ],\n scope=\"class\",\n)\nclass TestReadHtml:\n @pytest.fixture(autouse=True)\n def set_files(self, datapath):\n self.spam_data = datapath(\"io\", \"data\", \"html\", \"spam.html\")\n self.spam_data_kwargs = {}\n self.spam_data_kwargs[\"encoding\"] = \"UTF-8\"\n self.banklist_data = datapath(\"io\", \"data\", \"html\", \"banklist.html\")\n\n @pytest.fixture(autouse=True, scope=\"function\")\n def set_defaults(self, flavor, request):\n self.read_html = partial(read_html, flavor=flavor)\n yield\n\n def test_to_html_compat(self):\n df = (\n tm.makeCustomDataframe(\n 4,\n 3,\n data_gen_f=lambda *args: rand(),\n c_idx_names=False,\n r_idx_names=False,\n )\n .applymap(\"{0:.3f}\".format)\n .astype(float)\n )\n out = df.to_html()\n res = self.read_html(out, attrs={\"class\": \"dataframe\"}, index_col=0)[0]\n tm.assert_frame_equal(res, df)\n\n @tm.network\n def test_banklist_url(self):\n url = 
\"http://www.fdic.gov/bank/individual/failed/banklist.html\"\n df1 = self.read_html(\n url, \"First Federal Bank of Florida\", attrs={\"id\": \"table\"}\n )\n df2 = self.read_html(url, \"Metcalf Bank\", attrs={\"id\": \"table\"})\n\n assert_framelist_equal(df1, df2)\n\n @tm.network\n def test_spam_url(self):\n # TODO: alimcmaster1 - revert to master\n url = (\n \"https://raw.githubusercontent.com/alimcmaster1/\"\n \"pandas/mcmali-tests-dir-struct/\"\n \"pandas/tests/io/data/html/spam.html\"\n )\n df1 = self.read_html(url, \".*Water.*\")\n df2 = self.read_html(url, \"Unit\")\n\n assert_framelist_equal(df1, df2)\n\n @pytest.mark.slow\n def test_banklist(self):\n df1 = self.read_html(self.banklist_data, \".*Florida.*\", attrs={\"id\": \"table\"})\n df2 = self.read_html(self.banklist_data, \"Metcalf Bank\", attrs={\"id\": \"table\"})\n\n assert_framelist_equal(df1, df2)\n\n def test_spam(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\")\n df2 = self.read_html(self.spam_data, \"Unit\")\n assert_framelist_equal(df1, df2)\n\n assert df1[0].iloc[0, 0] == \"Proximates\"\n assert df1[0].columns[0] == \"Nutrient\"\n\n def test_spam_no_match(self):\n dfs = self.read_html(self.spam_data)\n for df in dfs:\n assert isinstance(df, DataFrame)\n\n def test_banklist_no_match(self):\n dfs = self.read_html(self.banklist_data, attrs={\"id\": \"table\"})\n for df in dfs:\n assert isinstance(df, DataFrame)\n\n def test_spam_header(self):\n df = self.read_html(self.spam_data, \".*Water.*\", header=2)[0]\n assert df.columns[0] == \"Proximates\"\n assert not df.empty\n\n def test_skiprows_int(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", skiprows=1)\n df2 = self.read_html(self.spam_data, \"Unit\", skiprows=1)\n\n assert_framelist_equal(df1, df2)\n\n def test_skiprows_xrange(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", skiprows=range(2))[0]\n df2 = self.read_html(self.spam_data, \"Unit\", skiprows=range(2))[0]\n tm.assert_frame_equal(df1, df2)\n\n def test_skiprows_list(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", skiprows=[1, 2])\n df2 = self.read_html(self.spam_data, \"Unit\", skiprows=[2, 1])\n\n assert_framelist_equal(df1, df2)\n\n def test_skiprows_set(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", skiprows={1, 2})\n df2 = self.read_html(self.spam_data, \"Unit\", skiprows={2, 1})\n\n assert_framelist_equal(df1, df2)\n\n def test_skiprows_slice(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", skiprows=1)\n df2 = self.read_html(self.spam_data, \"Unit\", skiprows=1)\n\n assert_framelist_equal(df1, df2)\n\n def test_skiprows_slice_short(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", skiprows=slice(2))\n df2 = self.read_html(self.spam_data, \"Unit\", skiprows=slice(2))\n\n assert_framelist_equal(df1, df2)\n\n def test_skiprows_slice_long(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", skiprows=slice(2, 5))\n df2 = self.read_html(self.spam_data, \"Unit\", skiprows=slice(4, 1, -1))\n\n assert_framelist_equal(df1, df2)\n\n def test_skiprows_ndarray(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", skiprows=np.arange(2))\n df2 = self.read_html(self.spam_data, \"Unit\", skiprows=np.arange(2))\n\n assert_framelist_equal(df1, df2)\n\n def test_skiprows_invalid(self):\n with pytest.raises(TypeError, match=(\"is not a valid type for skipping rows\")):\n self.read_html(self.spam_data, \".*Water.*\", skiprows=\"asdf\")\n\n def test_index(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", 
index_col=0)\n df2 = self.read_html(self.spam_data, \"Unit\", index_col=0)\n assert_framelist_equal(df1, df2)\n\n def test_header_and_index_no_types(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", header=1, index_col=0)\n df2 = self.read_html(self.spam_data, \"Unit\", header=1, index_col=0)\n assert_framelist_equal(df1, df2)\n\n def test_header_and_index_with_types(self):\n df1 = self.read_html(self.spam_data, \".*Water.*\", header=1, index_col=0)\n df2 = self.read_html(self.spam_data, \"Unit\", header=1, index_col=0)\n assert_framelist_equal(df1, df2)\n\n def test_infer_types(self):\n\n # 10892 infer_types removed\n df1 = self.read_html(self.spam_data, \".*Water.*\", index_col=0)\n df2 = self.read_html(self.spam_data, \"Unit\", index_col=0)\n assert_framelist_equal(df1, df2)\n\n def test_string_io(self):\n with open(self.spam_data, **self.spam_data_kwargs) as f:\n data1 = StringIO(f.read())\n\n with open(self.spam_data, **self.spam_data_kwargs) as f:\n data2 = StringIO(f.read())\n\n df1 = self.read_html(data1, \".*Water.*\")\n df2 = self.read_html(data2, \"Unit\")\n assert_framelist_equal(df1, df2)\n\n def test_string(self):\n with open(self.spam_data, **self.spam_data_kwargs) as f:\n data = f.read()\n\n df1 = self.read_html(data, \".*Water.*\")\n df2 = self.read_html(data, \"Unit\")\n\n assert_framelist_equal(df1, df2)\n\n def test_file_like(self):\n with open(self.spam_data, **self.spam_data_kwargs) as f:\n df1 = self.read_html(f, \".*Water.*\")\n\n with open(self.spam_data, **self.spam_data_kwargs) as f:\n df2 = self.read_html(f, \"Unit\")\n\n assert_framelist_equal(df1, df2)\n\n @tm.network\n def test_bad_url_protocol(self):\n with pytest.raises(URLError):\n self.read_html(\"git://github.com\", match=\".*Water.*\")\n\n @tm.network\n @pytest.mark.slow\n def test_invalid_url(self):\n try:\n with pytest.raises(URLError):\n self.read_html(\"http://www.a23950sdfa908sd.com\", match=\".*Water.*\")\n except ValueError as e:\n assert \"No tables found\" in str(e)\n\n @pytest.mark.slow\n def test_file_url(self):\n url = self.banklist_data\n dfs = self.read_html(\n file_path_to_url(os.path.abspath(url)), \"First\", attrs={\"id\": \"table\"}\n )\n assert isinstance(dfs, list)\n for df in dfs:\n assert isinstance(df, DataFrame)\n\n @pytest.mark.slow\n def test_invalid_table_attrs(self):\n url = self.banklist_data\n with pytest.raises(ValueError, match=\"No tables found\"):\n self.read_html(\n url, \"First Federal Bank of Florida\", attrs={\"id\": \"tasdfable\"}\n )\n\n def _bank_data(self, *args, **kwargs):\n return self.read_html(\n self.banklist_data, \"Metcalf\", attrs={\"id\": \"table\"}, *args, **kwargs\n )\n\n @pytest.mark.slow\n def test_multiindex_header(self):\n df = self._bank_data(header=[0, 1])[0]\n assert isinstance(df.columns, MultiIndex)\n\n @pytest.mark.slow\n def test_multiindex_index(self):\n df = self._bank_data(index_col=[0, 1])[0]\n assert isinstance(df.index, MultiIndex)\n\n @pytest.mark.slow\n def test_multiindex_header_index(self):\n df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]\n assert isinstance(df.columns, MultiIndex)\n assert isinstance(df.index, MultiIndex)\n\n @pytest.mark.slow\n def test_multiindex_header_skiprows_tuples(self):\n df = self._bank_data(header=[0, 1], skiprows=1)[0]\n assert isinstance(df.columns, MultiIndex)\n\n @pytest.mark.slow\n def test_multiindex_header_skiprows(self):\n df = self._bank_data(header=[0, 1], skiprows=1)[0]\n assert isinstance(df.columns, MultiIndex)\n\n @pytest.mark.slow\n def 
test_multiindex_header_index_skiprows(self):\n df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]\n assert isinstance(df.index, MultiIndex)\n assert isinstance(df.columns, MultiIndex)\n\n @pytest.mark.slow\n def test_regex_idempotency(self):\n url = self.banklist_data\n dfs = self.read_html(\n file_path_to_url(os.path.abspath(url)),\n match=re.compile(re.compile(\"Florida\")),\n attrs={\"id\": \"table\"},\n )\n assert isinstance(dfs, list)\n for df in dfs:\n assert isinstance(df, DataFrame)\n\n def test_negative_skiprows(self):\n msg = r\"\\(you passed a negative value\\)\"\n with pytest.raises(ValueError, match=msg):\n self.read_html(self.spam_data, \"Water\", skiprows=-1)\n\n @tm.network\n def test_multiple_matches(self):\n url = \"https://docs.python.org/2/\"\n dfs = self.read_html(url, match=\"Python\")\n assert len(dfs) > 1\n\n @tm.network\n def test_python_docs_table(self):\n url = \"https://docs.python.org/2/\"\n dfs = self.read_html(url, match=\"Python\")\n zz = [df.iloc[0, 0][0:4] for df in dfs]\n assert sorted(zz) == sorted([\"Repo\", \"What\"])\n\n @pytest.mark.slow\n def test_thousands_macau_stats(self, datapath):\n all_non_nan_table_index = -2\n macau_data = datapath(\"io\", \"data\", \"html\", \"macau.html\")\n dfs = self.read_html(macau_data, index_col=0, attrs={\"class\": \"style1\"})\n df = dfs[all_non_nan_table_index]\n\n assert not any(s.isna().any() for _, s in df.items())\n\n @pytest.mark.slow\n def test_thousands_macau_index_col(self, datapath):\n all_non_nan_table_index = -2\n macau_data = datapath(\"io\", \"data\", \"html\", \"macau.html\")\n dfs = self.read_html(macau_data, index_col=0, header=0)\n df = dfs[all_non_nan_table_index]\n\n assert not any(s.isna().any() for _, s in df.items())\n\n def test_empty_tables(self):\n \"\"\"\n Make sure that read_html ignores empty tables.\n \"\"\"\n result = self.read_html(\n \"\"\"\n <table>\n <thead>\n <tr>\n <th>A</th>\n <th>B</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>1</td>\n <td>2</td>\n </tr>\n </tbody>\n </table>\n <table>\n <tbody>\n </tbody>\n </table>\n \"\"\"\n )\n\n assert len(result) == 1\n\n def test_multiple_tbody(self):\n # GH-20690\n # Read all tbody tags within a single table.\n result = self.read_html(\n \"\"\"<table>\n <thead>\n <tr>\n <th>A</th>\n <th>B</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>1</td>\n <td>2</td>\n </tr>\n </tbody>\n <tbody>\n <tr>\n <td>3</td>\n <td>4</td>\n </tr>\n </tbody>\n </table>\"\"\"\n )[0]\n\n expected = DataFrame(data=[[1, 2], [3, 4]], columns=[\"A\", \"B\"])\n\n tm.assert_frame_equal(result, expected)\n\n def test_header_and_one_column(self):\n \"\"\"\n Don't fail with bs4 when there is a header and only one column\n as described in issue #9178\n \"\"\"\n result = self.read_html(\n \"\"\"<table>\n <thead>\n <tr>\n <th>Header</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>first</td>\n </tr>\n </tbody>\n </table>\"\"\"\n )[0]\n\n expected = DataFrame(data={\"Header\": \"first\"}, index=[0])\n\n tm.assert_frame_equal(result, expected)\n\n def test_thead_without_tr(self):\n \"\"\"\n Ensure parser adds <tr> within <thead> on malformed HTML.\n \"\"\"\n result = self.read_html(\n \"\"\"<table>\n <thead>\n <tr>\n <th>Country</th>\n <th>Municipality</th>\n <th>Year</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>Ukraine</td>\n <th>Odessa</th>\n <td>1944</td>\n </tr>\n </tbody>\n </table>\"\"\"\n )[0]\n\n expected = DataFrame(\n data=[[\"Ukraine\", \"Odessa\", 1944]],\n columns=[\"Country\", \"Municipality\", \"Year\"],\n )\n\n tm.assert_frame_equal(result, 
expected)\n\n def test_tfoot_read(self):\n \"\"\"\n Make sure that read_html reads tfoot, containing td or th.\n Ignores empty tfoot\n \"\"\"\n data_template = \"\"\"<table>\n <thead>\n <tr>\n <th>A</th>\n <th>B</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>bodyA</td>\n <td>bodyB</td>\n </tr>\n </tbody>\n <tfoot>\n {footer}\n </tfoot>\n </table>\"\"\"\n\n expected1 = DataFrame(data=[[\"bodyA\", \"bodyB\"]], columns=[\"A\", \"B\"])\n\n expected2 = DataFrame(\n data=[[\"bodyA\", \"bodyB\"], [\"footA\", \"footB\"]], columns=[\"A\", \"B\"]\n )\n\n data1 = data_template.format(footer=\"\")\n data2 = data_template.format(footer=\"<tr><td>footA</td><th>footB</th></tr>\")\n\n result1 = self.read_html(data1)[0]\n result2 = self.read_html(data2)[0]\n\n tm.assert_frame_equal(result1, expected1)\n tm.assert_frame_equal(result2, expected2)\n\n def test_parse_header_of_non_string_column(self):\n # GH5048: if header is specified explicitly, an int column should be\n # parsed as int while its header is parsed as str\n result = self.read_html(\n \"\"\"\n <table>\n <tr>\n <td>S</td>\n <td>I</td>\n </tr>\n <tr>\n <td>text</td>\n <td>1944</td>\n </tr>\n </table>\n \"\"\",\n header=0,\n )[0]\n\n expected = DataFrame([[\"text\", 1944]], columns=(\"S\", \"I\"))\n\n tm.assert_frame_equal(result, expected)\n\n def test_nyse_wsj_commas_table(self, datapath):\n data = datapath(\"io\", \"data\", \"html\", \"nyse_wsj.html\")\n df = self.read_html(data, index_col=0, header=0, attrs={\"class\": \"mdcTable\"})[0]\n\n expected = Index(\n [\n \"Issue(Roll over for charts and headlines)\",\n \"Volume\",\n \"Price\",\n \"Chg\",\n \"% Chg\",\n ]\n )\n nrows = 100\n assert df.shape[0] == nrows\n tm.assert_index_equal(df.columns, expected)\n\n @pytest.mark.slow\n def test_banklist_header(self, datapath):\n from pandas.io.html import _remove_whitespace\n\n def try_remove_ws(x):\n try:\n return _remove_whitespace(x)\n except AttributeError:\n return x\n\n df = self.read_html(self.banklist_data, \"Metcalf\", attrs={\"id\": \"table\"})[0]\n ground_truth = read_csv(\n datapath(\"io\", \"data\", \"csv\", \"banklist.csv\"),\n converters={\"Updated Date\": Timestamp, \"Closing Date\": Timestamp},\n )\n assert df.shape == ground_truth.shape\n old = [\n \"First Vietnamese American BankIn Vietnamese\",\n \"Westernbank Puerto RicoEn Espanol\",\n \"R-G Premier Bank of Puerto RicoEn Espanol\",\n \"EurobankEn Espanol\",\n \"Sanderson State BankEn Espanol\",\n \"Washington Mutual Bank(Including its subsidiary Washington \"\n \"Mutual Bank FSB)\",\n \"Silver State BankEn Espanol\",\n \"AmTrade International BankEn Espanol\",\n \"Hamilton Bank, NAEn Espanol\",\n \"The Citizens Savings BankPioneer Community Bank, Inc.\",\n ]\n new = [\n \"First Vietnamese American Bank\",\n \"Westernbank Puerto Rico\",\n \"R-G Premier Bank of Puerto Rico\",\n \"Eurobank\",\n \"Sanderson State Bank\",\n \"Washington Mutual Bank\",\n \"Silver State Bank\",\n \"AmTrade International Bank\",\n \"Hamilton Bank, NA\",\n \"The Citizens Savings Bank\",\n ]\n dfnew = df.applymap(try_remove_ws).replace(old, new)\n gtnew = ground_truth.applymap(try_remove_ws)\n converted = dfnew._convert(datetime=True, numeric=True)\n date_cols = [\"Closing Date\", \"Updated Date\"]\n converted[date_cols] = converted[date_cols]._convert(datetime=True, coerce=True)\n tm.assert_frame_equal(converted, gtnew)\n\n @pytest.mark.slow\n def test_gold_canyon(self):\n gc = \"Gold Canyon\"\n with open(self.banklist_data, \"r\") as f:\n raw_text = f.read()\n\n assert gc in raw_text\n df = 
self.read_html(self.banklist_data, \"Gold Canyon\", attrs={\"id\": \"table\"})[0]\n assert gc in df.to_string()\n\n def test_different_number_of_cols(self):\n expected = self.read_html(\n \"\"\"<table>\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>C_l0_g0</th>\n <th>C_l0_g1</th>\n <th>C_l0_g2</th>\n <th>C_l0_g3</th>\n <th>C_l0_g4</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>R_l0_g0</th>\n <td> 0.763</td>\n <td> 0.233</td>\n <td> nan</td>\n <td> nan</td>\n <td> nan</td>\n </tr>\n <tr>\n <th>R_l0_g1</th>\n <td> 0.244</td>\n <td> 0.285</td>\n <td> 0.392</td>\n <td> 0.137</td>\n <td> 0.222</td>\n </tr>\n </tbody>\n </table>\"\"\",\n index_col=0,\n )[0]\n\n result = self.read_html(\n \"\"\"<table>\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>C_l0_g0</th>\n <th>C_l0_g1</th>\n <th>C_l0_g2</th>\n <th>C_l0_g3</th>\n <th>C_l0_g4</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>R_l0_g0</th>\n <td> 0.763</td>\n <td> 0.233</td>\n </tr>\n <tr>\n <th>R_l0_g1</th>\n <td> 0.244</td>\n <td> 0.285</td>\n <td> 0.392</td>\n <td> 0.137</td>\n <td> 0.222</td>\n </tr>\n </tbody>\n </table>\"\"\",\n index_col=0,\n )[0]\n\n tm.assert_frame_equal(result, expected)\n\n def test_colspan_rowspan_1(self):\n # GH17054\n result = self.read_html(\n \"\"\"\n <table>\n <tr>\n <th>A</th>\n <th colspan=\"1\">B</th>\n <th rowspan=\"1\">C</th>\n </tr>\n <tr>\n <td>a</td>\n <td>b</td>\n <td>c</td>\n </tr>\n </table>\n \"\"\"\n )[0]\n\n expected = DataFrame([[\"a\", \"b\", \"c\"]], columns=[\"A\", \"B\", \"C\"])\n\n tm.assert_frame_equal(result, expected)\n\n def test_colspan_rowspan_copy_values(self):\n # GH17054\n\n # In ASCII, with lowercase letters being copies:\n #\n # X x Y Z W\n # A B b z C\n\n result = self.read_html(\n \"\"\"\n <table>\n <tr>\n <td colspan=\"2\">X</td>\n <td>Y</td>\n <td rowspan=\"2\">Z</td>\n <td>W</td>\n </tr>\n <tr>\n <td>A</td>\n <td colspan=\"2\">B</td>\n <td>C</td>\n </tr>\n </table>\n \"\"\",\n header=0,\n )[0]\n\n expected = DataFrame(\n data=[[\"A\", \"B\", \"B\", \"Z\", \"C\"]], columns=[\"X\", \"X.1\", \"Y\", \"Z\", \"W\"]\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_colspan_rowspan_both_not_1(self):\n # GH17054\n\n # In ASCII, with lowercase letters being copies:\n #\n # A B b b C\n # a b b b D\n\n result = self.read_html(\n \"\"\"\n <table>\n <tr>\n <td rowspan=\"2\">A</td>\n <td rowspan=\"2\" colspan=\"3\">B</td>\n <td>C</td>\n </tr>\n <tr>\n <td>D</td>\n </tr>\n </table>\n \"\"\",\n header=0,\n )[0]\n\n expected = DataFrame(\n data=[[\"A\", \"B\", \"B\", \"B\", \"D\"]], columns=[\"A\", \"B\", \"B.1\", \"B.2\", \"C\"]\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_rowspan_at_end_of_row(self):\n # GH17054\n\n # In ASCII, with lowercase letters being copies:\n #\n # A B\n # C b\n\n result = self.read_html(\n \"\"\"\n <table>\n <tr>\n <td>A</td>\n <td rowspan=\"2\">B</td>\n </tr>\n <tr>\n <td>C</td>\n </tr>\n </table>\n \"\"\",\n header=0,\n )[0]\n\n expected = DataFrame(data=[[\"C\", \"B\"]], columns=[\"A\", \"B\"])\n\n tm.assert_frame_equal(result, expected)\n\n def test_rowspan_only_rows(self):\n # GH17054\n\n result = self.read_html(\n \"\"\"\n <table>\n <tr>\n <td rowspan=\"3\">A</td>\n <td rowspan=\"3\">B</td>\n </tr>\n </table>\n \"\"\",\n header=0,\n )[0]\n\n expected = DataFrame(data=[[\"A\", \"B\"], [\"A\", \"B\"]], columns=[\"A\", \"B\"])\n\n tm.assert_frame_equal(result, expected)\n\n def test_header_inferred_from_rows_with_only_th(self):\n # GH17054\n result = self.read_html(\n \"\"\"\n <table>\n <tr>\n <th>A</th>\n 
<th>B</th>\n </tr>\n <tr>\n <th>a</th>\n <th>b</th>\n </tr>\n <tr>\n <td>1</td>\n <td>2</td>\n </tr>\n </table>\n \"\"\"\n )[0]\n\n columns = MultiIndex(levels=[[\"A\", \"B\"], [\"a\", \"b\"]], codes=[[0, 1], [0, 1]])\n expected = DataFrame(data=[[1, 2]], columns=columns)\n\n tm.assert_frame_equal(result, expected)\n\n def test_parse_dates_list(self):\n df = DataFrame({\"date\": date_range(\"1/1/2001\", periods=10)})\n expected = df.to_html()\n res = self.read_html(expected, parse_dates=[1], index_col=0)\n tm.assert_frame_equal(df, res[0])\n res = self.read_html(expected, parse_dates=[\"date\"], index_col=0)\n tm.assert_frame_equal(df, res[0])\n\n def test_parse_dates_combine(self):\n raw_dates = Series(date_range(\"1/1/2001\", periods=10))\n df = DataFrame(\n {\n \"date\": raw_dates.map(lambda x: str(x.date())),\n \"time\": raw_dates.map(lambda x: str(x.time())),\n }\n )\n res = self.read_html(\n df.to_html(), parse_dates={\"datetime\": [1, 2]}, index_col=1\n )\n newdf = DataFrame({\"datetime\": raw_dates})\n tm.assert_frame_equal(newdf, res[0])\n\n def test_computer_sales_page(self, datapath):\n data = datapath(\"io\", \"data\", \"html\", \"computer_sales_page.html\")\n msg = (\n r\"Passed header=\\[0,1\\] are too many \"\n r\"rows for this multi_index of columns\"\n )\n with pytest.raises(ParserError, match=msg):\n self.read_html(data, header=[0, 1])\n\n data = datapath(\"io\", \"data\", \"html\", \"computer_sales_page.html\")\n assert self.read_html(data, header=[1, 2])\n\n def test_wikipedia_states_table(self, datapath):\n data = datapath(\"io\", \"data\", \"html\", \"wikipedia_states.html\")\n assert os.path.isfile(data), \"{data!r} is not a file\".format(data=data)\n assert os.path.getsize(data), \"{data!r} is an empty file\".format(data=data)\n result = self.read_html(data, \"Arizona\", header=1)[0]\n assert result[\"sq mi\"].dtype == np.dtype(\"float64\")\n\n def test_parser_error_on_empty_header_row(self):\n msg = (\n r\"Passed header=\\[0,1\\] are too many \"\n r\"rows for this multi_index of columns\"\n )\n with pytest.raises(ParserError, match=msg):\n self.read_html(\n \"\"\"\n <table>\n <thead>\n <tr><th></th><th></tr>\n <tr><th>A</th><th>B</th></tr>\n </thead>\n <tbody>\n <tr><td>a</td><td>b</td></tr>\n </tbody>\n </table>\n \"\"\",\n header=[0, 1],\n )\n\n def test_decimal_rows(self):\n # GH 12907\n result = self.read_html(\n \"\"\"<html>\n <body>\n <table>\n <thead>\n <tr>\n <th>Header</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>1100#101</td>\n </tr>\n </tbody>\n </table>\n </body>\n </html>\"\"\",\n decimal=\"#\",\n )[0]\n\n expected = DataFrame(data={\"Header\": 1100.101}, index=[0])\n\n assert result[\"Header\"].dtype == np.dtype(\"float64\")\n tm.assert_frame_equal(result, expected)\n\n def test_bool_header_arg(self):\n # GH 6114\n for arg in [True, False]:\n with pytest.raises(TypeError):\n self.read_html(self.spam_data, header=arg)\n\n def test_converters(self):\n # GH 13461\n result = self.read_html(\n \"\"\"<table>\n <thead>\n <tr>\n <th>a</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td> 0.763</td>\n </tr>\n <tr>\n <td> 0.244</td>\n </tr>\n </tbody>\n </table>\"\"\",\n converters={\"a\": str},\n )[0]\n\n expected = DataFrame({\"a\": [\"0.763\", \"0.244\"]})\n\n tm.assert_frame_equal(result, expected)\n\n def test_na_values(self):\n # GH 13461\n result = self.read_html(\n \"\"\"<table>\n <thead>\n <tr>\n <th>a</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td> 0.763</td>\n </tr>\n <tr>\n <td> 0.244</td>\n </tr>\n </tbody>\n </table>\"\"\",\n na_values=[0.244],\n 
)[0]\n\n expected = DataFrame({\"a\": [0.763, np.nan]})\n\n tm.assert_frame_equal(result, expected)\n\n def test_keep_default_na(self):\n html_data = \"\"\"<table>\n <thead>\n <tr>\n <th>a</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td> N/A</td>\n </tr>\n <tr>\n <td> NA</td>\n </tr>\n </tbody>\n </table>\"\"\"\n\n expected_df = DataFrame({\"a\": [\"N/A\", \"NA\"]})\n html_df = self.read_html(html_data, keep_default_na=False)[0]\n tm.assert_frame_equal(expected_df, html_df)\n\n expected_df = DataFrame({\"a\": [np.nan, np.nan]})\n html_df = self.read_html(html_data, keep_default_na=True)[0]\n tm.assert_frame_equal(expected_df, html_df)\n\n def test_preserve_empty_rows(self):\n result = self.read_html(\n \"\"\"\n <table>\n <tr>\n <th>A</th>\n <th>B</th>\n </tr>\n <tr>\n <td>a</td>\n <td>b</td>\n </tr>\n <tr>\n <td></td>\n <td></td>\n </tr>\n </table>\n \"\"\"\n )[0]\n\n expected = DataFrame(data=[[\"a\", \"b\"], [np.nan, np.nan]], columns=[\"A\", \"B\"])\n\n tm.assert_frame_equal(result, expected)\n\n def test_ignore_empty_rows_when_inferring_header(self):\n result = self.read_html(\n \"\"\"\n <table>\n <thead>\n <tr><th></th><th></tr>\n <tr><th>A</th><th>B</th></tr>\n <tr><th>a</th><th>b</th></tr>\n </thead>\n <tbody>\n <tr><td>1</td><td>2</td></tr>\n </tbody>\n </table>\n \"\"\"\n )[0]\n\n columns = MultiIndex(levels=[[\"A\", \"B\"], [\"a\", \"b\"]], codes=[[0, 1], [0, 1]])\n expected = DataFrame(data=[[1, 2]], columns=columns)\n\n tm.assert_frame_equal(result, expected)\n\n def test_multiple_header_rows(self):\n # Issue #13434\n expected_df = DataFrame(\n data=[(\"Hillary\", 68, \"D\"), (\"Bernie\", 74, \"D\"), (\"Donald\", 69, \"R\")]\n )\n expected_df.columns = [\n [\"Unnamed: 0_level_0\", \"Age\", \"Party\"],\n [\"Name\", \"Unnamed: 1_level_1\", \"Unnamed: 2_level_1\"],\n ]\n html = expected_df.to_html(index=False)\n html_df = self.read_html(html)[0]\n tm.assert_frame_equal(expected_df, html_df)\n\n def test_works_on_valid_markup(self, datapath):\n filename = datapath(\"io\", \"data\", \"html\", \"valid_markup.html\")\n dfs = self.read_html(filename, index_col=0)\n assert isinstance(dfs, list)\n assert isinstance(dfs[0], DataFrame)\n\n @pytest.mark.slow\n def test_fallback_success(self, datapath):\n banklist_data = datapath(\"io\", \"data\", \"html\", \"banklist.html\")\n self.read_html(banklist_data, \".*Water.*\", flavor=[\"lxml\", \"html5lib\"])\n\n def test_to_html_timestamp(self):\n rng = date_range(\"2000-01-01\", periods=10)\n df = DataFrame(np.random.randn(10, 4), index=rng)\n\n result = df.to_html()\n assert \"2000-01-01\" in result\n\n @pytest.mark.parametrize(\n \"displayed_only,exp0,exp1\",\n [\n (True, DataFrame([\"foo\"]), None),\n (False, DataFrame([\"foo bar baz qux\"]), DataFrame([\"foo\"])),\n ],\n )\n def test_displayed_only(self, displayed_only, exp0, exp1):\n # GH 20027\n data = StringIO(\n \"\"\"<html>\n <body>\n <table>\n <tr>\n <td>\n foo\n <span style=\"display:none;text-align:center\">bar</span>\n <span style=\"display:none\">baz</span>\n <span style=\"display: none\">qux</span>\n </td>\n </tr>\n </table>\n <table style=\"display: none\">\n <tr>\n <td>foo</td>\n </tr>\n </table>\n </body>\n </html>\"\"\"\n )\n\n dfs = self.read_html(data, displayed_only=displayed_only)\n tm.assert_frame_equal(dfs[0], exp0)\n\n if exp1 is not None:\n tm.assert_frame_equal(dfs[1], exp1)\n else:\n assert len(dfs) == 1 # Should not parse hidden table\n\n def test_encode(self, html_encoding_file):\n _, encoding = os.path.splitext(os.path.basename(html_encoding_file))[0].split(\n 
\"_\"\n )\n\n try:\n with open(html_encoding_file, \"rb\") as fobj:\n from_string = self.read_html(\n fobj.read(), encoding=encoding, index_col=0\n ).pop()\n\n with open(html_encoding_file, \"rb\") as fobj:\n from_file_like = self.read_html(\n BytesIO(fobj.read()), encoding=encoding, index_col=0\n ).pop()\n\n from_filename = self.read_html(\n html_encoding_file, encoding=encoding, index_col=0\n ).pop()\n tm.assert_frame_equal(from_string, from_file_like)\n tm.assert_frame_equal(from_string, from_filename)\n except Exception:\n # seems utf-16/32 fail on windows\n if is_platform_windows():\n if \"16\" in encoding or \"32\" in encoding:\n pytest.skip()\n raise\n\n def test_parse_failure_unseekable(self):\n # Issue #17975\n\n if self.read_html.keywords.get(\"flavor\") == \"lxml\":\n pytest.skip(\"Not applicable for lxml\")\n\n class UnseekableStringIO(StringIO):\n def seekable(self):\n return False\n\n bad = UnseekableStringIO(\n \"\"\"\n <table><tr><td>spam<foobr />eggs</td></tr></table>\"\"\"\n )\n\n assert self.read_html(bad)\n\n with pytest.raises(ValueError, match=\"passed a non-rewindable file object\"):\n self.read_html(bad)\n\n def test_parse_failure_rewinds(self):\n # Issue #17975\n\n class MockFile:\n def __init__(self, data):\n self.data = data\n self.at_end = False\n\n def read(self, size=None):\n data = \"\" if self.at_end else self.data\n self.at_end = True\n return data\n\n def seek(self, offset):\n self.at_end = False\n\n def seekable(self):\n return True\n\n good = MockFile(\"<table><tr><td>spam<br />eggs</td></tr></table>\")\n bad = MockFile(\"<table><tr><td>spam<foobr />eggs</td></tr></table>\")\n\n assert self.read_html(good)\n assert self.read_html(bad)\n\n @pytest.mark.slow\n def test_importcheck_thread_safety(self, datapath):\n # see gh-16928\n\n class ErrorThread(threading.Thread):\n def run(self):\n try:\n super().run()\n except Exception as err:\n self.err = err\n else:\n self.err = None\n\n # force import check by reinitalising global vars in html.py\n reload(pandas.io.html)\n\n filename = datapath(\"io\", \"data\", \"html\", \"valid_markup.html\")\n helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))\n helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))\n\n helper_thread1.start()\n helper_thread2.start()\n\n while helper_thread1.is_alive() or helper_thread2.is_alive():\n pass\n assert None is helper_thread1.err is helper_thread2.err\n"
] | [
[
"pandas.io.html._remove_whitespace",
"pandas.MultiIndex",
"pandas.compat.is_platform_windows",
"numpy.arange",
"pandas.Index",
"pandas.DataFrame",
"pandas.io.html.read_html",
"pandas.util.testing.assert_frame_equal",
"numpy.dtype",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"numpy.random.rand",
"pandas.date_range",
"pandas.util._test_decorators.skip_if_no"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LFElodie/CarND-Behavioral-Cloning-P3 | [
"9d2db7d3ce7be8700fa359a3b11a21c44d199d50"
] | [
"utils.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import shuffle\nimport matplotlib.image as mpimg\n\nCORRECTION = 0.2\n\n\ndef load_data(data_folders):\n '''\n Read driving_log.csv from data folders \n '''\n samples = []\n for data_folder in data_folders:\n # Raw sample contains [center,left,right,steering,throttle,brake,speed] each row\n raw_sample = pd.read_csv(os.path.join(data_folder, 'driving_log.csv'))\n # Extract filenames and convert to right image path\n image_path = raw_sample.iloc[:, :3].applymap(\n lambda x: os.path.join(data_folder, 'IMG', os.path.basename(x)))\n sample = pd.concat([image_path, raw_sample.iloc[:, 3]], axis=1)\n samples.append(sample)\n samples = pd.concat(samples, axis=0)\n return samples\n\n\ndef training_generator(samples, batch_size=32):\n '''\n Generator for training data\n '''\n num_samples = samples.shape[0]\n while 1:\n samples = shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples.iloc[offset:offset + batch_size]\n\n images = []\n angles = []\n for index, batch_sample in batch_samples.iterrows():\n # read in images from center, left and right cameras\n for i in range(3):\n image = mpimg.imread(batch_sample[i])\n images.append(image)\n # steering center\n angles.append(batch_sample[3])\n # steering left\n angles.append(batch_sample[3] + CORRECTION)\n # steering right\n angles.append(batch_sample[3] - CORRECTION)\n\n # data augment flip the image\n augmented_images, augmented_angles = [], []\n for image, angle in zip(images, angles):\n augmented_images.append(image)\n augmented_angles.append(angle)\n # flip the image only if angle is larger than 0.1\n if angle > 0.1:\n augmented_images.append(np.fliplr(image))\n augmented_angles.append(-angle)\n X_train = np.array(augmented_images)\n y_train = np.array(augmented_angles)\n\n yield shuffle(X_train, y_train)\n\n\ndef validation_generator(samples, batch_size=32):\n '''\n Generator for validation data\n '''\n num_samples = samples.shape[0]\n while 1:\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples.iloc[offset:offset + batch_size]\n images = []\n angles = []\n for index, batch_sample in batch_samples.iterrows():\n image = mpimg.imread(batch_sample[0])\n images.append(image)\n angles.append(batch_sample[3])\n X_train = np.array(images)\n y_train = np.array(images)\n\n yield X_train, y_train\n"
] | [
[
"pandas.concat",
"numpy.fliplr",
"sklearn.utils.shuffle",
"matplotlib.image.imread",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rac2030/OnFire | [
"7fba89681ed20a37581d09d81d2099c9bc242423"
] | [
"bin/Vectorizer.py"
] | [
"import numpy as np\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nlist = [\"India\"]\n\nwith open('new300.csv') as f:\n for line in f:\n list.append(line)\n\nvect = TfidfVectorizer(min_df=1)\n\ntfidf = vect.fit_transform(list)\nprint(tfidf.toarray())\nprint ((tfidf * tfidf.T).A)\n\nprint(vect.get_feature_names())\nfout = open(\"vector-matrix.txt\",encoding='utf8',mode='w') \nfout.write(tfidf.toarray())\nfout.close()\n\n"
] | [
[
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JulienPeloton/gcr-catalogs | [
"1e8ba090ed42b32c2b7cfed2b3227611e093d76e"
] | [
"GCRCatalogs/alphaq.py"
] | [
"\"\"\"\nAlpha Q galaxy catalog class.\n\"\"\"\nfrom __future__ import division\nimport os\nimport re\nimport warnings\nfrom distutils.version import StrictVersion # pylint: disable=no-name-in-module,import-error\nimport numpy as np\nimport h5py\nfrom astropy.cosmology import FlatLambdaCDM\nfrom GCR import BaseGenericCatalog\nfrom .utils import md5\n\n__all__ = ['AlphaQGalaxyCatalog']\n__version__ = '5.0.0'\n\n\ndef _calc_weighted_size(size1, size2, lum1, lum2):\n return ((size1*lum1) + (size2*lum2)) / (lum1+lum2)\n\n\ndef _calc_weighted_size_minor(size1, size2, lum1, lum2, ell):\n size = _calc_weighted_size(size1, size2, lum1, lum2)\n return size * (1.0 - ell) / (1.0 + ell)\n\n\ndef _calc_conv(mag, shear1, shear2):\n slct = mag < 0.2\n mag_corr = np.copy(mag)\n mag_corr[slct] = 1.0 # manually changing the values for when magnification is near zero.\n conv = 1.0 - np.sqrt(1.0/mag_corr + shear1**2 + shear2**2)\n return conv\n\n\ndef _calc_Rv(lum_v, lum_v_dust, lum_b, lum_b_dust):\n v = lum_v_dust/lum_v\n b = lum_b_dust/lum_b\n bv = b/v\n Rv = np.log10(v) / np.log10(bv)\n Rv[(v == 1) & (b == 1)] = 1.0\n Rv[v == b] = np.nan\n return Rv\n\n\ndef _calc_Av(lum_v, lum_v_dust):\n Av = -2.5*(np.log10(lum_v_dust/lum_v))\n Av[lum_v_dust == 0] = np.nan\n return Av\n\n\ndef _gen_position_angle(size_reference):\n # pylint: disable=protected-access\n size = size_reference.size\n if not hasattr(_gen_position_angle, \"_pos_angle\") or _gen_position_angle._pos_angle.size != size:\n _gen_position_angle._pos_angle = np.random.RandomState(123497).uniform(0, 180, size)\n return _gen_position_angle._pos_angle\n\n\ndef _calc_ellipticity_1(ellipticity):\n # position angle using ellipticity as reference for the size or\n # the array. The angle is converted from degrees to radians\n pos_angle = _gen_position_angle(ellipticity)*np.pi/180.0\n # use the correct conversion for ellipticity 1 from ellipticity\n # and position angle\n return ellipticity*np.cos(2.0*pos_angle)\n\n\ndef _calc_ellipticity_2(ellipticity):\n # position angle using ellipticity as reference for the size or\n # the array. The angle is converted from degrees to radians\n pos_angle = _gen_position_angle(ellipticity)*np.pi/180.0\n # use the correct conversion for ellipticity 2 from ellipticity\n # and position angle\n return ellipticity*np.sin(2.0*pos_angle)\n\n\ndef _gen_galaxy_id(size_reference):\n # pylint: disable=protected-access\n size = size_reference.size\n if not hasattr(_gen_galaxy_id, \"_galaxy_id\") or _gen_galaxy_id._galaxy_id.size != size:\n _gen_galaxy_id._galaxy_id = np.arange(size, dtype='i8')\n return _gen_galaxy_id._galaxy_id\n\ndef _calc_lensed_magnitude(magnitude, magnification):\n magnification[magnification==0]=1.0\n return magnitude -2.5*np.log10(magnification)\n\nclass AlphaQGalaxyCatalog(BaseGenericCatalog):\n \"\"\"\n Alpha Q galaxy catalog class. 
Uses generic quantity and filter mechanisms\n defined by BaseGenericCatalog class.\n \"\"\"\n\n def _subclass_init(self, filename, **kwargs): #pylint: disable=W0221\n\n if not os.path.isfile(filename):\n raise ValueError('Catalog file {} does not exist'.format(filename))\n self._file = filename\n\n if kwargs.get('md5'):\n if md5(self._file) != kwargs['md5']:\n raise ValueError('md5 sum does not match!')\n else:\n warnings.warn('No md5 sum specified in the config file')\n\n self.lightcone = kwargs.get('lightcone')\n\n with h5py.File(self._file, 'r') as fh:\n # pylint: disable=no-member\n # get version\n catalog_version = list()\n for version_label in ('Major', 'Minor', 'MinorMinor'):\n try:\n catalog_version.append(fh['/metaData/version' + version_label].value)\n except KeyError:\n break\n catalog_version = StrictVersion('.'.join(map(str, catalog_version or (2, 0))))\n\n # get cosmology\n self.cosmology = FlatLambdaCDM(\n H0=fh['metaData/simulationParameters/H_0'].value,\n Om0=fh['metaData/simulationParameters/Omega_matter'].value,\n Ob0=fh['metaData/simulationParameters/Omega_b'].value,\n )\n self.cosmology.sigma8 = fh['metaData/simulationParameters/sigma_8'].value\n self.cosmology.n_s = fh['metaData/simulationParameters/N_s'].value\n self.halo_mass_def = fh['metaData/simulationParameters/haloMassDefinition'].value\n\n # get sky area\n if catalog_version >= StrictVersion(\"2.1.1\"):\n self.sky_area = float(fh['metaData/skyArea'].value)\n else:\n self.sky_area = 25.0 #If the sky area isn't specified use the default value of the sky area.\n\n # get native quantities\n self._native_quantities = set()\n def _collect_native_quantities(name, obj):\n if isinstance(obj, h5py.Dataset):\n self._native_quantities.add(name)\n fh['galaxyProperties'].visititems(_collect_native_quantities)\n\n # check versions\n self.version = kwargs.get('version', '0.0.0')\n config_version = StrictVersion(self.version)\n if config_version != catalog_version:\n raise ValueError('Catalog file version {} does not match config version {}'.format(catalog_version, config_version))\n if StrictVersion(__version__) < config_version:\n raise ValueError('Reader version {} is less than config version {}'.format(__version__, catalog_version))\n\n # specify quantity modifiers\n self._quantity_modifiers = {\n 'galaxy_id' : 'galaxyID',\n 'ra': 'ra',\n 'dec': 'dec',\n 'ra_true': 'ra_true',\n 'dec_true': 'dec_true',\n 'redshift': 'redshift',\n 'redshift_true': 'redshiftHubble',\n 'shear_1': 'shear1',\n 'shear_2': (np.negative, 'shear2'),\n 'shear_2_treecorr': (np.negative, 'shear2'),\n 'shear_2_phosim': 'shear2',\n 'convergence': (\n _calc_conv,\n 'magnification',\n 'shear1',\n 'shear2',\n ),\n 'magnification': (lambda mag: np.where(mag < 0.2, 1.0, mag), 'magnification'),\n 'halo_id': 'hostHaloTag',\n 'halo_mass': 'hostHaloMass',\n 'is_central': (lambda x: x.astype(np.bool), 'isCentral'),\n 'stellar_mass': 'totalMassStellar',\n 'stellar_mass_disk': 'diskMassStellar',\n 'stellar_mass_bulge': 'spheroidMassStellar',\n 'size_disk_true': 'morphology/diskMajorAxisArcsec',\n 'size_bulge_true': 'morphology/spheroidMajorAxisArcsec',\n 'size_minor_disk_true': 'morphology/diskMinorAxisArcsec',\n 'size_minor_bulge_true': 'morphology/spheroidMinorAxisArcsec',\n 'position_angle_true': (_gen_position_angle, 'morphology/positionAngle'),\n 'sersic_disk': 'morphology/diskSersicIndex',\n 'sersic_bulge': 'morphology/spheroidSersicIndex',\n 'ellipticity_true': 'morphology/totalEllipticity',\n 'ellipticity_1_true': (_calc_ellipticity_1, 
'morphology/totalEllipticity'),\n 'ellipticity_2_true': (_calc_ellipticity_2, 'morphology/totalEllipticity'),\n 'ellipticity_disk_true': 'morphology/diskEllipticity',\n 'ellipticity_1_disk_true': (_calc_ellipticity_1, 'morphology/diskEllipticity'),\n 'ellipticity_2_disk_true': (_calc_ellipticity_2, 'morphology/diskEllipticity'),\n 'ellipticity_bulge_true': 'morphology/spheroidEllipticity',\n 'ellipticity_1_bulge_true': (_calc_ellipticity_1, 'morphology/spheroidEllipticity'),\n 'ellipticity_2_bulge_true': (_calc_ellipticity_2, 'morphology/spheroidEllipticity'),\n 'size_true': (\n _calc_weighted_size,\n 'morphology/diskMajorAxisArcsec',\n 'morphology/spheroidMajorAxisArcsec',\n 'LSST_filters/diskLuminositiesStellar:LSST_r:rest',\n 'LSST_filters/spheroidLuminositiesStellar:LSST_r:rest',\n ),\n 'size_minor_true': (\n _calc_weighted_size_minor,\n 'morphology/diskMajorAxisArcsec',\n 'morphology/spheroidMajorAxisArcsec',\n 'LSST_filters/diskLuminositiesStellar:LSST_r:rest',\n 'LSST_filters/spheroidLuminositiesStellar:LSST_r:rest',\n 'morphology/totalEllipticity',\n ),\n 'bulge_to_total_ratio_i': (\n lambda x, y: x/(x+y),\n 'SDSS_filters/spheroidLuminositiesStellar:SDSS_i:observed',\n 'SDSS_filters/diskLuminositiesStellar:SDSS_i:observed',\n ),\n 'A_v': (\n _calc_Av,\n 'otherLuminosities/totalLuminositiesStellar:V:rest',\n 'otherLuminosities/totalLuminositiesStellar:V:rest:dustAtlas',\n ),\n 'A_v_disk': (\n _calc_Av,\n 'otherLuminosities/diskLuminositiesStellar:V:rest',\n 'otherLuminosities/diskLuminositiesStellar:V:rest:dustAtlas',\n ),\n 'A_v_bulge': (\n _calc_Av,\n 'otherLuminosities/spheroidLuminositiesStellar:V:rest',\n 'otherLuminosities/spheroidLuminositiesStellar:V:rest:dustAtlas',\n ),\n 'R_v': (\n _calc_Rv,\n 'otherLuminosities/totalLuminositiesStellar:V:rest',\n 'otherLuminosities/totalLuminositiesStellar:V:rest:dustAtlas',\n 'otherLuminosities/totalLuminositiesStellar:B:rest',\n 'otherLuminosities/totalLuminositiesStellar:B:rest:dustAtlas',\n ),\n 'R_v_disk': (\n _calc_Rv,\n 'otherLuminosities/diskLuminositiesStellar:V:rest',\n 'otherLuminosities/diskLuminositiesStellar:V:rest:dustAtlas',\n 'otherLuminosities/diskLuminositiesStellar:B:rest',\n 'otherLuminosities/diskLuminositiesStellar:B:rest:dustAtlas',\n ),\n 'R_v_bulge': (\n _calc_Rv,\n 'otherLuminosities/spheroidLuminositiesStellar:V:rest',\n 'otherLuminosities/spheroidLuminositiesStellar:V:rest:dustAtlas',\n 'otherLuminosities/spheroidLuminositiesStellar:B:rest',\n 'otherLuminosities/spheroidLuminositiesStellar:B:rest:dustAtlas',\n ),\n 'position_x': 'x',\n 'position_y': 'y',\n 'position_z': 'z',\n 'velocity_x': 'vx',\n 'velocity_y': 'vy',\n 'velocity_z': 'vz',\n }\n\n # add magnitudes\n for band in 'ugrizyY':\n if band != 'y' and band != 'Y':\n self._quantity_modifiers['mag_{}_sdss'.format(band)] = (_calc_lensed_magnitude, 'SDSS_filters/magnitude:SDSS_{}:observed:dustAtlas'.format(band), 'magnification',)\n self._quantity_modifiers['mag_{}_sdss_no_host_extinction'.format(band)] = (_calc_lensed_magnitude, 'SDSS_filters/magnitude:SDSS_{}:observed'.format(band), 'magnification',)\n self._quantity_modifiers['mag_true_{}_sdss'.format(band)] = 'SDSS_filters/magnitude:SDSS_{}:observed:dustAtlas'.format(band)\n self._quantity_modifiers['mag_true_{}_sdss_no_host_extinction'.format(band)] = 'SDSS_filters/magnitude:SDSS_{}:observed'.format(band)\n self._quantity_modifiers['Mag_true_{}_sdss_z0'.format(band)] = 'SDSS_filters/magnitude:SDSS_{}:rest:dustAtlas'.format(band)\n 
self._quantity_modifiers['Mag_true_{}_sdss_z0_no_host_extinction'.format(band)] = 'SDSS_filters/magnitude:SDSS_{}:rest'.format(band)\n\n self._quantity_modifiers['mag_{}_lsst'.format(band)] = (_calc_lensed_magnitude, 'LSST_filters/magnitude:LSST_{}:observed:dustAtlas'.format(band.lower()), 'magnification',)\n self._quantity_modifiers['mag_{}_lsst_no_host_extinction'.format(band)] = (_calc_lensed_magnitude, 'LSST_filters/magnitude:LSST_{}:observed'.format(band.lower()), 'magnification',)\n self._quantity_modifiers['mag_true_{}_lsst'.format(band)] = 'LSST_filters/magnitude:LSST_{}:observed:dustAtlas'.format(band.lower())\n self._quantity_modifiers['mag_true_{}_lsst_no_host_extinction'.format(band)] = 'LSST_filters/magnitude:LSST_{}:observed'.format(band.lower())\n self._quantity_modifiers['Mag_true_{}_lsst_z0'.format(band)] = 'LSST_filters/magnitude:LSST_{}:rest:dustAtlas'.format(band.lower())\n self._quantity_modifiers['Mag_true_{}_lsst_z0_no_host_extinction'.format(band)] = 'LSST_filters/magnitude:LSST_{}:rest'.format(band.lower())\n\n if band != 'Y':\n self._quantity_modifiers['mag_{}'.format(band)] = self._quantity_modifiers['mag_{}_lsst'.format(band)]\n self._quantity_modifiers['mag_true_{}'.format(band)] = self._quantity_modifiers['mag_true_{}_lsst'.format(band)]\n\n\n # add SEDs\n translate_component_name = {'total': '', 'disk': '_disk', 'spheroid': '_bulge'}\n sed_re = re.compile(r'^SEDs/([a-z]+)LuminositiesStellar:SED_(\\d+)_(\\d+):rest((?::dustAtlas)?)$')\n for quantity in self._native_quantities:\n m = sed_re.match(quantity)\n if m is None:\n continue\n component, start, width, dust = m.groups()\n key = 'sed_{}_{}{}{}'.format(start, width, translate_component_name[component], '' if dust else '_no_host_extinction')\n self._quantity_modifiers[key] = quantity\n\n # make quantity modifiers work in older versions\n if catalog_version < StrictVersion('4.0'):\n self._quantity_modifiers.update({\n 'galaxy_id' : (_gen_galaxy_id, 'galaxyID'),\n })\n\n if catalog_version < StrictVersion('3.0'):\n self._quantity_modifiers.update({\n 'galaxy_id' : 'galaxyID',\n 'host_id': 'hostIndex',\n 'position_angle_true': 'morphology/positionAngle',\n 'ellipticity_1_true': 'morphology/totalEllipticity1',\n 'ellipticity_2_true': 'morphology/totalEllipticity2',\n 'ellipticity_1_disk_true': 'morphology/diskEllipticity1',\n 'ellipticity_2_disk_true': 'morphology/diskEllipticity2',\n 'ellipticity_1_bulge_true': 'morphology/spheroidEllipticity1',\n 'ellipticity_2_bulge_true': 'morphology/spheroidEllipticity2',\n })\n\n if catalog_version < StrictVersion('2.1.2'):\n self._quantity_modifiers.update({\n 'position_angle_true': (lambda pos_angle: np.rad2deg(np.rad2deg(pos_angle)), 'morphology/positionAngle'), #I converted the units the wrong way, so a double conversion is required.\n })\n\n if catalog_version < StrictVersion('2.1.1'):\n self._quantity_modifiers.update({\n 'sersic_disk': 'diskSersicIndex',\n 'sersic_bulge': 'spheroidSersicIndex',\n })\n for key in (\n 'size_minor_true',\n 'ellipticity_true',\n 'ellipticity_1_true',\n 'ellipticity_2_true',\n 'ellipticity_1_disk_true',\n 'ellipticity_2_disk_true',\n 'ellipticity_1_bulge_true',\n 'ellipticity_2_bulge_true',\n ):\n if key in self._quantity_modifiers:\n del self._quantity_modifiers[key]\n\n if catalog_version == StrictVersion('2.0'): # to be backward compatible\n self._quantity_modifiers.update({\n 'ra': (lambda x: x/3600, 'ra'),\n 'ra_true': (lambda x: x/3600, 'ra_true'),\n 'dec': (lambda x: x/3600, 'dec'),\n 'dec_true': (lambda x: x/3600, 'dec_true'),\n 
})\n\n\n def _generate_native_quantity_list(self):\n return self._native_quantities\n\n\n def _iter_native_dataset(self, native_filters=None):\n if native_filters is not None:\n raise ValueError('*native_filters* is not supported')\n with h5py.File(self._file, 'r') as fh:\n def _native_quantity_getter(native_quantity):\n return fh['galaxyProperties/{}'.format(native_quantity)].value # pylint: disable=no-member\n yield _native_quantity_getter\n\n\n def _get_native_quantity_info_dict(self, quantity, default=None):\n with h5py.File(self._file, 'r') as fh:\n quantity_key = 'galaxyProperties/' + quantity\n if quantity_key not in fh:\n return default\n modifier = lambda k, v: None if k == 'description' and v == b'None given' else v.decode()\n return {k: modifier(k, v) for k, v in fh[quantity_key].attrs.items()}\n\n\n def _get_quantity_info_dict(self, quantity, default=None):\n q_mod = self.get_quantity_modifier(quantity)\n if callable(q_mod) or (isinstance(q_mod, (tuple, list)) and len(q_mod) > 1 and callable(q_mod[0])):\n warnings.warn('This value is composed of a function on native quantities. So we have no idea what the units are')\n return default\n return self._get_native_quantity_info_dict(q_mod or quantity, default=default)\n"
] | [
[
"numpy.sqrt",
"numpy.arange",
"numpy.cos",
"numpy.rad2deg",
"numpy.sin",
"numpy.copy",
"numpy.log10",
"numpy.random.RandomState",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pantheon5100/simsimpp | [
"147d5cdaa986d1da1608efb6cf663826bfd57053"
] | [
"solo/methods/new_predictor.py"
] | [
"import argparse\nfrom typing import Any, Dict, List, Sequence\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom solo.losses.simsiam import simsiam_loss_func\nfrom solo.methods.base import BaseModel\nfrom solo.losses.vicreg import covariance_loss\n\n\ndef value_constrain(x, type=None):\n if type == \"sigmoid\":\n return 2*torch.sigmoid(x)-1\n elif type == \"tanh\":\n return torch.tanh(0.5*x)\n else:\n return x\n\n\nclass BiasLayer(nn.Module):\n def __init__(self, output_dim, bias=False, weight_matrix=False, constrain_type=\"none\", bias_first=False):\n super(BiasLayer, self).__init__()\n self.constrain_type = constrain_type\n self.bias_first = bias_first\n\n self.weight_matrix = weight_matrix\n if weight_matrix:\n self.w = nn.Linear(output_dim, output_dim, bias=False)\n\n self.bias = bias\n if bias:\n self.bias_vector = nn.Parameter(torch.zeros(1, output_dim))\n\n def _base_forward(self, x):\n if self.bias_first:\n self.bias_vector.data = value_constrain(self.bias_vector.data, type=self.constrain_type).detach()\n x = x + self.bias_vector\n\n self.w.weight.data = value_constrain(self.w.weight.data, type=self.constrain_type).detach()\n x = self.w(x)\n else:\n self.w.weight.data = value_constrain(self.w.weight.data, type=self.constrain_type).detach()\n x = self.w(x)\n\n self.bias_vector.data = value_constrain(self.bias_vector.data, type=self.constrain_type).detach()\n x = x + self.bias_vector\n return x\n\n def forward(self,x):\n \n x = F.normalize(x, dim=-1)\n\n if self.bias and self.weight_matrix:\n x = self._base_forward(x)\n return x\n\n if self.bias:\n self.bias_vector.data = value_constrain(self.bias_vector.data, type=self.constrain_type).detach()\n x = x + self.bias_vector\n\n if self.weight_matrix:\n self.w.weight.data = value_constrain(self.w.weight.data, type=self.constrain_type).detach()\n x = self.w(x)\n\n return x\n\nclass NewPredictor(BaseModel):\n def __init__(\n self,\n output_dim: int,\n proj_hidden_dim: int,\n pred_hidden_dim: int,\n BL:bool,\n bias:bool,\n weight_matrix:bool,\n constrain:str,\n bias_first:bool,\n **kwargs,\n ):\n \"\"\"Implements SimSiam (https://arxiv.org/abs/2011.10566).\n\n Args:\n output_dim (int): number of dimensions of projected features.\n proj_hidden_dim (int): number of neurons of the hidden layers of the projector.\n pred_hidden_dim (int): number of neurons of the hidden layers of the predictor.\n \"\"\"\n\n super().__init__(**kwargs)\n\n # projector\n self.projector = nn.Sequential(\n nn.Linear(self.features_dim, proj_hidden_dim, bias=False),\n nn.BatchNorm1d(proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, proj_hidden_dim, bias=False),\n nn.BatchNorm1d(proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, output_dim),\n # nn.BatchNorm1d(output_dim, affine=False),\n )\n # self.projector[6].bias.requires_grad = False # hack: not use bias as it is followed by BN\n\n\n # predictor\n if not BL:\n assert bias and weight_matrix\n self.predictor = nn.Sequential(\n nn.Linear(output_dim, pred_hidden_dim, bias=False),\n nn.BatchNorm1d(pred_hidden_dim),\n nn.ReLU(),\n nn.Linear(pred_hidden_dim, output_dim),\n )\n elif BL:\n self.predictor = nn.Sequential(\n BiasLayer(output_dim,bias=bias, weight_matrix=weight_matrix, constrain_type=constrain, bias_first=bias_first)\n )\n \n self.register_buffer(\"previouscentering\", torch.randn(1, output_dim))\n self.register_buffer(\"onestepbeforecentering\", torch.randn(1, output_dim))\n\n\n @staticmethod\n def add_model_specific_args(parent_parser: argparse.ArgumentParser) 
-> argparse.ArgumentParser:\n parent_parser = super(NewPredictor, NewPredictor).add_model_specific_args(parent_parser)\n parser = parent_parser.add_argument_group(\"newpredictor\")\n\n # projector\n parser.add_argument(\"--output_dim\", type=int, default=128)\n parser.add_argument(\"--proj_hidden_dim\", type=int, default=2048)\n\n # predictor\n parser.add_argument(\"--BL\", action=\"store_true\")\n parser.add_argument(\"--bias\", action=\"store_true\")\n parser.add_argument(\"--bias_first\", action=\"store_true\")\n parser.add_argument(\"--weight_matrix\", action=\"store_true\")\n\n SUPPORTED_VALUE_CONSTRAIN = [\"none\", \"sigmoid\", \"tanh\"]\n parser.add_argument(\"--constrain\", choices=SUPPORTED_VALUE_CONSTRAIN, type=str)\n\n\n parser.add_argument(\"--pred_hidden_dim\", type=int, default=512)\n\n return parent_parser\n\n @property\n def learnable_params(self) -> List[dict]:\n \"\"\"Adds projector and predictor parameters to the parent's learnable parameters.\n\n Returns:\n List[dict]: list of learnable parameters.\n \"\"\"\n\n extra_learnable_params: List[dict] = [\n {\"params\": self.projector.parameters()},\n {\"params\": self.predictor.parameters(), \"static_lr\": True},\n ]\n return super().learnable_params + extra_learnable_params\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:\n \"\"\"Performs the forward pass of the encoder, the projector and the predictor.\n\n Args:\n X (torch.Tensor): a batch of images in the tensor format.\n\n Returns:\n Dict[str, Any]:\n a dict containing the outputs of the parent\n and the projected and predicted features.\n \"\"\"\n\n out = super().forward(X, *args, **kwargs)\n z = self.projector(out[\"feats\"])\n p = self.predictor(z)\n return {**out, \"z\": z, \"p\": p}\n\n def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:\n \"\"\"Training step for SimSiam reusing BaseModel training step.\n\n Args:\n batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where\n [X] is a list of size self.num_crops containing batches of images\n batch_idx (int): index of the batch\n\n Returns:\n torch.Tensor: total loss composed of SimSiam loss and classification loss\n \"\"\"\n\n out = super().training_step(batch, batch_idx)\n class_loss = out[\"loss\"]\n feats1, feats2 = out[\"feats\"]\n\n z1 = self.projector(feats1)\n z2 = self.projector(feats2)\n\n p1 = self.predictor(z1)\n p2 = self.predictor(z2)\n\n # ------- contrastive loss -------\n neg_cos_sim = simsiam_loss_func(p1, z2) / 2 + simsiam_loss_func(p2, z1) / 2\n\n # calculate std of features\n z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()\n z2_std = F.normalize(z2, dim=-1).std(dim=0).mean()\n z_std = (z1_std + z2_std) / 2\n\n with torch.no_grad():\n z1_norm = F.normalize(z1, dim=-1)\n z2_norm = F.normalize(z2, dim=-1)\n # normalize the vector to make it comparable\n\n centervector = ((z1_norm + z2_norm)/2).mean(dim=0)\n residualvector = z2_norm - centervector\n # import pdb; pdb.set_trace()\n\n ZvsC = F.cosine_similarity(z2_norm, centervector.expand(z2_norm.size(0), 2048), dim=-1).mean()\n ZvsR = F.cosine_similarity(z2_norm, residualvector, dim=-1).mean()\n CvsR = F.cosine_similarity(centervector.expand(z2_norm.size(0), 2048), residualvector, dim=-1).mean()\n\n\n ratio_RvsW = (torch.linalg.norm(residualvector, dim=1, ord=2) / torch.linalg.norm(z2_norm, dim=1, ord=2)).mean()\n ratio_CvsW = (torch.linalg.norm(centervector.expand(z2_norm.size(0), 2048), dim=1, ord=2) / torch.linalg.norm(z2_norm, dim=1, ord=2)).mean()\n\n CS1vsCc = 
F.cosine_similarity(self.onestepbeforecentering, centervector.reshape(1, -1))\n CS1minusCcvsCc = F.cosine_similarity(centervector.reshape(1, -1)-self.onestepbeforecentering , centervector.reshape(1, -1))\n CS1minusCcvsCS1 = F.cosine_similarity(centervector.reshape(1, -1)-self.onestepbeforecentering , self.onestepbeforecentering)\n\n self.onestepbeforecentering = centervector.reshape(1, -1)\n\n new_metric_log={\"ZvsC_norm\":ZvsC,\n \"ZvsR_norm\":ZvsR,\n \"ratio_RvsW_norm\":ratio_RvsW,\n \"ratio_CvsW_norm\":ratio_CvsW,\n \"CvsR_norm\":CvsR,\n \"CS1vsCc\":CS1vsCc,\n \"CS1minusCcvsCc\":CS1minusCcvsCc,\n \"CS1minusCcvsCS1\":CS1minusCcvsCS1,\n }\n\n if self.trainer.global_step % 100 == 0:\n\n CpvsCc = F.cosine_similarity(self.previouscentering, centervector.reshape(1, -1))\n\n self.previouscentering = centervector.reshape(1, -1).clone()\n\n new_metric_log.update({\"CpvsCc_norm\": CpvsCc})\n\n # calculate std of features\n z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()\n z2_std = F.normalize(z2, dim=-1).std(dim=0).mean()\n z_std = (z1_std + z2_std) / 2\n\n with torch.no_grad():\n cov_loss = covariance_loss(z1_norm, z2_norm)\n mean_z = (z1_norm.abs().mean(dim=1) + z2_norm.abs().mean(dim=1)).mean()/2\n\n metrics = {\n \"neg_cos_sim\": neg_cos_sim,\n \"train_z_std\": z_std,\n \"cov_loss\": cov_loss,\n \"mean_z\": mean_z,\n }\n\n metrics.update(new_metric_log)\n\n self.log_dict(metrics, on_epoch=True, sync_dist=True)\n\n return neg_cos_sim + class_loss\n"
] | [
[
"torch.nn.functional.normalize",
"torch.nn.BatchNorm1d",
"torch.sigmoid",
"torch.zeros",
"torch.randn",
"torch.linalg.norm",
"torch.tanh",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.functional.cosine_similarity",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jbreitbart/detectron2 | [
"53dfc401103993a64f6714b7bfc36bcebe36e55c"
] | [
"projects/DensePose/densepose/evaluator.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport contextlib\nimport copy\nimport io\nimport itertools\nimport logging\nimport numpy as np\nimport os\nfrom collections import OrderedDict\nimport pycocotools.mask as mask_utils\nimport torch\nfrom fvcore.common.file_io import PathManager\nfrom pycocotools.coco import COCO\n\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.evaluation import DatasetEvaluator\nfrom detectron2.structures import BoxMode\nfrom detectron2.utils.comm import all_gather, is_main_process, synchronize\nfrom detectron2.utils.logger import create_small_table\n\nfrom .converters import ToChartResultConverter, ToMaskConverter\nfrom .densepose_coco_evaluation import DensePoseCocoEval, DensePoseEvalMode\nfrom .structures import compress_quantized_densepose_chart_result, quantize_densepose_chart_result\n\n\nclass DensePoseCOCOEvaluator(DatasetEvaluator):\n def __init__(self, dataset_name, distributed, output_dir=None):\n self._distributed = distributed\n self._output_dir = output_dir\n\n self._cpu_device = torch.device(\"cpu\")\n self._logger = logging.getLogger(__name__)\n\n self._metadata = MetadataCatalog.get(dataset_name)\n self._min_threshold = 0.5\n json_file = PathManager.get_local_path(self._metadata.json_file)\n with contextlib.redirect_stdout(io.StringIO()):\n self._coco_api = COCO(json_file)\n\n def reset(self):\n self._predictions = []\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).\n It is a list of dict. Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\", \"image_id\".\n outputs: the outputs of a COCO model. It is a list of dicts with key\n \"instances\" that contains :class:`Instances`.\n The :class:`Instances` object needs to have `densepose` field.\n \"\"\"\n for input, output in zip(inputs, outputs):\n instances = output[\"instances\"].to(self._cpu_device)\n if not instances.has(\"pred_densepose\"):\n continue\n json_results = prediction_to_json(instances, input[\"image_id\"])\n self._predictions.extend(json_results)\n\n def evaluate(self, imgIds=None):\n if self._distributed:\n synchronize()\n predictions = all_gather(self._predictions)\n predictions = list(itertools.chain(*predictions))\n if not is_main_process():\n return\n else:\n predictions = self._predictions\n\n return copy.deepcopy(self._eval_predictions(predictions, imgIds))\n\n def _eval_predictions(self, predictions, imgIds=None):\n \"\"\"\n Evaluate predictions on densepose.\n Return results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n\n if self._output_dir:\n PathManager.mkdirs(self._output_dir)\n file_path = os.path.join(self._output_dir, \"coco_densepose_predictions.pth\")\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(predictions, f)\n\n self._logger.info(\"Evaluating predictions ...\")\n res = OrderedDict()\n results_gps, results_gpsm, results_segm = _evaluate_predictions_on_coco(\n self._coco_api, predictions, min_threshold=self._min_threshold, imgIds=imgIds\n )\n res[\"densepose_gps\"] = results_gps\n res[\"densepose_gpsm\"] = results_gpsm\n res[\"densepose_segm\"] = results_segm\n return res\n\n\ndef prediction_to_json(instances, img_id):\n \"\"\"\n Args:\n instances (Instances): the output of the model\n img_id (str): the image id in COCO\n\n Returns:\n list[dict]: the results in densepose evaluation format\n \"\"\"\n scores = 
instances.scores.tolist()\n segmentations = ToMaskConverter.convert(\n instances.pred_densepose, instances.pred_boxes, instances.image_size\n )\n raw_boxes_xywh = BoxMode.convert(\n instances.pred_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS\n )\n\n results = []\n for k in range(len(instances)):\n densepose_results_quantized_compressed = compress_quantized_densepose_chart_result(\n quantize_densepose_chart_result(\n ToChartResultConverter.convert(instances.pred_densepose[k], instances.pred_boxes[k])\n )\n )\n segmentation = segmentations.tensor[k]\n segmentation_encoded = mask_utils.encode(\n np.require(segmentation.numpy(), dtype=np.uint8, requirements=[\"F\"])\n )\n segmentation_encoded[\"counts\"] = segmentation_encoded[\"counts\"].decode(\"utf-8\")\n result = {\n \"image_id\": img_id,\n \"category_id\": 1, # densepose only has one class\n \"bbox\": raw_boxes_xywh[k].tolist(),\n \"score\": scores[k],\n \"densepose\": densepose_results_quantized_compressed,\n \"segmentation\": segmentation_encoded,\n }\n results.append(result)\n return results\n\n\ndef _evaluate_predictions_on_coco(coco_gt, coco_results, min_threshold=0.5, imgIds=None):\n logger = logging.getLogger(__name__)\n\n segm_metrics = _get_segmentation_metrics()\n densepose_metrics = _get_densepose_metrics(min_threshold)\n if len(coco_results) == 0: # cocoapi does not handle empty results very well\n logger.warn(\"No predictions from the model! Set scores to -1\")\n results_gps = {metric: -1 for metric in densepose_metrics}\n results_gpsm = {metric: -1 for metric in densepose_metrics}\n results_segm = {metric: -1 for metric in segm_metrics}\n return results_gps, results_gpsm, results_segm\n\n coco_dt = coco_gt.loadRes(coco_results)\n results_segm = _evaluate_predictions_on_coco_segm(\n coco_gt, coco_dt, segm_metrics, min_threshold, imgIds\n )\n logger.info(\"Evaluation results for densepose segm: \\n\" + create_small_table(results_segm))\n results_gps = _evaluate_predictions_on_coco_gps(\n coco_gt, coco_dt, densepose_metrics, min_threshold, imgIds\n )\n logger.info(\n \"Evaluation results for densepose, GPS metric: \\n\" + create_small_table(results_gps)\n )\n results_gpsm = _evaluate_predictions_on_coco_gpsm(\n coco_gt, coco_dt, densepose_metrics, min_threshold, imgIds\n )\n logger.info(\n \"Evaluation results for densepose, GPSm metric: \\n\" + create_small_table(results_gpsm)\n )\n return results_gps, results_gpsm, results_segm\n\n\ndef _get_densepose_metrics(min_threshold=0.5):\n metrics = [\"AP\"]\n if min_threshold <= 0.201:\n metrics += [\"AP20\"]\n if min_threshold <= 0.301:\n metrics += [\"AP30\"]\n if min_threshold <= 0.401:\n metrics += [\"AP40\"]\n metrics.extend([\"AP50\", \"AP75\", \"APm\", \"APl\", \"AR\", \"AR50\", \"AR75\", \"ARm\", \"ARl\"])\n return metrics\n\n\ndef _get_segmentation_metrics():\n return [\n \"AP\",\n \"AP50\",\n \"AP75\",\n \"APs\",\n \"APm\",\n \"APl\",\n \"AR@1\",\n \"AR@10\",\n \"AR@100\",\n \"ARs\",\n \"ARm\",\n \"ARl\",\n ]\n\n\ndef _evaluate_predictions_on_coco_gps(coco_gt, coco_dt, metrics, min_threshold=0.5, imgIds=None):\n coco_eval = DensePoseCocoEval(coco_gt, coco_dt, \"densepose\", dpEvalMode=DensePoseEvalMode.GPS)\n if imgIds is not None:\n coco_eval.params.imgIds = imgIds\n coco_eval.params.iouThrs = np.linspace(\n min_threshold, 0.95, int(np.round((0.95 - min_threshold) / 0.05)) + 1, endpoint=True\n )\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)}\n 
return results\n\n\ndef _evaluate_predictions_on_coco_gpsm(coco_gt, coco_dt, metrics, min_threshold=0.5, imgIds=None):\n coco_eval = DensePoseCocoEval(coco_gt, coco_dt, \"densepose\", dpEvalMode=DensePoseEvalMode.GPSM)\n if imgIds is not None:\n coco_eval.params.imgIds = imgIds\n coco_eval.params.iouThrs = np.linspace(\n min_threshold, 0.95, int(np.round((0.95 - min_threshold) / 0.05)) + 1, endpoint=True\n )\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)}\n return results\n\n\ndef _evaluate_predictions_on_coco_segm(coco_gt, coco_dt, metrics, min_threshold=0.5, imgIds=None):\n coco_eval = DensePoseCocoEval(coco_gt, coco_dt, \"segm\")\n if imgIds is not None:\n coco_eval.params.imgIds = imgIds\n coco_eval.params.iouThrs = np.linspace(\n min_threshold, 0.95, int(np.round((0.95 - min_threshold) / 0.05)) + 1, endpoint=True\n )\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)}\n return results\n"
] | [
[
"torch.device",
"numpy.round",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ioir123ju/mmskeleton | [
"bab5f973f00d68c5e166e450dd2ed95169a6549a"
] | [
"mmskeleton/processor/recognition_granary.py"
] | [
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n@File : recognition_granary.py.py \n@Contact : JZ\n@License : (C)Copyright 2018-2019, Liugroup-NLPR-CASIA\n\n@Modify Time @Author @Version @Desciption\n------------ ------- -------- -----------\n2019/10/23 15:07 juzheng 1.0 None\n\"\"\"\n\nimport os\nimport cv2\nimport torch\nimport logging\nimport json\nimport numpy as np\nfrom mmskeleton.utils import call_obj, import_obj, load_checkpoint, cache_checkpoint\nfrom mmcv.runner import Runner\nfrom mmcv import Config, ProgressBar\nfrom mmcv.parallel import MMDataParallel\nimport mmcv\nfrom mmskeleton.apis.estimation import init_pose_estimator, inference_pose_estimator\nfrom multiprocessing import current_process, Process, Manager\n\n\n# process a batch of data\ndef batch_processor(model, datas, train_mode, loss):\n\n data, label = datas\n data = data.cuda()\n label = label.cuda()\n\n # forward\n output = model(data)\n losses = loss(output, label)\n\n # output\n log_vars = dict(loss=losses.item())\n if not train_mode:\n log_vars['top1'] = topk_accuracy(output, label)\n log_vars['top5'] = topk_accuracy(output, label, 5)\n\n outputs = dict(loss=losses, log_vars=log_vars, num_samples=len(data.data))\n return outputs\n\n\ndef topk_accuracy(score, label, k=1):\n rank = score.argsort()\n hit_top_k = [l in rank[i, -k:] for i, l in enumerate(label)]\n accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)\n return accuracy\n\n\ndef weights_init(model):\n classname = model.__class__.__name__\n if classname.find('Conv1d') != -1:\n model.weight.data.normal_(0.0, 0.02)\n if model.bias is not None:\n model.bias.data.fill_(0)\n elif classname.find('Conv2d') != -1:\n model.weight.data.normal_(0.0, 0.02)\n if model.bias is not None:\n model.bias.data.fill_(0)\n elif classname.find('BatchNorm') != -1:\n model.weight.data.normal_(1.0, 0.02)\n model.bias.data.fill_(0)\n\n\ndef render(image, pred, label, person_bbox, bbox_thre=0):\n if pred is None:\n return image\n\n mmcv.imshow_det_bboxes(image,\n person_bbox,\n np.zeros(len(person_bbox)).astype(int),\n class_names=['person'],\n score_thr=bbox_thre,\n show=False,\n wait_time=0)\n\n for person_pred in pred:\n for i, joint_pred in enumerate(person_pred):\n cv2.circle(image, (int(joint_pred[0]), int(joint_pred[1])), 2,\n [255, 0, 0], 2)\n cv2.putText(image, '{}'.format(i), (int(joint_pred[0]), int(joint_pred[1])),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, [255, 255, 255])\n\n cv2.putText(image, '{}'.format(label), (int(person_pred[0][0]), int(person_pred[0][1] - 20)),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, [255, 255, 255])\n image = draw_body_pose(image, person_pred)\n return np.uint8(image)\n\n\n# draw the body keypoint and lims\ndef draw_body_pose(image, person_pred):\n line_seq = [[0, 2], [2, 4], [0, 1], [1, 3], [0, 6], [6, 8], [8, 10], [0, 5],\n [5, 7], [7, 9], [0, 12], [12, 14], [14, 16], [0, 11], [11, 13], [13, 15]]\n\n colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],\n [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],\n [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]\n for i, line in enumerate(line_seq):\n first_index, second_index = line\n first_point = person_pred[first_index]\n second_point = person_pred[second_index]\n cv2.line(image, (int(first_point[0]), int(first_point[1])), (int(second_point[0]), int(second_point[1])),\n colors[i], 2)\n return image\n\n\ndef get_all_file(dir_path, file_list):\n for file in os.listdir(dir_path):\n # 
print(file)\n filepath = os.path.join(dir_path, file)\n # print(filepath)\n if os.path.isdir(filepath):\n get_all_file(filepath, file_list)\n else:\n file_list.append(filepath)\n return file_list\n\n\ndef build(inputs,\n detection_cfg,\n estimation_cfg,\n tracker_cfg,\n video_dir,\n gpus=1,\n video_max_length=10000,\n category_annotation=None):\n print('data build start')\n cache_checkpoint(detection_cfg.checkpoint_file)\n cache_checkpoint(estimation_cfg.checkpoint_file)\n\n if category_annotation is None:\n video_categories = dict()\n else:\n with open(category_annotation) as f:\n video_categories = json.load(f)['annotations']\n\n if tracker_cfg is not None:\n raise NotImplementedError\n\n pose_estimators = init_pose_estimator(\n detection_cfg, estimation_cfg, device=0)\n\n video_file_list = []\n get_all_file(video_dir, video_file_list)\n\n prog_bar = ProgressBar(len(video_file_list))\n for video_path in video_file_list:\n video_file = os.path.basename(video_path)\n reader = mmcv.VideoReader(video_path)\n video_frames = reader[:video_max_length]\n\n annotations = []\n num_keypoints = -1\n for i, image in enumerate(video_frames):\n res = inference_pose_estimator(pose_estimators, image)\n res['frame_index'] = i\n if not res['has_return']:\n continue\n num_person = len(res['joint_preds'])\n assert len(res['person_bbox']) == num_person\n\n for j in range(num_person):\n keypoints = [[p[0], p[1], round(s[0], 2)] for p, s in zip(\n res['joint_preds'][j].round().astype(int).tolist(), res[\n 'joint_scores'][j].tolist())]\n num_keypoints = len(keypoints)\n person_info = dict(\n person_bbox=res['person_bbox'][j].round().astype(int).tolist(),\n frame_index=res['frame_index'],\n id=j,\n person_id=None,\n keypoints=keypoints)\n annotations.append(person_info)\n annotations = sorted(annotations, key=lambda x: x['frame_index'])\n category_id = video_categories[video_file][\n 'category_id'] if video_file in video_categories else -1\n info = dict(\n video_name=video_file,\n resolution=reader.resolution,\n num_frame=len(video_frames),\n num_keypoints=num_keypoints,\n keypoint_channels=['x', 'y', 'score'],\n version='1.0')\n video_info = dict(\n info=info, category_id=category_id, annotations=annotations)\n inputs.put(video_info)\n prog_bar.update()\n\n\ndef data_parse(data, pipeline, num_track=1):\n info = data['info']\n annotations = data['annotations']\n num_frame = info['num_frame']\n num_keypoints = info['num_keypoints']\n channel = info['keypoint_channels']\n num_channel = len(channel)\n\n # get data\n data['data'] = np.zeros(\n (num_channel, num_keypoints, num_frame, num_track),\n dtype=np.float32)\n\n for a in annotations:\n person_id = a['id'] if a['person_id'] is None else a['person_id']\n frame_index = a['frame_index']\n if person_id < num_track and frame_index < num_frame:\n data['data'][:, :, frame_index, person_id] = np.array(\n a['keypoints']).transpose()\n # run the data preprocessing pipeline\n for stage_args in pipeline:\n data = call_obj(data=data, **stage_args)\n return data\n\n\ndef detect(inputs, results, model_cfg, dataset_cfg, checkpoint, video_dir,\n batch_size=64, gpus=1, workers=4):\n print('detect start')\n # put model on gpus\n if isinstance(model_cfg, list):\n model = [call_obj(**c) for c in model_cfg]\n model = torch.nn.Sequential(*model)\n else:\n model = call_obj(**model_cfg)\n load_checkpoint(model, checkpoint, map_location='cpu')\n model = MMDataParallel(model, device_ids=range(gpus)).cuda()\n model.eval()\n\n results = []\n labels = []\n video_file_list = os.listdir(video_dir)\n prog_bar = 
ProgressBar(len(video_file_list))\n for video_file in video_file_list:\n data = inputs.get()\n data_loader = data_parse(data, dataset_cfg.pipeline, dataset_cfg.data_source.num_track)\n data, label = data_loader\n with torch.no_grad():\n data = torch.from_numpy(data)\n # add a leading dimension for the batch size\n data = data.unsqueeze(0)\n data = data.float().to(\"cuda:0\").detach()\n output = model(data).data.cpu().numpy()\n results.append(output)\n labels.append(torch.tensor([label]))\n for i in range(len(data)):\n prog_bar.update()\n print('--------', results, labels, '--------------')\n results = np.concatenate(results)\n labels = np.concatenate(labels)\n\n print('Top 1: {:.2f}%'.format(100 * topk_accuracy(results, labels, 1)))\n print('Top 5: {:.2f}%'.format(100 * topk_accuracy(results, labels, 5)))\n\n\ndef realtime_detect(detection_cfg, estimation_cfg, model_cfg, dataset_cfg, tracker_cfg, video_dir,\n category_annotation, checkpoint, batch_size=64, gpus=1, workers=4):\n \"\"\"\n Initialization\n \"\"\"\n # initialize the pose estimator and the recognition model\n pose_estimators = init_pose_estimator(\n detection_cfg, estimation_cfg, device=0)\n if isinstance(model_cfg, list):\n model = [call_obj(**c) for c in model_cfg]\n model = torch.nn.Sequential(*model)\n else:\n model = call_obj(**model_cfg)\n load_checkpoint(model, checkpoint, map_location='cpu')\n model = MMDataParallel(model, device_ids=range(gpus)).cuda()\n model.eval()\n\n # read the video frames\n video_file = 'train/clean/clean10.avi'\n reader = mmcv.VideoReader(os.path.join(video_dir, video_file))\n video_frames = reader[:10000]\n\n if category_annotation is None:\n video_categories = dict()\n else:\n with open(category_annotation) as f:\n json_file = json.load(f)\n video_categories = json_file['annotations']\n action_class = json_file['categories']\n annotations = []\n num_keypoints = -1\n for i, image in enumerate(video_frames):\n res = inference_pose_estimator(pose_estimators, image)\n res['frame_index'] = i\n if not res['has_return']:\n continue\n num_person = len(res['joint_preds'])\n assert len(res['person_bbox']) == num_person\n\n for j in range(num_person):\n keypoints = [[p[0], p[1], round(s[0], 2)] for p, s in zip(\n res['joint_preds'][j].round().astype(int).tolist(), res[\n 'joint_scores'][j].tolist())]\n num_keypoints = len(keypoints)\n person_info = dict(\n person_bbox=res['person_bbox'][j].round().astype(int).tolist(),\n frame_index=res['frame_index'],\n id=j,\n person_id=None,\n keypoints=keypoints)\n annotations.append(person_info)\n category_id = video_categories[video_file][\n 'category_id'] if video_file in video_categories else -1\n info = dict(\n video_name=video_file,\n resolution=reader.resolution,\n num_frame=len(video_frames),\n num_keypoints=num_keypoints,\n keypoint_channels=['x', 'y', 'score'],\n version='1.0')\n video_info = dict(info=info, category_id=category_id, annotations=annotations)\n\n data_loader = data_parse(video_info, dataset_cfg.pipeline, dataset_cfg.data_source.num_track)\n data, label = data_loader\n with torch.no_grad():\n data = torch.from_numpy(data)\n # add a leading dimension for the batch size\n data = data.unsqueeze(0)\n data = data.float().to(\"cuda:0\").detach()\n output = model(data).data.cpu().numpy()\n top1 = output.argmax()\n if output[:, top1] > 3:\n label = action_class[top1]\n else:\n label = 'unknown'\n print(\"result:\", output)\n\n res['render_image'] = render(image, res['joint_preds'],\n label,\n res['person_bbox'],\n detection_cfg.bbox_thre)\n cv2.imshow('image', image)\n cv2.waitKey(10)\n\n\ndef recognition(detection_cfg, estimation_cfg, model_cfg, dataset_cfg, tracker_cfg, 
video_dir,\n category_annotation, checkpoint, batch_size=64, gpus=1, workers=4):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n inputs = Manager().Queue(10000)\n results = Manager().Queue(10000)\n procs = []\n p1 = Process(\n target=build,\n args=(inputs, detection_cfg, estimation_cfg, tracker_cfg, video_dir,\n gpus, 10000, category_annotation))\n p1.start()\n procs.append(p1)\n p2 = Process(\n target=detect,\n args=(inputs, results, model_cfg, dataset_cfg, checkpoint, video_dir,\n batch_size, gpus, workers))\n p2.start()\n procs.append(p2)\n for p in procs:\n p.join()\n\n"
] | [
[
"torch.nn.Sequential",
"numpy.uint8",
"torch.from_numpy",
"torch.tensor",
"numpy.concatenate",
"torch.no_grad",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
juliadeneva/NICERsoft | [
"13681678ec57a81605601714896876cb870475ff"
] | [
"scripts/photon_toa.py"
] | [
"#!/usr/bin/env python\n# Program: photon_toa.py\n# Authors: Paul S. Ray <[email protected]>\n# Matthew Kerr <[email protected]>\n# Julia Deneva <[email protected]>\n# Description:\n# Reads a FITS file of photon event times (from NICER or another X-ray mission)\n# and generates TOAs from the unbined times using a pulsar timing model\n# and an analytic template. The TOAs can be output them in Tempo2 format.\nfrom __future__ import division, print_function\n\n# from future import standard_library\n# standard_library.install_aliases()\nfrom builtins import str\nfrom builtins import zip\nfrom builtins import range\nimport os, sys\nimport argparse\nimport numpy as np\nfrom astropy import log\nimport astropy.units as u\nimport astropy.io.fits as pyfits\nimport pint.residuals\nfrom pint.event_toas import load_NICER_TOAs\nfrom pint.event_toas import load_RXTE_TOAs\nfrom pint.event_toas import load_NuSTAR_TOAs\nfrom pint.event_toas import load_XMM_TOAs\nfrom pint.plot_utils import phaseogram_binned\nfrom pint.observatory.satellite_obs import get_satellite_observatory\nimport pint.toa, pint.models\nfrom pint.eventstats import hmw, hm, h2sig\nfrom astropy.time import Time, TimeDelta\nfrom pint.templates.lctemplate import LCTemplate, prim_io\nfrom pint.templates import lcfitters\nfrom copy import deepcopy\nimport pickle\nimport io\nfrom collections import deque\nimport astropy.constants as const\nfrom pint.observatory import get_observatory\nfrom pint.observatory.special_locations import T2SpacecraftObs\n\nlog.setLevel(\"INFO\")\n\n\ndef local_load_NICER_TOAs(eventname):\n \"\"\" Local override to add MET field to each TOA object.\"\"\"\n # TODO -- add this to PINT method ?\n tl = load_NICER_TOAs(eventname)\n f = pyfits.open(eventname)\n mets = f[\"events\"].data.field(\"time\")\n f.close()\n for t, met in zip(tl, mets):\n t.met = met\n # The returned tl has topocentric TT MJD photon times; TIMESYS=TT, TIMEREF=LOCAL in the .evt file\n return tl\n\n\ndef estimate_toa(mjds, phases, ph_times, topo, obs, modelin):\n \"\"\" Return a pint TOA object for the provided times and phases.\n\n Longer description here.\n\n Parameters\n ----------\n mjds : array of floats\n The MJD times of each photon. These are for sorting the TOAs into\n groups and making plots, not for precision work! The timescale\n may be different for different datasets (see pint.toa.get_mjds)\n phases : array\n Array of model-computed phase values for each photon. Should be floats\n between 0.0 and 1.0\n ph_times : array of astropy.Time objects\n Array of photon times, as recorded at Observatory obs\n If obs==\"Barycenter\" then these should be BAT times in the TDB timescale\n with the Roemer delays removed (the usual sense of \"barycentered\" times)\n topo : bool\n If True, then TOA will be computed for the arrival time at the Spacecraft\n and the TOA line will have the spacecraft ECI position included.\n If False, then the TOA will be a Barycentric Arrival Time (BAT)\n obs : pint.observatory.Observatory\n The observatory corresponding to the photon event times.\n This is NOT necessarily the observatory for the output TOAs,\n which can be the Barycenter.\n\"\"\"\n\n # Given some subset of the event times, phases, and weights, compute\n # the TOA based on a reference event near the middle of the span.\n # Build the TOA as a PINT TOA() object\n lcf = lcfitters.LCFitter(deepcopy(template), phases)\n # fitbg does not work! 
Disabling.\n # if args.fitbg:\n # for i in xrange(2):\n # lcf.fit_position(unbinned=False)\n # lcf.fit_background(unbinned=False)\n dphi, dphierr = lcf.fit_position(unbinned=args.unbinned, track=args.track)\n log.info(\"Measured phase shift dphi={0}, dphierr={1}\".format(dphi, dphierr))\n\n # find time of event closest to center of observation and turn it into a TOA\n argmid = np.searchsorted(mjds, 0.5 * (mjds.min() + mjds.max()))\n tmid = ph_times[argmid]\n # So, tmid should be a time at the observatory if topo, otherwise\n # it should be a BAT (in TDB timescale with delays applied)\n # Here, convert tmid if not topo and data not barycentered\n\n if topo:\n tplus = tmid + TimeDelta(1 * u.s, scale=tmid.scale)\n toamid = pint.toa.TOA(tmid, obs=obs.name)\n toaplus = pint.toa.TOA(tplus, obs=obs.name)\n else:\n # If input data were not barycentered but we want barycentric TOAs\n # then make TMID into a BAT\n if tmid.scale not in (\"tdb\", \"tcb\"):\n log.debug(\n \"Making TOA, tmid {}, tmid.scale {}, obs {}\".format(\n tmid, tmid.scale, obs.name\n )\n )\n toas = pint.toa.get_TOAs_list(\n [pint.toa.TOA(tmid, obs=obs.name)],\n include_gps=args.use_gps,\n include_bipm=args.use_bipm,\n ephem=args.ephem,\n planets=planets,\n )\n tmid = Time(modelin.get_barycentric_toas(toas), format=\"mjd\", scale=\"tdb\")[\n 0\n ]\n log.debug(\"New tmid {}, tmid.scale {}\".format(tmid, tmid.scale))\n\n tplus = tmid + TimeDelta(1 * u.s, scale=tmid.scale)\n toamid = pint.toa.TOA(tmid, obs=\"Barycenter\")\n toaplus = pint.toa.TOA(tplus, obs=\"Barycenter\")\n\n toas = pint.toa.get_TOAs_list(\n [toamid, toaplus],\n include_gps=args.use_gps,\n include_bipm=args.use_bipm,\n ephem=args.ephem,\n planets=planets,\n )\n\n phsi, phsf = modelin.phase(toas, abs_phase=True)\n if topo:\n sc = \"tt\"\n else:\n sc = \"tdb\"\n # Compute frequency = d(phase)/dt\n f = (phsi[1] - phsi[0]) + (phsf[1] - phsf[0])\n f._unit = u.Hz\n\n # First delta is to get time of phase 0.0 of initial model\n # Second term corrects for the measured phase offset to align with template\n tfinal = (\n tmid + TimeDelta(-phsf[0].value / f, scale=sc) + TimeDelta(dphi / f, scale=sc)\n )\n\n # Use PINT's TOA writer to save the TOA\n nsrc = lcf.template.norm() * len(lcf.phases)\n nbkg = (1 - lcf.template.norm()) * len(lcf.phases)\n\n if args.topo: # tfinal is a topocentric TT MJD\n telposvel = obs.posvel_gcrs(tfinal)\n x = telposvel.pos[0].to(u.km)\n y = telposvel.pos[1].to(u.km)\n z = telposvel.pos[2].to(u.km)\n vx = telposvel.vel[0].to(u.km / u.s)\n vy = telposvel.vel[1].to(u.km / u.s)\n vz = telposvel.vel[2].to(u.km / u.s)\n\n toafinal = pint.toa.TOA(\n tfinal.utc,\n obs=\"spacecraft\",\n nsrc=\"%.2f\" % nsrc,\n nbkg=\"%.2f\" % nbkg,\n exposure=\"%.2f\" % exposure,\n dphi=\"%.5f\" % dphi,\n mjdTT=\"%.8f\" % tfinal.tt.mjd,\n telx=\"%.8f\" % x.value,\n tely=\"%.8f\" % y.value,\n telz=\"%.8f\" % z.value,\n vx=\"%.8f\" % vx.value,\n vy=\"%.8f\" % vy.value,\n vz=\"%.8f\" % vz.value,\n )\n\n else:\n # Make a TOA for the Barycenter, which is the default obs\n toafinal = pint.toa.TOA(\n tfinal,\n obs=\"Barycenter\",\n nsrc=\"%.2f\" % nsrc,\n nbkg=\"%.2f\" % nbkg,\n exposure=\"%.2f\" % exposure,\n dphi=\"%.5f\" % dphi,\n )\n toasfinal = pint.toa.get_TOAs_list(\n [toafinal],\n include_gps=args.use_gps,\n include_bipm=args.use_bipm,\n ephem=args.ephem,\n planets=planets,\n )\n log.debug(\n \"Modelin final phase {}\".format(modelin.phase(toasfinal, abs_phase=True))\n )\n log.info(\n \"Src rate = {0} c/s, Bkg rate = {1} c/s\".format(\n nsrc / exposure, nbkg / exposure\n 
)\n )\n return toafinal, dphierr / f.value * 1.0e6\n\n\ndesc = \"\"\"Generate TOAs from photon event data.\"\"\"\n\nparser = argparse.ArgumentParser(description=desc)\nparser.add_argument(\"eventname\", help=\"FITS file to read events from\")\nparser.add_argument(\"templatename\", help=\"Name of file to read template from\")\nparser.add_argument(\"parname\", help=\"Timing model file name\")\nparser.add_argument(\"--orbfile\", help=\"Name of orbit file\", default=None)\nparser.add_argument(\n \"--ephem\", help=\"Planetary ephemeris to use (default=DE421)\", default=\"DE421\"\n)\nparser.add_argument(\n \"--plot\", help=\"Show phaseogram plot.\", action=\"store_true\", default=False\n)\nparser.add_argument(\n \"--plotfile\", help=\"Output figure file name (default=None)\", default=None\n)\n# parser.add_argument(\"--fitbg\",help=\"Fit an overall background level (e.g. for changing particle background level (default=False).\",action='store_true',default=False)\nparser.add_argument(\n \"--unbinned\",\n help=\"Fit position with unbinned likelihood. Don't use for large data sets. (default=False)\",\n action=\"store_true\",\n default=False,\n)\n# parser.add_argument(\"--fix\",help=\"Adjust times to fix 1.0 second offset in NICER data (default=False)\", action='store_true',default=False)\nparser.add_argument(\n \"--tint\",\n help=\"Integrate for tint seconds for each TOA, or until the total integration exceeds maxint. The algorithm is based on GTI, so the integration will slightly exceed tint (default None; see maxint.)\",\n default=None,\n)\nparser.add_argument(\n \"--maxint\",\n help=\"Maximum time interval to accumulate exposure for a single TOA (default=2*86400s)\",\n type=float,\n default=2 * 86400.0,\n)\nparser.add_argument(\n \"--minexp\",\n help=\"Minimum exposure (s) for which to include a TOA (default=0.0).\",\n default=0.0,\n type=float,\n)\nparser.add_argument(\n \"--track\",\n help=\"Assume model is close to good and only search near 0 phase (to avoid getting TOAs off by 0.5 in double peaked pulsars)\",\n action=\"store_true\",\n default=False,\n)\nparser.add_argument(\n \"--dice\",\n help=\"Dice up long GTIs into chunks of length <= tint\",\n action=\"store_true\",\n default=False,\n)\nparser.add_argument(\n \"--use_bipm\", help=\"Use BIPM clock corrections\", action=\"store_true\", default=False\n)\nparser.add_argument(\n \"--use_gps\",\n help=\"Use GPS to UTC clock corrections\",\n action=\"store_true\",\n default=False,\n)\nparser.add_argument(\n \"--topo\",\n help=\"Make topocentric TOAs; include the spacecraft ECI position on the TOA line\",\n action=\"store_true\",\n default=False,\n)\nparser.add_argument(\n \"--outfile\", help=\"Name of file to save TOAs to (default is STDOUT)\", default=None\n)\nparser.add_argument(\"--append\", help=\"Append TOAs to output file instead of overwriting\", default=False, action=\"store_true\")\n\n## Parse arguments\nargs = parser.parse_args()\n\n# Load PINT model objects\nmodelin = pint.models.get_model(args.parname)\nlog.info(str(modelin))\n\n# check for consistency between ephemeris and options\nif modelin.PLANET_SHAPIRO.quantity:\n planets = True\nelse:\n planets = False\n\n# Load Template objects\ntry:\n template = pickle.load(open(args.templatename, \"rb\"))\nexcept:\n primitives, norms = prim_io(args.templatename)\n template = LCTemplate(primitives, norms)\n# print(template)\n\n# Load photons as PINT toas, and weights, if specified\n# Here I might loop over the files specified\n# Read event file header to figure out which instrument it is from\nhdr 
= pyfits.getheader(args.eventname, ext=1)\nlog.info(\n \"Event file TELESCOPE = {0}, INSTRUMENT = {1}\".format(\n hdr[\"TELESCOP\"], hdr[\"INSTRUME\"]\n )\n)\n\n# If the FITS events are barycentered then these keywords should be set\n# TIMESYS = 'TDB ' / All times in this file are TDB\n# TIMEREF = 'SOLARSYSTEM' / Times are pathlength-corrected to barycenter\nif hdr[\"TIMESYS\"].startswith(\"TDB\"):\n barydata = True\nelse:\n barydata = False\nlog.info(\n \"Event time system = {0}, reference = {1}\".format(hdr[\"TIMESYS\"], hdr[\"TIMEREF\"])\n)\n\nif args.topo and barydata:\n log.error(\"Can't compute topocentric TOAs from barycentered events!\")\n sys.exit(1)\n\nif (args.orbfile is not None) and barydata:\n log.warning(\"Data are barycentered, so ignoring orbfile!\")\n\nif hdr[\"TELESCOP\"] == \"NICER\":\n # Instantiate NICERObs once so it gets added to the observatory registry\n # Bug! It should not do this if the events have already been barycentered!\n if barydata:\n obs = \"Barycenter\"\n else:\n if args.orbfile is not None:\n log.info(\"Setting up NICER observatory\")\n obs = get_satellite_observatory(\"NICER\", args.orbfile)\n else:\n log.error(\n \"NICER .orb file required for non-barycentered events!\\n\"\n \"Please specify with --orbfile\"\n )\n sys.exit(2)\n\n # Read event file and return list of TOA objects\n try:\n tl = local_load_NICER_TOAs(args.eventname)\n except KeyError:\n log.error(\n \"Failed to load NICER TOAs. Make sure orbit file is specified on command line!\"\n )\n raise\nelif hdr[\"TELESCOP\"] == \"XTE\":\n if barydata:\n obs = \"Barycenter\"\n else:\n # Instantiate RXTEObs once so it gets added to the observatory registry\n if args.orbfile is not None:\n # Determine what observatory type is.\n log.info(\"Setting up RXTE observatory\")\n obs = get_satellite_observatory(\"RXTE\", args.orbfile)\n else:\n log.error(\n \"RXTE FPorbit file required for non-barycentered events!\\n\"\n \"Please specify with --orbfile\"\n )\n sys.exit(2)\n # Read event file and return list of TOA objects\n tl = load_RXTE_TOAs(args.eventname)\nelif hdr[\"TELESCOP\"].startswith(\"XMM\"):\n # Not loading orbit file here, since that is not yet supported.\n if barydata:\n obs = \"Barycenter\"\n else:\n log.error(\"Non-barycentered XMM data not yet supported\")\n sys.exit(3)\n tl = load_XMM_TOAs(args.eventname)\nelif hdr[\"TELESCOP\"].startswith(\"NuSTAR\"):\n # Not loading orbit file here, since that is not yet supported.\n if barydata:\n obs = \"Barycenter\"\n else:\n log.error(\"Non-barycentered NuSTAR data not yet supported\")\n sys.exit(3)\n tl = load_NuSTAR_TOAs(args.eventname)\n f = pyfits.open(args.eventname)\n mets = f[\"events\"].data.field(\"time\")\n f.close()\n for t, met in zip(tl, mets):\n t.met = met\nelse:\n log.error(\n \"FITS file not recognized, TELESCOPE = {0}, INSTRUMENT = {1}\".format(\n hdr[\"TELESCOP\"], hdr[\"INSTRUME\"]\n )\n )\n sys.exit(1)\n\nif args.topo: # for writing UTC topo toas\n T2SpacecraftObs(name=\"spacecraft\")\n\nif len(tl) <= 0:\n log.error(\"No TOAs found. 
Aborting.\")\n sys.exit(1)\n\n# Now convert to TOAs object and compute TDBs and (SSB) posvels\nts = pint.toa.get_TOAs_list(\n tl, ephem=args.ephem, planets=planets, include_bipm=False, include_gps=False\n)\nts.filename = args.eventname\nlog.info(ts.print_summary())\n# print(ts.get_summary())\nmjds = (\n ts.get_mjds()\n) # TT topocentric MJDs as floats; only used to find the index of the photon time closest to the middle of the MJD range\n\n# Compute model phase for each TOA;\nphss = modelin.phase(ts, abs_phase=True)[1].value # discard units\n\n# Note that you can compute barycentric TOAs from topocentric data, so\n# just because topo is False does NOT mean that data are barycentered!\nif barydata:\n ph_times = ts.table[\"tdb\"]\nelse:\n ph_times = ts.table[\"mjd\"]\n\n# ensure all positive\nphases = np.where(phss < 0.0, phss + 1.0, phss)\n\nh = float(hm(phases))\nprint(\"Htest : {0:.2f} ({1:.2f} sigma)\".format(h, h2sig(h)))\nif args.plot:\n phaseogram_binned(mjds, phases, bins=100, plotfile=args.plotfile)\n\n# get exposure information\ntry:\n f = pyfits.open(args.eventname)\n exposure = f[1].header[\"exposure\"]\n f.close()\nexcept:\n exposure = 0\n\n\nif args.tint is None:\n\n # do a single TOA for table\n toafinal, toafinal_err = estimate_toa(\n mjds, phases, ph_times, args.topo, obs, modelin\n )\n if \"OBS_ID\" in hdr:\n # Add ObsID to the TOA flags\n toafinal.flags[\"obsid\"] = hdr[\"OBS_ID\"]\n toafinal.flags[\"htest\"] = \"{0:.2f}\".format(hm(phases))\n toafinal = [toafinal]\n toafinal_err = [toafinal_err]\nelse:\n # Load in GTIs\n f = pyfits.open(args.eventname)\n # Warning:: This is ignoring TIMEZERO!!!!\n gti_t0 = f[\"gti\"].data.field(\"start\")\n gti_t1 = f[\"gti\"].data.field(\"stop\")\n gti_dt = gti_t1 - gti_t0\n mets = np.asarray([t.met for t in tl])\n\n tint = float(args.tint)\n\n if args.dice:\n # Break up larger GTIs into small chunks\n new_t0s = deque()\n new_t1s = deque()\n for t0, t1 in zip(gti_t0, gti_t1):\n dt = t1 - t0\n if dt < tint:\n new_t0s.append(t0)\n new_t1s.append(t1)\n else:\n # break up GTI in such a way to avoid losing time (to tmin) and\n # to avoid having pieces longer than tint\n npiece = int(np.floor(dt / tint)) + 1\n new_edges = np.linspace(t0, t1, npiece + 1)\n for it0, it1 in zip(new_edges[:-1], new_edges[1:]):\n new_t0s.append(it0)\n new_t1s.append(it1)\n gti_t0 = np.asarray(new_t0s)\n gti_t1 = np.asarray(new_t1s)\n gti_dt = gti_t1 - gti_t0\n\n # the algorithm here is simple -- go through the GTI and add them up\n # until either the good time exceeds tint, or until the total time\n # interval exceeds maxint\n i0 = 0\n current = 0.0\n toas = deque()\n maxint = float(args.maxint)\n for i in range(len(gti_t0)):\n current += gti_dt[i]\n # print('iteration=%d, current=%f'%(i,current))\n if (\n (current >= tint)\n or ((gti_t1[i] - gti_t0[i0]) > maxint)\n or (i == len(gti_t0) - 1)\n ):\n # make a TOA\n ph0, ph1 = np.searchsorted(mets, [gti_t0[i0], gti_t1[i]])\n m, p, t = mjds[ph0:ph1], phases[ph0:ph1], ph_times[ph0:ph1]\n # print('Generating TOA ph0={0}, ph1={1}, len(m)={2}, i0={3}, i={4}'.format(ph0,ph1,len(m),i0,i))\n # print('m[0]={0}, m[1]={1}'.format(m[0],m[-1]))\n if len(m) > 0:\n toas.append(estimate_toa(m, p, t, args.topo, obs, modelin))\n toas[-1][0].flags[\"htest\"] = \"{0:.2f}\".format(hm(p))\n # fix exposure\n toas[-1][0].flags[\"exposure\"] = current\n current = 0.0\n i0 = i + 1\n toafinal, toafinal_err = list(zip(*toas))\n\nif args.minexp > 0.0:\n x = [\n (t, e)\n for t, e in zip(toafinal, toafinal_err)\n if float(t.flags[\"exposure\"]) > 
args.minexp\n ]\n if len(x) > 0:\n toafinal, toafinal_err = list(zip(*x))\n else:\n print(\"No TOAs passed exposure cut!\")\n sys.exit(0)\n\nfor t in toafinal:\n t.flags[\"-t\"] = hdr[\"TELESCOP\"]\ntoas = pint.toa.TOAs(toalist=toafinal)\ntoas.table[\"error\"][:] = np.asarray(toafinal_err)\nsio = io.StringIO()\ntoas.write_TOA_file(sio, name=\"photon_toa\", format=\"tempo2\")\noutput = sio.getvalue()\n\nif args.topo:\n output = output.replace(\"spacecraft\", \"STL_GEO\")\nelse:\n output = output.replace(\"bat\", \"@\")\n\nif args.append:\n output = output.replace(\"FORMAT 1\",\"C \")\n # Try to remove blank lines\n output = output.replace(\"\\n\\n\",\"\\n\")\n\nif args.outfile is not None:\n print(output, file=open(args.outfile, \"a\" if args.append else \"w\"))\nelse:\n print(output)\n"
] | [
[
"numpy.linspace",
"numpy.asarray",
"numpy.floor",
"numpy.searchsorted",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
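The `--dice` option in the script above splits long good-time intervals (GTIs) into pieces no longer than `tint` before exposure is accumulated per TOA. A minimal stand-alone sketch of that step, with invented GTI arrays in place of a real FITS `GTI` extension:

```python
from collections import deque

import numpy as np

def dice_gtis(gti_t0, gti_t1, tint):
    """Split every GTI longer than tint into near-equal pieces <= tint.

    Mirrors the dicing loop in the script above; the inputs here are
    invented start/stop arrays in seconds, not values from a real file.
    """
    new_t0s, new_t1s = deque(), deque()
    for t0, t1 in zip(gti_t0, gti_t1):
        dt = t1 - t0
        if dt < tint:
            new_t0s.append(t0)
            new_t1s.append(t1)
        else:
            # npiece is chosen so no piece exceeds tint and no time is lost
            npiece = int(np.floor(dt / tint)) + 1
            edges = np.linspace(t0, t1, npiece + 1)
            new_t0s.extend(edges[:-1])
            new_t1s.extend(edges[1:])
    return np.asarray(new_t0s), np.asarray(new_t1s)

t0, t1 = dice_gtis(np.array([0.0, 100.0]), np.array([30.0, 400.0]), tint=100.0)
print(np.c_[t0, t1])  # the 300 s GTI is diced into four 75 s pieces
```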
Migelo/lopa-sorting | [
"5ed2732262940e0c24caee8ac734c54167932bcd"
] | [
"bins.py"
] | [
"import numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser(description='Generate bins.')\nparser.add_argument('beginning', help='Beginning of the first bin.', type=str)\nparser.add_argument('end', help='End of the last bin.', type=str)\nparser.add_argument('-interval', help='Size of the bin.', type=int)\nparser.add_argument('-n', help='Number of bins.', type=int)\nparser.add_argument('outputFile', type=str, help='Set the output file.')\nparser.add_argument('-sb', type=bool, default=False, help='Use when \\\n creating sub-bins.')\nargs = parser.parse_args()\n\nargs.beginning = np.float(args.beginning)\nargs.end = np.float(args.end)\n\nif (args.interval is not None) and (args.n is not None):\n parser.error('-n and -interval are mutually exclusive')\n\nbins = []\ni = args.beginning\nif args.interval is not None:\n interval = args.interval\n while i < args.end:\n if i + interval > args.end:\n bins.append([i, args.end])\n else:\n bins.append([i, i + interval])\n i += interval\nelif args.n is not None:\n if args.sb:\n interval = 1.\n else:\n interval = (args.end - args.beginning) / args.n\n for j in range(args.n):\n bins.append([i, i + interval])\n i += interval\n\nnp.savetxt(args.outputFile, bins)\n"
] | [
[
"numpy.savetxt",
"numpy.float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
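When `-n` is given (without `-sb`), the loop in `bins.py` is equivalent to pairing consecutive `np.linspace` edges. A small sketch of that equivalence; the output file name here is made up:

```python
import numpy as np

beginning, end, n = 0.0, 10.0, 4
edges = np.linspace(beginning, end, n + 1)       # n + 1 edges -> n bins
bins = np.column_stack([edges[:-1], edges[1:]])  # one [start, end] row per bin
print(bins)
# [[ 0.   2.5]
#  [ 2.5  5. ]
#  [ 5.   7.5]
#  [ 7.5 10. ]]
np.savetxt("bins.txt", bins)  # same two-column layout bins.py writes
```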
LeOntalEs/transform3d | [
"f3426c3cce98298c17d3820fa2d8e1bdf92a8571"
] | [
"transform3d/transform.py"
] | [
"import cv2\nimport numpy as np\n\ndef _get_M(w, h, f, rx=0, ry=0, rz=0, dx=0, dy=0, dz=0, sx=1, sy=1, sz=1):\n A1 = np.matrix([ [1, 0, -w/2],\n [0, 1, -h/2],\n [0, 0, 1],\n [0, 0, 1]])\n S = np.matrix([[sx, 0, 0, 0],\n [0, sy, 0, 0],\n [0, 0, sz, 0],\n [0, 0, 0, 1]])\n RX = np.matrix([ [1, 0, 0, 0],\n [0, np.cos(rx), -np.sin(rx), 0],\n [0, np.sin(rx), np.cos(rx), 0],\n [0, 0, 0, 1]])\n RY = np.matrix([ [np.cos(ry), 0, -np.sin(ry), 0],\n [0, 1, 0, 0],\n [np.sin(ry), 0, np.cos(ry), 0],\n [0, 0, 0, 1]])\n RZ = np.matrix([ [np.cos(rz), -np.sin(rz), 0, 0],\n [np.sin(rz), np.cos(rz), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n T = np.matrix([ [1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n A2 = np.matrix([ [f, 0, w/2, 0],\n [0, f, h/2, 0],\n [0, 0, 1, 0]])\n R = RX * RY * RZ\n return A2 * T * R * S * A1\n\ndef transform3d(img, rx=0, ry=0, rz=0, dx=0, dy=0, dz=0, sx=1, sy=1, sz=1):\n h, w, c = img.shape\n rx, ry, rz = np.radians([rx, ry, rz])\n d = np.sqrt(h**2 + w**2)\n f = d / (2 * np.sin(rz) if np.sin(rz) != 0 else 1)\n dz = f\n M = _get_M(w, h, f, rx, ry,rz, dx, dy, dz, sx, sy)\n return cv2.warpPerspective(img.copy(), M, (w, h))\n "
] | [
[
"numpy.matrix",
"numpy.radians",
"numpy.sqrt",
"numpy.cos",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
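The homography in `_get_M` above is the chain `A2 @ T @ R @ S @ A1`: center the image, rotate/scale in 3-D, push the scene out to `z = f`, then project back. A numeric sketch tracing a single corner pixel through that chain; `f` here is picked by hand rather than derived from the `np.sin(rz)` rule in `transform3d`:

```python
import numpy as np

w, h, f = 300.0, 200.0, 360.0
ry = np.radians(30.0)  # rotate 30 degrees about the vertical axis

A1 = np.array([[1, 0, -w / 2], [0, 1, -h / 2], [0, 0, 1], [0, 0, 1]])   # 2-D -> 3-D, centered
RY = np.array([[np.cos(ry), 0, -np.sin(ry), 0],
               [0, 1, 0, 0],
               [np.sin(ry), 0, np.cos(ry), 0],
               [0, 0, 0, 1]])                                           # rotation about y
T = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, f], [0, 0, 0, 1]])  # translate scene to z = f
A2 = np.array([[f, 0, w / 2, 0], [0, f, h / 2, 0], [0, 0, 1, 0]])       # 3-D -> 2-D projection

M = A2 @ T @ RY @ A1                     # 3x3 homography, as in _get_M (no scaling here)
x, y, z = M @ np.array([0.0, 0.0, 1.0])  # top-left pixel in homogeneous coordinates
print(x / z, y / z)                      # where that corner lands after the warp
```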
PM25/Facial_Expression_DL | [
"9c10886b27ed13a352e8ca355b1b512cf6e92db0"
] | [
"main.py"
] | [
"import torch\nfrom PIL import Image\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom argparse import ArgumentParser\nimport matplotlib.pyplot as plt\n\n\n# Arguments\nparser = ArgumentParser(description='Predicting Facial Expression of an Image.')\nparser.add_argument('--model', type=str, default='models/model.pkl', help='Path of Previous Trained Model')\nparser.add_argument('--img', type=str, default='img.jpg', help='Path of Image')\nargs = parser.parse_args()\n\n\n# PyTorch Transform function\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(.5, .5, .5), std=(.5, .5, .5))\n ])\n\n# Start frome here!\nif __name__ == '__main__':\n classes = [\"Surprise\", \"Fear\", \"Disgust\", \"Happiness\", \"Sadness\" ,\"Anger\", \"Neutral\"]\n n_classes = len(classes)\n\n # Load Model\n model = torch.load(args.model).eval()\n\n # Load Image & Predict \n with torch.no_grad():\n img = Image.open(args.img).convert('RGB')\n img = transform(img)\n output = model(img.unsqueeze(0))\n output = F.softmax(output, dim=1)\n _, predict = torch.max(output, 1)\n \n plt.barh(range(n_classes), output.squeeze().tolist())\n plt.yticks(range(n_classes), classes)\n plt.title(f'Predict: {classes[predict]}')\n plt.show()"
] | [
[
"torch.nn.functional.softmax",
"torch.max",
"matplotlib.pyplot.title",
"torch.load",
"torch.no_grad",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
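The inference step above is the standard pattern: forward pass under `torch.no_grad()`, softmax over the logits, argmax for the label. A stripped-down sketch with a dummy linear model standing in for the pickled network, so it runs without `models/model.pkl`; the 48x48 input size is an assumption for illustration:

```python
import torch
import torch.nn.functional as F

classes = ["Surprise", "Fear", "Disgust", "Happiness", "Sadness", "Anger", "Neutral"]

# Stand-in for the trained network that main.py loads with torch.load
model = torch.nn.Linear(3 * 48 * 48, len(classes))

with torch.no_grad():
    img = torch.rand(1, 3 * 48 * 48)      # fake normalized image, flattened
    probs = F.softmax(model(img), dim=1)  # logits -> class probabilities
    conf, predict = torch.max(probs, 1)   # most likely class and its probability
    print(classes[predict.item()], float(conf))
```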
ilyakrasnou/DiplomaProject | [
"a4cfd04fc00780c254e86e47039fecf27c3cbc9b"
] | [
"PythonNeuro/weights_viewer.py"
] | [
"import numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nmodel = keras.models.load_model(\"mnist_model.h5\")\n\nfor layer in model.layers:\n weights = layer.get_weights()\n print(layer.name)\n print(weights)\n"
] | [
[
"tensorflow.keras.models.load_model"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
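Printing whole weight arrays quickly becomes unreadable; printing shapes is a common variant. A small sketch against a throwaway model, since `mnist_model.h5` is not part of this record:

```python
from tensorflow import keras

# Tiny stand-in model; the script above loads mnist_model.h5 instead
model = keras.Sequential([
    keras.Input(shape=(784,)),
    keras.layers.Dense(16, name="hidden"),
    keras.layers.Dense(10, name="out"),
])

for layer in model.layers:
    shapes = [w.shape for w in layer.get_weights()]  # [kernel, bias] per Dense layer
    print(layer.name, shapes)
```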
je-c/CryptoClassifier | [
"abce5e81e9045581df29af2f77f89e851911e0bc"
] | [
"lib/functionality/processing.py"
] | [
"import pandas as pd\nimport numpy as np\nimport datetime as dt\nimport time, csv, os, itertools, shutil, json, random\nimport btalib\n\nfrom itertools import compress\nfrom PIL import Image\n\nfrom sklearn.preprocessing import MinMaxScaler\n\ndef bool_convert(s):\n \"\"\"\n Parse string booleans from parameters\n * :param s(str): String to convert\n :return s(bool): Boolean type\n \"\"\"\n try:\n if s == \"True\":\n return True\n elif s == \"False\":\n return False\n else:\n return s\n except TypeError:\n return s\n\ndef int_convert(s):\n \"\"\"\n Parse string int from parameters\n * :param s(str): String to convert\n :return s(int): Int type\n \"\"\"\n try:\n return int(s)\n except ValueError:\n return s\n except TypeError:\n return s\n \ndef load_params(filePath, deploy = False):\n \"\"\"\n Parse parameters json\n * :param filePath(str): Location of parameters file\n * :param deploy(bool): Denotation for whether parameters are being loaded by deployment code\n\n :return params(dict): Python dictionary of parameters with correct dtypes\n \"\"\"\n with open(filePath) as f:\n params = json.load(f)\n if deploy:\n return params\n else: \n for split in ['validDL', 'trainDL']:\n params['loadingParams'][split]['shuffle'] = bool_convert(params['loadingParams'][split]['shuffle'])\n\n for key in params:\n for subkey in params[key]:\n if subkey == 'shuffle':\n params[key][subkey] = bool_convert(params[key][subkey])\n else:\n params[key][subkey] = int_convert(params[key][subkey])\n \n f.close()\n return params\n\ndef unpack_img_dataset(params, directory, file_name):\n \"\"\"\n Unpack an image dataset stored as raw data (csv or otherwise) into .png's. Creates file structure for pytorch loading\n and handles train/test/validation splitting internally\n * :param params(dict): Location of parameters file\n * :param directory(str): Name of directory to check for, or create to store images\n * :param file_name(str): Name of .csv file of featureset for image creation \n\n :return parentPath(str): Path to parent directory of the dataset\n \"\"\"\n _, targetDir, classNames, imSize = [value for key, value in params.items()]\n counter = {}\n labelMap = {}\n filePathMap = {\n 0:{}, \n 1:{}\n }\n classFilePaths = {\n 'train':[], \n 'test':[]\n }\n \n for i, j in zip(range(0,len(classNames)), classNames):\n labelMap[str(i)] = j\n filePathMap[0][str(i)] = ''\n filePathMap[1][str(i)] = ''\n\n #Paths for the directory\n parentPath = os.path.join(targetDir, directory)\n trainPath = os.path.join(parentPath, 'train')\n testPath = os.path.join(parentPath, 'test')\n try:\n os.mkdir(parentPath)\n os.mkdir(trainPath)\n os.mkdir(testPath)\n print(f'Directory \\'{directory}\\' created')\n for elem in classNames:\n fpTrain = os.path.join(trainPath, elem)\n fpTest = os.path.join(testPath, elem)\n classFilePaths['train'].append(fpTrain)\n classFilePaths['test'].append(fpTest)\n os.mkdir(fpTrain)\n os.mkdir(fpTest)\n print(f' {elem} class train/test directories created')\n \n for i, itemTrain, itemTest in zip(range(len(classNames)), classFilePaths['train'], classFilePaths['test']):\n i = str(i)\n filePathMap[0][i] = itemTrain\n filePathMap[1][i] = itemTest\n\n except FileExistsError:\n print(f'{directory} already exists - consider deleting the directory for a clean install!')\n \n numSamples = len(pd.read_csv(file_name))\n test_idx = [random.randint(0, numSamples) for i in range(0, int(numSamples * 0.2))]\n print(f'Unpacking {file_name}...')\n print('Please wait...')\n with open(file_name) as csv_file:\n csv_reader = 
csv.reader(csv_file)\n\n next(csv_reader)\n fileCount = 0\n for row in csv_reader:\n \n if fileCount % 1000 == 0:\n print(f'Unpacking {fileCount}/{numSamples}...', end = ' ')\n\n pixels = row[:-1] # without label\n pixels = np.array(pixels, dtype='float64')\n pixels = pixels.reshape((imSize, imSize, 3))\n image = Image.fromarray(pixels, 'RGB')\n\n label = row[-1][0]\n\n if label not in counter: counter[label] = 0\n counter[label] += 1\n\n filename = f'{labelMap[label]}{counter[label]}.png'\n\n if fileCount in test_idx:\n filepath = os.path.join(filePathMap[1][label], filename)\n\n else:\n filepath = os.path.join(filePathMap[0][label], filename)\n\n image.save(filepath)\n \n if (fileCount % 999 == 0) and (fileCount != 9): print(f'Completed')\n fileCount += 1\n\n print(f'Unpacking complete. {fileCount} images parsed.')\n \n return parentPath\n\ndef tech_ind_features(data):\n \"\"\"\n Generate technical indicators \n * :param data(pd.DataFrame): Raw data for processing\n \n :return transformed_df(pd.DataFrame): Dataframe of features, sample balanced and normalised\n \"\"\"\n data['smoothed_close'] = data.close.rolling(9).mean().rolling(21).mean().shift(-15)\n data['dx'] = np.diff(data['smoothed_close'], prepend=data['smoothed_close'][0])\n data['dx_signal'] = pd.Series(data['dx']).rolling(9).mean()\n data['ddx'] = np.diff(np.diff(data['smoothed_close']), prepend=data['smoothed_close'][0:2])\n\n data['labels'] = np.zeros(len(data))\n data['labels'].iloc[[(data.ddx < 0.1) & (data.dx <= 0) & (data.dx_signal > 0)]] = 1\n data['labels'].iloc[[(data.ddx > -0.075) & (data.dx >= 0) & (data.dx_signal < 0)]] = 2\n\n #Filter and drop all columns except close price, volume and date (for indexing)\n relevant_cols = list(\n compress(\n data.columns,\n [False if i in [1, 3, 4, 5, 6, len(data.columns)-1] else True for i in range(len(data.columns))]\n )\n )\n\n data = data.drop(columns=relevant_cols).rename(columns={'open_date_time': 'date'})\n data.set_index('date', inplace = True)\n\n #Define relevant periods for lookback/feature engineering\n periods = [\n 9, 14, 21, \n 30, 45, 60,\n 90, 100, 120\n ]\n\n #Construct technical features for image synthesis\n for period in periods:\n data[f'ema_{period}'] = btalib.ema(\n data.close,\n period = period\n ).df['ema']\n data[f'ema_{period}_dx'] = np.append(np.nan, np.diff(btalib.ema(\n data.close,\n period = period\n ).df['ema']))\n data[f'rsi_{period}'] = btalib.rsi(\n data.close,\n period = period\n ).df['rsi']\n data[f'cci_{period}'] = btalib.cci(\n data.high,\n data.low,\n data.close,\n period = period\n ).df['cci']\n data[f'macd_{period}'] = btalib.macd(\n data.close,\n pfast = period,\n pslow = period*2,\n psignal = int(period/3)\n ).df['macd']\n data[f'signal_{period}'] = btalib.macd(\n data.close,\n pfast = period,\n pslow = period*2,\n psignal = int(period/3)\n ).df['signal']\n data[f'hist_{period}'] = btalib.macd(\n data.close,\n pfast = period,\n pslow = period*2,\n psignal = int(period/3)\n ).df['histogram']\n data[f'volume_{period}'] = btalib.sma(\n data.volume,\n period = period\n ).df['sma']\n data[f'change_{period}'] = data.close.pct_change(periods = period)\n\n data = data.drop(data.query('labels == 0').sample(frac=.90).index)\n\n data = data.replace([np.inf, -np.inf], np.nan).dropna()\n\n data_trimmed = data.loc[:, 'ema_9':]\n data_trimmed = pd.concat(\n [data_trimmed, \n data_trimmed.shift(1), \n data_trimmed.shift(2)],\n axis = 1\n )\n\n mm_scaler = MinMaxScaler(feature_range=(0, 1))\n transformed_data = 
mm_scaler.fit_transform(data_trimmed[24:])\n transformed_data = np.c_[\n transformed_data, \n pd.to_numeric(\n data.labels[24:],\n downcast = 'signed'\n ).to_list()\n ]\n\n return transformed_data"
] | [
[
"pandas.read_csv",
"pandas.Series",
"numpy.diff",
"numpy.array",
"pandas.to_numeric",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
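`unpack_img_dataset` above turns each CSV row of `imSize * imSize * 3` values into an RGB PNG. Below, the core reshape-and-save step in isolation, with random values standing in for a CSV row; note the original passes float64 straight to `Image.fromarray`, which is unreliable for RGB data, so this sketch converts to uint8 first:

```python
import numpy as np
from PIL import Image

im_size = 16
row = np.random.rand(im_size * im_size * 3)  # stand-in for one CSV row, label already stripped

pixels = np.array(row, dtype="float64").reshape((im_size, im_size, 3))
image = Image.fromarray((pixels * 255).astype(np.uint8), "RGB")  # uint8 is what PIL expects for RGB
image.save("sample.png")
```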
Mistariano/Deep-Image-Matting | [
"c7e5516ca4a0729b27410c10ab8a017d92b95bf0"
] | [
"demo.py"
] | [
"import math\nimport os\nimport random\n\nimport cv2 as cv\nimport keras.backend as K\nimport numpy as np\n\nfrom data_generator import generate_trimap, random_choice, get_alpha_test\nfrom model import build_encoder_decoder, build_refinement\nfrom utils import compute_mse_loss, compute_sad_loss\nfrom utils import get_final_output, safe_crop, draw_str\n\n\ndef composite4(fg, bg, a, w, h):\n fg = np.array(fg, np.float32)\n bg_h, bg_w = bg.shape[:2]\n x = 0\n if bg_w > w:\n x = np.random.randint(0, bg_w - w)\n y = 0\n if bg_h > h:\n y = np.random.randint(0, bg_h - h)\n bg = np.array(bg[y:y + h, x:x + w], np.float32)\n alpha = np.zeros((h, w, 1), np.float32)\n alpha[:, :, 0] = a / 255.\n im = alpha * fg + (1 - alpha) * bg\n im = im.astype(np.uint8)\n return im, bg\n\n\nif __name__ == '__main__':\n img_rows, img_cols = 320, 320\n channel = 4\n\n pretrained_path = 'models/final.42-0.0398.hdf5'\n encoder_decoder = build_encoder_decoder()\n final = build_refinement(encoder_decoder)\n final.load_weights(pretrained_path)\n print(final.summary())\n\n out_test_path = 'data/merged_test/'\n if not os.path.exists(out_test_path):\n os.makedirs(out_test_path)\n test_images = [f for f in os.listdir(out_test_path) if\n os.path.isfile(os.path.join(out_test_path, f)) and f.endswith('.png')]\n print('test_images:', test_images)\n assert len(test_images) > 0\n samples = random.sample(test_images, min(10, len(test_images)))\n\n bg_test = 'data/bg_test/'\n if not os.path.exists(bg_test):\n os.makedirs(bg_test)\n test_bgs = [f for f in os.listdir(bg_test) if\n os.path.isfile(os.path.join(bg_test, f)) and f.endswith('.png')]\n print('test_bgs:', test_bgs)\n assert len(test_bgs) > 0\n sample_bgs = random.sample(test_bgs, min(10, len(test_bgs)))\n\n total_loss = 0.0\n for i in range(len(samples)):\n filename = samples[i]\n image_name = filename.split('.')[0]\n\n print('\\nStart processing image: {}'.format(filename))\n\n bgr_img = cv.imread(os.path.join(out_test_path, filename))\n bg_h, bg_w = bgr_img.shape[:2]\n print('bg_h, bg_w: ' + str((bg_h, bg_w)))\n\n # a = get_alpha_test(image_name)\n # TODO: fix fucking here\n filename = os.path.join('mask_test', filename)\n filename = os.path.join('data', filename)\n print('mask file name:', filename)\n a = cv.imread(filename, 0)\n\n a_h, a_w = a.shape[:2]\n print('a_h, a_w: ' + str((a_h, a_w)))\n\n alpha = np.zeros((bg_h, bg_w), np.float32)\n alpha[0:a_h, 0:a_w] = a\n trimap = generate_trimap(alpha)\n different_sizes = [(320, 320), (320, 320), (320, 320), (480, 480), (640, 640)]\n crop_size = random.choice(different_sizes)\n x, y = random_choice(trimap, crop_size)\n print('x, y: ' + str((x, y)))\n\n bgr_img = safe_crop(bgr_img, x, y, crop_size)\n alpha = safe_crop(alpha, x, y, crop_size)\n trimap = safe_crop(trimap, x, y, crop_size)\n cv.imwrite('images/{}_image.png'.format(i), np.array(bgr_img).astype(np.uint8))\n cv.imwrite('images/{}_trimap.png'.format(i), np.array(trimap).astype(np.uint8))\n cv.imwrite('images/{}_alpha.png'.format(i), np.array(alpha).astype(np.uint8))\n\n x_test = np.empty((1, img_rows, img_cols, 4), dtype=np.float32)\n x_test[0, :, :, 0:3] = bgr_img / 255.\n x_test[0, :, :, 3] = trimap / 255.\n\n y_true = np.empty((1, img_rows, img_cols, 2), dtype=np.float32)\n y_true[0, :, :, 0] = alpha / 255.\n y_true[0, :, :, 1] = trimap / 255.\n\n y_pred = final.predict(x_test)\n # print('y_pred.shape: ' + str(y_pred.shape))\n\n y_pred = np.reshape(y_pred, (img_rows, img_cols))\n print(y_pred.shape)\n y_pred = y_pred * 255.0\n y_pred = get_final_output(y_pred, 
trimap)\n y_pred = y_pred.astype(np.uint8)\n\n sad_loss = compute_sad_loss(y_pred, alpha, trimap)\n mse_loss = compute_mse_loss(y_pred, alpha, trimap)\n str_msg = 'sad_loss: %.4f, mse_loss: %.4f, crop_size: %s' % (\n sad_loss, mse_loss, str(crop_size))\n print(str_msg)\n\n out = y_pred.copy()\n draw_str(out, (10, 20), str_msg)\n cv.imwrite('images/{}_out.png'.format(i), out)\n\n sample_bg = sample_bgs[i]\n bg = cv.imread(os.path.join(bg_test, sample_bg))\n bh, bw = bg.shape[:2]\n wratio = img_cols / bw\n hratio = img_rows / bh\n ratio = wratio if wratio > hratio else hratio\n if ratio > 1:\n bg = cv.resize(src=bg, dsize=(math.ceil(bw * ratio), math.ceil(bh * ratio)),\n interpolation=cv.INTER_CUBIC)\n im, bg = composite4(bgr_img, bg, y_pred, img_cols, img_rows)\n cv.imwrite('images/{}_compose.png'.format(i), im)\n cv.imwrite('images/{}_new_bg.png'.format(i), bg)\n\n K.clear_session()\n"
] | [
[
"numpy.reshape",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
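`composite4` above implements the standard matting equation `im = alpha * fg + (1 - alpha) * bg` with an 8-bit matte. The blend in isolation, on synthetic constant images:

```python
import numpy as np

h, w = 4, 4
fg = np.full((h, w, 3), 200.0, np.float32)  # synthetic foreground
bg = np.full((h, w, 3), 50.0, np.float32)   # synthetic background
a = np.full((h, w), 128.0, np.float32)      # alpha matte in [0, 255]

alpha = (a / 255.0)[..., None]              # broadcast over the channel axis
im = (alpha * fg + (1.0 - alpha) * bg).astype(np.uint8)
print(im[0, 0])                             # ~[125 125 125], halfway between fg and bg
```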
georglind/fermihubbard | [
"d76ad1df6b5ea57abf7b290576bbca4dd5275aeb"
] | [
"example.py"
] | [
"from __future__ import division, print_function\nimport numpy as np\nimport lintable as lin\nimport fermihubbard\n\nimport scipy.sparse as sparse\n\n# The model parameters:\n\n# The onsite energies (0)\nH1E = np.zeros((4, 4))\n\n# Hopping on a ring\nH1T = np.zeros((4, 4))\nfor i in xrange(4):\n H1T[i, (i+1) % 4] = -1.\n H1T[(i+1) % 4, i] = -1.\n\n# Only onsite interaction\nH1U = 2*np.eye(4)\n\n# Construc the model\nm = fermihubbard.Model(H1E, H1T, H1U)\n\n# Construct a specific chargestate with eight electrons:\nns4 = m.numbersector(4)\n\n# Consider the sector with net zero spin in the z direction, meaning that\n# 4 electrons = 2 spin-up electrons + 2 spin-down electrons\ns0 = ns4.szsector(0)\n\n# Let us then take a look at the basis. Generate the Lin-table:\nlita = lin.Table(4, 2, 2)\n\n# Print the basis\nprint('Spin-up basis:')\nprint(lita.basisu)\n# Spin-up basis:\n# [[0 0 1 1]\n# [0 1 0 1]\n# [0 1 1 0]\n# [1 0 0 1]\n# [1 0 1 0]\n# [1 1 0 0]]\n\n# Compute the Hamiltonian\n(HTu, HTd, HUd) = s0.hamiltonian\n\n# Print the hopping Hamiltonian for the spin-up electronic systems\nprint('Spin-up hopping:')\nprint(HTu)\n\n# The Total hamiltonian can be generated from these parts.\nNu, Nd = lita.Ns\n\n# The interaction part in sparse format\nHU = sparse.coo_matrix((HUd, (np.arange(Nu*Nd), np.arange(Nu*Nd))), shape=(Nu*Nd, Nu*Nd)).tocsr()\n\n# The kronecker product of the two hopping sectors.\nHT = sparse.kron(np.eye(Nd), HTu, 'dok') + sparse.kron(HTd, np.eye(Nu), 'dok')\n\nprint('The total Hamiltonian:')\nprint(HU + HT)\n"
] | [
[
"numpy.arange",
"numpy.eye",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
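The Kronecker-product step at the end of the example above is the generic way to lift per-species operators to the combined up/down space: `H_T = I_d ⊗ H_up + H_down ⊗ I_u`. A tiny dense sketch with 2x2 stand-in hopping blocks:

```python
import numpy as np

# Stand-in hopping blocks for the two spin species
HTu = np.array([[0.0, -1.0], [-1.0, 0.0]])
HTd = np.array([[0.0, -0.5], [-0.5, 0.0]])
Nu, Nd = HTu.shape[0], HTd.shape[0]

# Lift each block to the (Nd * Nu)-dimensional product space
HT = np.kron(np.eye(Nd), HTu) + np.kron(HTd, np.eye(Nu))
print(HT.shape)  # (4, 4)
print(HT)
```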
SkyfengBiuBiu/pytorch-detect-to-track_1 | [
"afdfdf505deca4506ebc594ec0c2eb032a2a5569"
] | [
"lib/model/correlation/build.py"
] | [
"import os\nimport torch\nimport torch.utils.ffi\n\nstrBasepath = os.path.split(os.path.abspath(__file__))[0] + '/'\nstrHeaders = []\nstrSources = []\nstrDefines = []\nstrObjects = []\n\nif torch.cuda.is_available() == True:\n strHeaders += ['src/correlation_cuda.h']\n strSources += ['src/correlation_cuda.c']\n strDefines += [('WITH_CUDA', None)]\n strObjects += ['src/correlation_cuda_kernel.o']\n\nffi = torch.utils.ffi.create_extension(\n name='_ext.correlation',\n headers=strHeaders,\n sources=strSources,\n verbose=False,\n with_cuda=any(strDefine[0] == 'WITH_CUDA' for strDefine in strDefines),\n package=False,\n relative_to=strBasepath,\n include_dirs=[os.path.expandvars('$CUDA_HOME') + '/include'],\n define_macros=strDefines,\n extra_objects=[os.path.join(strBasepath, strObject) for strObject in strObjects]\n)\n\nif __name__ == '__main__':\n ffi.build()"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
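`torch.utils.ffi` was removed from modern PyTorch; the closest current route is JIT compilation via `torch.utils.cpp_extension.load`. A hedged sketch of what an equivalent build might look like: the `.cu` source name is an assumption (the record above only ships the precompiled `.o`), and flags and include paths would need adapting:

```python
import os

import torch
from torch.utils.cpp_extension import load  # modern replacement for torch.utils.ffi

base = os.path.dirname(os.path.abspath(__file__))
sources = ["src/correlation_cuda.c"]
if torch.cuda.is_available():
    # Assumed CUDA source corresponding to the precompiled correlation_cuda_kernel.o
    sources.append("src/correlation_cuda_kernel.cu")

correlation = load(
    name="correlation",
    sources=[os.path.join(base, s) for s in sources],
    verbose=False,
)
```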
MolecularAI/pysmilesutils | [
"9fadf5c34fb21d50cf79e56a3ed1033e4d5015e5"
] | [
"examples/examples_data.py"
] | [
"\n# %%\n# %load_ext autoreload\n# %autoreload 2\n\nimport torch\nimport h5py\nimport time\n\nfrom torch.utils.data import Dataset, TensorDataset, DataLoader\nfrom pysmilesutils.datautils import BlockDataLoader, BucketBatchSampler, MultiDataset\n\nfrom tqdm.notebook import tqdm\n\n\n# %% [markdown]\n# # `BlockDataLoader`\n\n# %% [markdown]\n# Det `BlockDataLoader` is used to split the data loading into two parts: first loading blocks, and then drawing batches from thse blocks. This can be usefull when datasets are very large and don't fit into memory.\n#\n# As an example lets look at data in the form of a single `torch.Tensor`. We use `BlockDataLoader` to load this in blocks of size `10`, from which we draw batches of size `5`.\n\n# %%\nclass TestDataset(Dataset):\n data = torch.arange(20)\n \n def __getitem__(self, idx):\n return self.data[idx]\n \n def __len__(self):\n return len(self.data)\n\n\ndataset = TestDataset()\n \ndataloader = BlockDataLoader(dataset, batch_size=5, block_size=10)\n\nfor batch in dataloader:\n print(batch)\n\n\n# %% [markdown]\n# Note that all elements in a batch come from the same block, i.e., the numbers 0 through 9 are not mixed with the numbers 10 through 19.\n#\n# This is of course just a small example, and does not illstrate the real benefit of the `BlockDataLoader`. Lets instead look at an example with a larger dataset, stored on disk.\n\n# %%\nclass HDFDataset(Dataset):\n \"\"\"Small `Dataset` for loading HDF data.\"\"\"\n def __init__(self, path):\n self.path = path\n \n def __len__(self):\n with h5py.File(self.path, \"r\") as f:\n num = len(f.get(\"tensor_data\"))\n return num\n \n def __getitem__(self, idx):\n with h5py.File(self.path, \"r\") as f:\n data = f.get(\"tensor_data\")[idx]\n return data\n \n\nhdf_dataset = HDFDataset(\"data/data.hdf5\")\n\n# %% [markdown]\n# Here we have created a dataset that loads tensor data stored in a HDF5 file. The file, `\"data.hdf5\"`, contains 1 million integer, 0 through 999999. Lets compare data loading using a block loader, and a torch dataloader. Below we load the entire dataset to memory for comparison.\n\n# %%\nwith h5py.File(\"data/data.hdf5\", \"r\") as f:\n # Loaded data with `h5py` is numpy arrays, we convert to torch tensors\n data_h5py = torch.tensor(f.get(\"tensor_data\")[:])\n \ndata_h5py[:15]\n\n# %% [markdown]\n# Below we calculate the time it takes to load and shuffle the dataset using the `BlockDataLoader`. We also make sure that data is shuffled, and that all data is loaded. Note that we load data in blocksof 50000 samples. 
This means that we only shuffle batches within blocks of this size.\n\n# %%\nblock_dataloader = BlockDataLoader(dataset=hdf_dataset, block_size=50000, batch_size=500)\n\ndata = []\n\nfor batch in tqdm(block_dataloader):\n    data.extend(batch.tolist())\n\n# Loaded data that has been shuffled\nprint(torch.equal(data_h5py, torch.tensor(data)))\n# Loaded data that has been sorted\nprint(torch.equal(data_h5py, torch.tensor(sorted(data))))\n\n# %%\ndataloader = DataLoader(dataset=hdf_dataset, batch_size=500)\n\nt = time.time()\n\nfor batch in tqdm(dataloader):\n    pass\n\n# %% [markdown]\n# The time to load all batches using the `DataLoader` is significantly longer.\n\n# %% [markdown]\n# The `BlockDataLoader` receives several arguments which can alter its behaviour.\n\n# %%\ndataloader = BlockDataLoader(\n    dataset=dataset,\n    block_size=13,\n    batch_size=4,\n    drop_last_block=False,\n    drop_last_batch=True,\n    shuffle=False,\n)\n\nfor batch in dataloader:\n    print(batch)\n\n# %% [markdown]\n# Depending on how data is stored, the default functions in `BlockDataLoader` might not be able to properly retrieve slices. In this case the user needs to specify `_accessitem` and `_accesslen`.\n\n# %% [markdown]\n# # `BucketBatchSampler`\n\n# %% [markdown]\n# The `BucketBatchSampler` can be used to bucket items in the training set. This could, for example, be to make sure samples of similar length are passed to the model.\n\n# %%\n# random data of different lengths\ndata = [\n    torch.arange(torch.randint(1, 5, size=(1,)).item())\n    for _ in range(20)\n]\n\n\nclass WrapperDataset(Dataset):\n    def __init__(self, data):\n        self.data = data\n    \n    def __getitem__(self, idx):\n        return self.data[idx]\n    \n    def __len__(self):\n        return len(self.data)\n    \n\ndef collate(data):\n    return data\n\n\ndataset = WrapperDataset(data)\n\n_, sorted_indices = torch.sort(torch.tensor([len(d) for d in data]))\nbucket_sampler = BucketBatchSampler(\n    data,\n    batch_size=3,\n    num_buckets=3,\n    indices=sorted_indices,\n    drop_last=True,\n)\n\ndataloader = DataLoader(\n    dataset,\n    batch_sampler=bucket_sampler,\n    collate_fn=lambda x: x,  # needed to not put batches into tensors\n)\n\nfor batch in dataloader:\n    print(batch)\n\n# %% [markdown]\n# Note that in each batch the tensors are of similar length.\n\n# %% [markdown]\n# # `MultiDataset`\n\n# %% [markdown]\n# The `MultiDataset` can be used to iterate through different datasets each epoch. This can be useful when a lot of data is present. As a dummy example let us look at a set of torch tensors.\n\n# %%\n# each list in the element represents one dataset\ndata_list = [\n    torch.arange(start=(5 * idx), end=(5 * (idx + 1)))\n    for idx in range(4)\n]\n\ndataset = MultiDataset(data_list, repeats=False, shuffle=False)\n\nfor _ in range(dataset.num_steps):\n    print(dataset[:])\n    dataset.step()\n\n# %% [markdown]\n# Here we used the `step` function to iterate through the different datasets. 
We could also use the `MultiDataset` as an iterator.\n\n# %%\nfor _ in dataset:\n    print(dataset[:])\n\n# %% [markdown]\n# We can also repeat data, to allow for an arbitrary number of epochs.\n\n# %%\ndataset = MultiDataset(data_list, repeats=True, shuffle=False)\n\nnum_epochs = 10\n\nfor _ in range(num_epochs):\n    print(dataset[:])\n    dataset.step()\n\n# %% [markdown]\n# We can also shuffle the dataset order, with or without repeats.\n\n# %%\ndataset = MultiDataset(data_list, repeats=False, shuffle=True)\n\nfor _ in dataset:\n    print(dataset[:])\n\n# %%\ndataset = MultiDataset(data_list, repeats=True, shuffle=True)\n    \nfor _ in range(num_epochs):\n    print(dataset[:])\n    dataset.step()\n\n# %%\n\n# %%\n"
] | [
[
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.randint",
"torch.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
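The block-then-batch idea demonstrated above is independent of `BlockDataLoader` itself: read one contiguous block into memory, shuffle within it, and emit batches that never cross block boundaries. A plain-Python sketch of that pattern:

```python
import random

def block_batches(dataset, block_size, batch_size, shuffle=True):
    """Yield batches whose elements all come from a single block."""
    for start in range(0, len(dataset), block_size):
        stop = min(start + block_size, len(dataset))
        block = [dataset[i] for i in range(start, stop)]  # one block in memory
        if shuffle:
            random.shuffle(block)                         # shuffle within the block only
        for b in range(0, len(block), batch_size):
            yield block[b:b + batch_size]

for batch in block_batches(list(range(20)), block_size=10, batch_size=5):
    print(batch)  # 0-9 never mix with 10-19, as in the BlockDataLoader example
```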
boschresearch/blackboxopt | [
"85abea86f01a4a9d50f05d15e7d850e3288baafd"
] | [
"blackboxopt/optimizers/staged/iteration.py"
] | [
"# Copyright (c) 2020 - for information on the respective copyright owner\n# see the NOTICE file and/or the repository https://github.com/boschresearch/blackboxopt\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport copy\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Callable, Dict, List, Optional, Tuple\nfrom uuid import UUID, uuid4\n\nimport numpy as np\n\nfrom blackboxopt import Evaluation, EvaluationSpecification, Objective\nfrom blackboxopt.optimizers.staged.configuration_sampler import (\n StagedIterationConfigurationSampler,\n)\n\n\n@dataclass\nclass Datum:\n \"\"\"Small container for bookkeeping only.\"\"\"\n\n config_key: Tuple[int, int, int]\n status: str\n loss: float = float(\"NaN\")\n\n\nclass StagedIteration:\n def __init__(\n self,\n iteration: int,\n num_configs: List[int],\n fidelities: List[float],\n config_sampler: StagedIterationConfigurationSampler,\n config_promotion_function: Callable,\n objective: Objective,\n logger: logging.Logger = None,\n ):\n \"\"\"Base class for iterations that compare configurations at different\n fidelities and race them as in SuccessiveHalving or Hyperband.\n\n Args:\n iteration: Index of this iteration.\n num_configs: Number of configurations in each stage.\n fidelities: The fidelity for each stage. Must have the same length as\n `num_configs'.\n config_sampler: Configuration Sampler object that suggests a new\n configuration for evaluation given a fidelity.\n config_promotion_function: Function that decides which configurations are\n promoted. Check\n `blackboxopt.optimizers.utils.staged_iteration.greedy_promotion` for\n the signature.\n objective: The objective of the optimization.\n logger: A standard logger to which some debug output might be written.\n \"\"\"\n assert len(fidelities) == len(\n num_configs\n ), \"Please specify the number of configuration and the fidelities.\"\n self.logger = logging.getLogger(\"blackboxopt\") if logger is None else logger\n self.iteration = iteration\n self.fidelities = fidelities\n self.num_configs = num_configs\n self.config_sampler = config_sampler\n self.config_promotion_function = config_promotion_function\n self.objective = objective\n self.current_stage = 0\n self.evaluation_data: List[List[Datum]] = [[]]\n self.eval_specs: Dict[Tuple[int, int, int], EvaluationSpecification] = {}\n self.pending_evaluations: Dict[UUID, int] = {}\n self.finished = False\n\n def generate_evaluation_specification(self) -> Optional[EvaluationSpecification]:\n \"\"\"Pick the next evaluation specification with a budget i.e. 
fidelity to run.\n\n Returns:\n [description]\n \"\"\"\n if self.finished:\n return None\n\n # try to find a queued entry first\n for i, d in enumerate(self.evaluation_data[self.current_stage]):\n if d.status == \"QUEUED\":\n es = copy.deepcopy(self.eval_specs[d.config_key])\n es.settings[\"fidelity\"] = self.fidelities[self.current_stage]\n d.status = \"RUNNING\"\n self.pending_evaluations[es.optimizer_info[\"id\"]] = i\n return es\n\n # sample a new configuration if there are empty slots to be filled\n if (\n len(self.evaluation_data[self.current_stage])\n < self.num_configs[self.current_stage]\n ):\n conf_key = (\n self.iteration,\n self.current_stage,\n len(self.evaluation_data[self.current_stage]),\n )\n conf, opt_info = self.config_sampler.sample_configuration()\n opt_info.update({\"configuration_key\": conf_key, \"id\": str(uuid4())})\n self.eval_specs[conf_key] = EvaluationSpecification(\n configuration=conf, settings={}, optimizer_info=opt_info\n )\n self.evaluation_data[self.current_stage].append(Datum(conf_key, \"QUEUED\"))\n # To understand recursion, you first must understand recursion :)\n return self.generate_evaluation_specification()\n\n # at this point there are pending evaluations and this iteration has to wait\n return None\n\n def digest_evaluation(\n self, evaluation_specificiation_id: UUID, evaluation: Evaluation\n ):\n \"\"\"Registers the result of an evaluation.\n\n Args:\n id: [description]\n evaluation: [description]\n \"\"\"\n self.config_sampler.digest_evaluation(evaluation)\n i = self.pending_evaluations.pop(evaluation_specificiation_id)\n d = self.evaluation_data[self.current_stage][i]\n d.status = \"FINISHED\" if not evaluation.all_objectives_none else \"CRASHED\"\n objective_value = evaluation.objectives[self.objective.name]\n if objective_value is not None:\n d.loss = (\n -objective_value\n if self.objective.greater_is_better\n else objective_value\n )\n\n # quick check if all configurations have finished yet\n if len(self.evaluation_data[self.current_stage]) == self.num_configs[\n self.current_stage\n ] and all(\n [\n e.status in [\"FINISHED\", \"CRASHED\"]\n for e in self.evaluation_data[self.current_stage]\n ]\n ):\n self._progress_to_next_stage()\n\n def _progress_to_next_stage(self):\n \"\"\"Implements logic to promote configurations to the next stage.\"\"\"\n # filter out crashed configurations\n data = [\n d for d in self.evaluation_data[self.current_stage] if np.isfinite(d.loss)\n ]\n self.current_stage += 1\n if self.current_stage == len(self.num_configs):\n self.finished = True\n return\n\n config_keys = self.config_promotion_function(\n data, self.num_configs[self.current_stage]\n )\n self.logger.debug(\n \"Iteration %i: Advancing configurations %s to stage %i.\",\n self.iteration,\n str(config_keys),\n self.current_stage,\n )\n self.evaluation_data.append(\n [Datum(config_key, \"QUEUED\") for config_key in config_keys]\n )\n"
] | [
[
"numpy.isfinite"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
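The `config_promotion_function` above receives the finished `Datum` list of a stage and returns the configuration keys to advance, as in SuccessiveHalving. A greedy top-k sketch matching that signature; the lowest-loss-first convention is an assumption based on how `digest_evaluation` negates maximized objectives:

```python
from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class Datum:  # same bookkeeping container as in the module above
    config_key: Tuple[int, int, int]
    status: str
    loss: float = float("nan")

def greedy_promotion(data: List[Datum], num_configs: int) -> List[Tuple[int, int, int]]:
    """Promote the num_configs configurations with the lowest loss."""
    ranked = sorted(data, key=lambda d: d.loss)
    return [d.config_key for d in ranked[:num_configs]]

stage = [Datum((0, 0, i), "FINISHED", loss) for i, loss in enumerate([0.9, 0.1, 0.5])]
print(greedy_promotion(stage, 2))  # [(0, 0, 1), (0, 0, 2)]
```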
vijoin/ibis | [
"9d1086d7d29c2d3760c8150d8641ab51799720cd"
] | [
"ibis/expr/datatypes.py"
] | [
"import builtins\nimport collections\nimport datetime\nimport functools\nimport itertools\nimport numbers\nimport re\nimport typing\nfrom typing import Any as GenericAny\nfrom typing import (\n Callable,\n Iterator,\n List,\n Mapping,\n NamedTuple,\n Optional,\n Sequence,\n)\nfrom typing import Set as GenericSet\nfrom typing import Tuple, TypeVar, Union\n\nimport pandas as pd\nimport toolz\nfrom multipledispatch import Dispatcher\n\nimport ibis.common.exceptions as com\nimport ibis.expr.types as ir\nfrom ibis import util\n\nIS_SHAPELY_AVAILABLE = False\ntry:\n import shapely.geometry\n\n IS_SHAPELY_AVAILABLE = True\nexcept ImportError:\n ...\n\n\nclass DataType:\n\n __slots__ = ('nullable',)\n\n def __init__(self, nullable: bool = True) -> None:\n self.nullable = nullable\n\n def __call__(self, nullable: bool = True) -> 'DataType':\n if nullable is not True and nullable is not False:\n raise TypeError(\n \"__call__ only accepts the 'nullable' argument. \"\n \"Please construct a new instance of the type to change the \"\n \"values of the attributes.\"\n )\n return self._factory(nullable=nullable)\n\n def _factory(self, nullable: bool = True) -> 'DataType':\n slots = {\n slot: getattr(self, slot)\n for slot in self.__slots__\n if slot != 'nullable'\n }\n return type(self)(nullable=nullable, **slots)\n\n def __eq__(self, other) -> bool:\n return self.equals(other)\n\n def __ne__(self, other) -> bool:\n return not (self == other)\n\n def __hash__(self) -> int:\n custom_parts = tuple(\n getattr(self, slot)\n for slot in toolz.unique(self.__slots__ + ('nullable',))\n )\n return hash((type(self),) + custom_parts)\n\n def __repr__(self) -> str:\n return '{}({})'.format(\n self.name,\n ', '.join(\n '{}={!r}'.format(slot, getattr(self, slot))\n for slot in toolz.unique(self.__slots__ + ('nullable',))\n ),\n )\n\n def __str__(self) -> str:\n return '{}{}'.format(\n self.name.lower(), '[non-nullable]' if not self.nullable else ''\n )\n\n @property\n def name(self) -> str:\n return type(self).__name__\n\n def equals(\n self,\n other: 'DataType',\n cache: Optional[Mapping[GenericAny, bool]] = None,\n ) -> bool:\n if isinstance(other, str):\n raise TypeError(\n 'Comparing datatypes to strings is not allowed. 
Convert '\n '{!r} to the equivalent DataType instance.'.format(other)\n )\n return (\n isinstance(other, type(self))\n and self.nullable == other.nullable\n and self.__slots__ == other.__slots__\n and all(\n getattr(self, slot) == getattr(other, slot)\n for slot in self.__slots__\n )\n )\n\n def castable(self, target, **kwargs):\n return castable(self, target, **kwargs)\n\n def cast(self, target, **kwargs):\n return cast(self, target, **kwargs)\n\n def scalar_type(self):\n return functools.partial(self.scalar, dtype=self)\n\n def column_type(self):\n return functools.partial(self.column, dtype=self)\n\n def _literal_value_hash_key(self, value) -> int:\n \"\"\"Return a hash for `value`.\"\"\"\n return self, value\n\n\nclass Any(DataType):\n __slots__ = ()\n\n\nclass Primitive(DataType):\n __slots__ = ()\n\n def __repr__(self) -> str:\n name = self.name.lower()\n if not self.nullable:\n return '{}[non-nullable]'.format(name)\n return name\n\n\nclass Null(DataType):\n scalar = ir.NullScalar\n column = ir.NullColumn\n\n __slots__ = ()\n\n\nclass Variadic(DataType):\n __slots__ = ()\n\n\nclass Boolean(Primitive):\n scalar = ir.BooleanScalar\n column = ir.BooleanColumn\n\n __slots__ = ()\n\n\nBounds = NamedTuple('Bounds', [('lower', int), ('upper', int)])\n\n\nclass Integer(Primitive):\n scalar = ir.IntegerScalar\n column = ir.IntegerColumn\n\n __slots__ = ()\n\n @property\n def _nbytes(self) -> int:\n raise TypeError(\n \"Cannot determine the size in bytes of an abstract integer type.\"\n )\n\n\nclass String(Variadic):\n \"\"\"A type representing a string.\n\n Notes\n -----\n Because of differences in the way different backends handle strings, we\n cannot assume that strings are UTF-8 encoded.\n \"\"\"\n\n scalar = ir.StringScalar\n column = ir.StringColumn\n\n __slots__ = ()\n\n\nclass Binary(Variadic):\n \"\"\"A type representing a blob of bytes.\n\n Notes\n -----\n Some databases treat strings and blobs of equally, and some do not. 
For\n example, Impala doesn't make a distinction between string and binary types\n but PostgreSQL has a TEXT type and a BYTEA type which are distinct types\n that behave differently.\n \"\"\"\n\n scalar = ir.BinaryScalar\n column = ir.BinaryColumn\n\n __slots__ = ()\n\n\nclass Date(Primitive):\n scalar = ir.DateScalar\n column = ir.DateColumn\n\n __slots__ = ()\n\n\nclass Time(Primitive):\n scalar = ir.TimeScalar\n column = ir.TimeColumn\n\n __slots__ = ()\n\n\nclass Timestamp(DataType):\n scalar = ir.TimestampScalar\n column = ir.TimestampColumn\n\n __slots__ = ('timezone',)\n\n def __init__(\n self, timezone: Optional[str] = None, nullable: bool = True\n ) -> None:\n super().__init__(nullable=nullable)\n self.timezone = timezone\n\n def __str__(self) -> str:\n timezone = self.timezone\n typename = self.name.lower()\n if timezone is None:\n return typename\n return '{}({!r})'.format(typename, timezone)\n\n\nclass SignedInteger(Integer):\n @property\n def largest(self):\n return int64\n\n @property\n def bounds(self):\n exp = self._nbytes * 8 - 1\n upper = (1 << exp) - 1\n return Bounds(lower=~upper, upper=upper)\n\n\nclass UnsignedInteger(Integer):\n @property\n def largest(self):\n return uint64\n\n @property\n def bounds(self):\n exp = self._nbytes * 8 - 1\n upper = 1 << exp\n return Bounds(lower=0, upper=upper)\n\n\nclass Floating(Primitive):\n scalar = ir.FloatingScalar\n column = ir.FloatingColumn\n\n __slots__ = ()\n\n @property\n def largest(self):\n return float64\n\n @property\n def _nbytes(self) -> int:\n raise TypeError(\n \"Cannot determine the size in bytes of an abstract floating \"\n \"point type.\"\n )\n\n\nclass Int8(SignedInteger):\n __slots__ = ()\n _nbytes = 1\n\n\nclass Int16(SignedInteger):\n __slots__ = ()\n _nbytes = 2\n\n\nclass Int32(SignedInteger):\n __slots__ = ()\n _nbytes = 4\n\n\nclass Int64(SignedInteger):\n __slots__ = ()\n _nbytes = 8\n\n\nclass UInt8(UnsignedInteger):\n __slots__ = ()\n _nbytes = 1\n\n\nclass UInt16(UnsignedInteger):\n __slots__ = ()\n _nbytes = 2\n\n\nclass UInt32(UnsignedInteger):\n __slots__ = ()\n _nbytes = 4\n\n\nclass UInt64(UnsignedInteger):\n __slots__ = ()\n _nbytes = 8\n\n\nclass Float16(Floating):\n __slots__ = ()\n _nbytes = 2\n\n\nclass Float32(Floating):\n __slots__ = ()\n _nbytes = 4\n\n\nclass Float64(Floating):\n __slots__ = ()\n _nbytes = 8\n\n\nHalffloat = Float16\nFloat = Float32\nDouble = Float64\n\n\nclass Decimal(DataType):\n scalar = ir.DecimalScalar\n column = ir.DecimalColumn\n\n __slots__ = 'precision', 'scale'\n\n def __init__(\n self, precision: int, scale: int, nullable: bool = True\n ) -> None:\n if not isinstance(precision, numbers.Integral):\n raise TypeError('Decimal type precision must be an integer')\n if not isinstance(scale, numbers.Integral):\n raise TypeError('Decimal type scale must be an integer')\n if precision < 0:\n raise ValueError('Decimal type precision cannot be negative')\n if not precision:\n raise ValueError('Decimal type precision cannot be zero')\n if scale < 0:\n raise ValueError('Decimal type scale cannot be negative')\n if precision < scale:\n raise ValueError(\n 'Decimal type precision must be greater than or equal to '\n 'scale. 
Got precision={:d} and scale={:d}'.format(\n precision, scale\n )\n )\n\n super().__init__(nullable=nullable)\n self.precision = precision # type: int\n self.scale = scale # type: int\n\n def __str__(self) -> str:\n return '{}({:d}, {:d})'.format(\n self.name.lower(), self.precision, self.scale\n )\n\n @property\n def largest(self) -> 'Decimal':\n return Decimal(38, self.scale)\n\n\nclass Interval(DataType):\n scalar = ir.IntervalScalar\n column = ir.IntervalColumn\n\n __slots__ = 'value_type', 'unit'\n\n # based on numpy's units\n _units = dict(\n Y='year',\n Q='quarter',\n M='month',\n W='week',\n D='day',\n h='hour',\n m='minute',\n s='second',\n ms='millisecond',\n us='microsecond',\n ns='nanosecond',\n )\n\n _timedelta_to_interval_units = dict(\n days='D',\n hours='h',\n minutes='m',\n seconds='s',\n milliseconds='ms',\n microseconds='us',\n nanoseconds='ns',\n )\n\n def _convert_timedelta_unit_to_interval_unit(self, unit: str):\n if unit not in self._timedelta_to_interval_units:\n raise ValueError\n return self._timedelta_to_interval_units[unit]\n\n def __init__(\n self,\n unit: str = 's',\n value_type: Integer = None,\n nullable: bool = True,\n ) -> None:\n super().__init__(nullable=nullable)\n if unit not in self._units:\n try:\n unit = self._convert_timedelta_unit_to_interval_unit(unit)\n except ValueError:\n raise ValueError('Unsupported interval unit `{}`'.format(unit))\n\n if value_type is None:\n value_type = int32\n else:\n value_type = dtype(value_type)\n\n if not isinstance(value_type, Integer):\n raise TypeError(\"Interval's inner type must be an Integer subtype\")\n\n self.unit = unit\n self.value_type = value_type\n\n @property\n def bounds(self):\n return self.value_type.bounds\n\n @property\n def resolution(self):\n \"\"\"Unit's name\"\"\"\n return self._units[self.unit]\n\n def __str__(self):\n unit = self.unit\n typename = self.name.lower()\n value_type_name = self.value_type.name.lower()\n return '{}<{}>(unit={!r})'.format(typename, value_type_name, unit)\n\n\nclass Category(DataType):\n scalar = ir.CategoryScalar\n column = ir.CategoryColumn\n\n __slots__ = ('cardinality',)\n\n def __init__(self, cardinality=None, nullable=True):\n super().__init__(nullable=nullable)\n self.cardinality = cardinality\n\n def __repr__(self):\n if self.cardinality is not None:\n cardinality = self.cardinality\n else:\n cardinality = 'unknown'\n return '{}(cardinality={!r})'.format(self.name, cardinality)\n\n def to_integer_type(self):\n # TODO: this should be removed I guess\n if self.cardinality is None:\n return int64\n else:\n return infer(self.cardinality)\n\n\nclass Struct(DataType):\n scalar = ir.StructScalar\n column = ir.StructColumn\n\n __slots__ = 'names', 'types'\n\n def __init__(\n self, names: List[str], types: List[DataType], nullable: bool = True\n ) -> None:\n \"\"\"Construct a ``Struct`` type from a `names` and `types`.\n\n Parameters\n ----------\n names : Sequence[str]\n Sequence of strings indicating the name of each field in the\n struct.\n types : Sequence[Union[str, DataType]]\n Sequence of strings or :class:`~ibis.expr.datatypes.DataType`\n instances, one for each field\n nullable : bool, optional\n Whether the struct can be null\n \"\"\"\n if not (names and types):\n raise ValueError('names and types must not be empty')\n if len(names) != len(types):\n raise ValueError('names and types must have the same length')\n\n super().__init__(nullable=nullable)\n self.names = names\n self.types = types\n\n @classmethod\n def from_tuples(\n cls,\n pairs: 
Sequence[Tuple[str, Union[str, DataType]]],\n nullable: bool = True,\n ) -> 'Struct':\n names, types = zip(*pairs)\n return cls(list(names), list(map(dtype, types)), nullable=nullable)\n\n @property\n def pairs(self) -> Mapping:\n return collections.OrderedDict(zip(self.names, self.types))\n\n def __getitem__(self, key: str) -> DataType:\n return self.pairs[key]\n\n def __hash__(self) -> int:\n return hash(\n (type(self), tuple(self.names), tuple(self.types), self.nullable)\n )\n\n def __repr__(self) -> str:\n return '{}({}, nullable={})'.format(\n self.name, list(self.pairs.items()), self.nullable\n )\n\n def __str__(self) -> str:\n return '{}<{}>'.format(\n self.name.lower(),\n ', '.join(itertools.starmap('{}: {}'.format, self.pairs.items())),\n )\n\n def _literal_value_hash_key(self, value):\n return self, _tuplize(value.items())\n\n\ndef _tuplize(values):\n \"\"\"Recursively convert `values` to a tuple of tuples.\"\"\"\n\n def tuplize_iter(values):\n yield from (\n tuple(tuplize_iter(value)) if util.is_iterable(value) else value\n for value in values\n )\n\n return tuple(tuplize_iter(values))\n\n\nclass Array(Variadic):\n scalar = ir.ArrayScalar\n column = ir.ArrayColumn\n\n __slots__ = ('value_type',)\n\n def __init__(\n self, value_type: Union[str, DataType], nullable: bool = True\n ) -> None:\n super().__init__(nullable=nullable)\n self.value_type = dtype(value_type)\n\n def __str__(self) -> str:\n return '{}<{}>'.format(self.name.lower(), self.value_type)\n\n def _literal_value_hash_key(self, value):\n return self, _tuplize(value)\n\n\nclass Set(Variadic):\n scalar = ir.SetScalar\n column = ir.SetColumn\n\n __slots__ = ('value_type',)\n\n def __init__(\n self, value_type: Union[str, DataType], nullable: bool = True\n ) -> None:\n super().__init__(nullable=nullable)\n self.value_type = dtype(value_type)\n\n def __str__(self) -> str:\n return '{}<{}>'.format(self.name.lower(), self.value_type)\n\n\nclass Enum(DataType):\n scalar = ir.EnumScalar\n column = ir.EnumColumn\n\n __slots__ = 'rep_type', 'value_type'\n\n def __init__(\n self, rep_type: DataType, value_type: DataType, nullable: bool = True\n ) -> None:\n super().__init__(nullable=nullable)\n self.rep_type = dtype(rep_type)\n self.value_type = dtype(value_type)\n\n\nclass Map(Variadic):\n scalar = ir.MapScalar\n column = ir.MapColumn\n\n __slots__ = 'key_type', 'value_type'\n\n def __init__(\n self, key_type: DataType, value_type: DataType, nullable: bool = True\n ) -> None:\n super().__init__(nullable=nullable)\n self.key_type = dtype(key_type)\n self.value_type = dtype(value_type)\n\n def __str__(self) -> str:\n return '{}<{}, {}>'.format(\n self.name.lower(), self.key_type, self.value_type\n )\n\n def _literal_value_hash_key(self, value):\n return self, _tuplize(value.items())\n\n\nclass JSON(String):\n \"\"\"JSON (JavaScript Object Notation) text format.\"\"\"\n\n scalar = ir.JSONScalar\n column = ir.JSONColumn\n\n\nclass JSONB(Binary):\n \"\"\"JSON (JavaScript Object Notation) data stored as a binary\n representation, which eliminates whitespace, duplicate keys,\n and key ordering.\n \"\"\"\n\n scalar = ir.JSONBScalar\n column = ir.JSONBColumn\n\n\nclass GeoSpatial(DataType):\n __slots__ = 'geotype', 'srid'\n\n column = ir.GeoSpatialColumn\n scalar = ir.GeoSpatialScalar\n\n def __init__(\n self, geotype: str = None, srid: int = None, nullable: bool = True\n ):\n \"\"\"Geospatial data type base class\n\n Parameters\n ----------\n geotype : str\n Specification of geospatial type which could be `geography` or\n `geometry`.\n 
srid : int\n Spatial Reference System Identifier\n nullable : bool, optional\n Whether the struct can be null\n \"\"\"\n super().__init__(nullable=nullable)\n\n if geotype not in (None, 'geometry', 'geography'):\n raise ValueError(\n 'The `geotype` parameter should be `geometry` or `geography`'\n )\n\n self.geotype = geotype\n self.srid = srid\n\n def __str__(self) -> str:\n geo_op = self.name.lower()\n if self.geotype is not None:\n geo_op += ':' + self.geotype\n if self.srid is not None:\n geo_op += ';' + str(self.srid)\n return geo_op\n\n def _literal_value_hash_key(self, value):\n if IS_SHAPELY_AVAILABLE:\n geo_shapes = (\n shapely.geometry.Point,\n shapely.geometry.LineString,\n shapely.geometry.Polygon,\n shapely.geometry.MultiLineString,\n shapely.geometry.MultiPoint,\n shapely.geometry.MultiPolygon,\n )\n if isinstance(value, geo_shapes):\n return self, value.wkt\n return self, value\n\n\nclass Geometry(GeoSpatial):\n \"\"\"Geometry is used to cast from geography types.\"\"\"\n\n column = ir.GeoSpatialColumn\n scalar = ir.GeoSpatialScalar\n\n __slots__ = ()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.geotype = 'geometry'\n\n def __str__(self) -> str:\n return self.name.lower()\n\n\nclass Geography(GeoSpatial):\n \"\"\"Geography is used to cast from geometry types.\"\"\"\n\n column = ir.GeoSpatialColumn\n scalar = ir.GeoSpatialScalar\n\n __slots__ = ()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.geotype = 'geography'\n\n def __str__(self) -> str:\n return self.name.lower()\n\n\nclass Point(GeoSpatial):\n \"\"\"A point described by two coordinates.\"\"\"\n\n scalar = ir.PointScalar\n column = ir.PointColumn\n\n __slots__ = ()\n\n\nclass LineString(GeoSpatial):\n \"\"\"A sequence of 2 or more points.\"\"\"\n\n scalar = ir.LineStringScalar\n column = ir.LineStringColumn\n\n __slots__ = ()\n\n\nclass Polygon(GeoSpatial):\n \"\"\"A set of one or more rings (closed line strings), with the first\n representing the shape (external ring) and the rest representing holes in\n that shape (internal rings).\n \"\"\"\n\n scalar = ir.PolygonScalar\n column = ir.PolygonColumn\n\n __slots__ = ()\n\n\nclass MultiLineString(GeoSpatial):\n \"\"\"A set of one or more line strings.\"\"\"\n\n scalar = ir.MultiLineStringScalar\n column = ir.MultiLineStringColumn\n\n __slots__ = ()\n\n\nclass MultiPoint(GeoSpatial):\n \"\"\"A set of one or more points.\"\"\"\n\n scalar = ir.MultiPointScalar\n column = ir.MultiPointColumn\n\n __slots__ = ()\n\n\nclass MultiPolygon(GeoSpatial):\n \"\"\"A set of one or more polygons.\"\"\"\n\n scalar = ir.MultiPolygonScalar\n column = ir.MultiPolygonColumn\n\n __slots__ = ()\n\n\nclass UUID(String):\n \"\"\"A universally unique identifier (UUID) is a 128-bit number used to\n identify information in computer systems.\n \"\"\"\n\n scalar = ir.UUIDScalar\n column = ir.UUIDColumn\n\n __slots__ = ()\n\n\n# ---------------------------------------------------------------------\nany = Any()\nnull = Null()\nboolean = Boolean()\nint_ = Integer()\nint8 = Int8()\nint16 = Int16()\nint32 = Int32()\nint64 = Int64()\nuint_ = UnsignedInteger()\nuint8 = UInt8()\nuint16 = UInt16()\nuint32 = UInt32()\nuint64 = UInt64()\nfloat = Float()\nhalffloat = Halffloat()\nfloat16 = Halffloat()\nfloat32 = Float32()\nfloat64 = Float64()\ndouble = Double()\nstring = String()\nbinary = Binary()\ndate = Date()\ntime = Time()\ntimestamp = Timestamp()\ninterval = Interval()\ncategory = Category()\n# geo spatial data type\ngeometry = 
GeoSpatial()\ngeography = GeoSpatial()\npoint = Point()\nlinestring = LineString()\npolygon = Polygon()\nmultilinestring = MultiLineString()\nmultipoint = MultiPoint()\nmultipolygon = MultiPolygon()\n# json\njson = JSON()\njsonb = JSONB()\n# special string based data type\nuuid = UUID()\n\n_primitive_types = [\n ('any', any),\n ('null', null),\n ('boolean', boolean),\n ('bool', boolean),\n ('int8', int8),\n ('int16', int16),\n ('int32', int32),\n ('int64', int64),\n ('uint8', uint8),\n ('uint16', uint16),\n ('uint32', uint32),\n ('uint64', uint64),\n ('float16', float16),\n ('float32', float32),\n ('float64', float64),\n ('float', float),\n ('halffloat', float16),\n ('double', double),\n ('string', string),\n ('binary', binary),\n ('date', date),\n ('time', time),\n ('timestamp', timestamp),\n ('interval', interval),\n ('category', category),\n] # type: List[Tuple[str, DataType]]\n\n\nclass Tokens:\n \"\"\"Class to hold tokens for lexing.\"\"\"\n\n __slots__ = ()\n\n ANY = 0\n NULL = 1\n PRIMITIVE = 2\n DECIMAL = 3\n VARCHAR = 4\n CHAR = 5\n ARRAY = 6\n MAP = 7\n STRUCT = 8\n INTEGER = 9\n FIELD = 10\n COMMA = 11\n COLON = 12\n LPAREN = 13\n RPAREN = 14\n LBRACKET = 15\n RBRACKET = 16\n STRARG = 17\n TIMESTAMP = 18\n TIME = 19\n INTERVAL = 20\n SET = 21\n GEOGRAPHY = 22\n GEOMETRY = 23\n POINT = 24\n LINESTRING = 25\n POLYGON = 26\n MULTILINESTRING = 27\n MULTIPOINT = 28\n MULTIPOLYGON = 29\n SEMICOLON = 30\n JSON = 31\n JSONB = 32\n UUID = 33\n\n @staticmethod\n def name(value):\n return _token_names[value]\n\n\n_token_names = dict(\n (getattr(Tokens, n), n) for n in dir(Tokens) if n.isalpha() and n.isupper()\n)\n\nToken = collections.namedtuple('Token', ('type', 'value'))\n\n\n# Adapted from tokenize.String\n_STRING_REGEX = \"\"\"('[^\\n'\\\\\\\\]*(?:\\\\\\\\.[^\\n'\\\\\\\\]*)*'|\"[^\\n\"\\\\\\\\\"]*(?:\\\\\\\\.[^\\n\"\\\\\\\\]*)*\")\"\"\" # noqa: E501\n\n\nAction = Optional[Callable[[str], Token]]\n\n\n_TYPE_RULES = collections.OrderedDict(\n [\n # any, null, bool|boolean\n ('(?P<ANY>any)', lambda token: Token(Tokens.ANY, any)),\n ('(?P<NULL>null)', lambda token: Token(Tokens.NULL, null)),\n (\n '(?P<BOOLEAN>bool(?:ean)?)',\n typing.cast(\n Action, lambda token: Token(Tokens.PRIMITIVE, boolean)\n ),\n ),\n ]\n + [\n # primitive types\n (\n '(?P<{}>{})'.format(token.upper(), token),\n typing.cast(\n Action,\n lambda token, value=value: Token(Tokens.PRIMITIVE, value),\n ),\n )\n for token, value in _primitive_types\n if token\n not in {'any', 'null', 'timestamp', 'time', 'interval', 'boolean'}\n ]\n + [\n # timestamp\n (\n r'(?P<TIMESTAMP>timestamp)',\n lambda token: Token(Tokens.TIMESTAMP, token),\n )\n ]\n + [\n # interval - should remove?\n (\n r'(?P<INTERVAL>interval)',\n lambda token: Token(Tokens.INTERVAL, token),\n )\n ]\n + [\n # time\n (r'(?P<TIME>time)', lambda token: Token(Tokens.TIME, token))\n ]\n + [\n # decimal + complex types\n (\n '(?P<{}>{})'.format(token.upper(), token),\n typing.cast(\n Action, lambda token, toktype=toktype: Token(toktype, token)\n ),\n )\n for token, toktype in zip(\n (\n 'decimal',\n 'varchar',\n 'char',\n 'array',\n 'set',\n 'map',\n 'struct',\n 'interval',\n ),\n (\n Tokens.DECIMAL,\n Tokens.VARCHAR,\n Tokens.CHAR,\n Tokens.ARRAY,\n Tokens.SET,\n Tokens.MAP,\n Tokens.STRUCT,\n Tokens.INTERVAL,\n ),\n )\n ]\n + [\n # geo spatial data type\n (\n '(?P<{}>{})'.format(token.upper(), token),\n lambda token, toktype=toktype: Token(toktype, token),\n )\n for token, toktype in zip(\n (\n 'geometry',\n 'geography',\n 'point',\n 'linestring',\n 'polygon',\n 
'multilinestring',\n 'multipoint',\n 'multipolygon',\n ),\n (\n Tokens.GEOMETRY,\n Tokens.GEOGRAPHY,\n Tokens.POINT,\n Tokens.LINESTRING,\n Tokens.POLYGON,\n Tokens.MULTILINESTRING,\n Tokens.MULTIPOINT,\n Tokens.MULTIPOLYGON,\n ),\n )\n ]\n + [\n # json data type\n (\n '(?P<{}>{})'.format(token.upper(), token),\n lambda token, toktype=toktype: Token(toktype, token),\n )\n for token, toktype in zip(\n # note: `jsonb` should be first to avoid conflict with `json`\n ('jsonb', 'json'),\n (Tokens.JSONB, Tokens.JSON),\n )\n ]\n + [\n # special string based data types\n ('(?P<UUID>uuid)', lambda token: Token(Tokens.UUID, token))\n ]\n + [\n # integers, for decimal spec\n (r'(?P<INTEGER>\\d+)', lambda token: Token(Tokens.INTEGER, int(token))),\n # struct fields\n (\n r'(?P<FIELD>[a-zA-Z_][a-zA-Z_0-9]*)',\n lambda token: Token(Tokens.FIELD, token),\n ),\n # timezones\n ('(?P<COMMA>,)', lambda token: Token(Tokens.COMMA, token)),\n ('(?P<COLON>:)', lambda token: Token(Tokens.COLON, token)),\n ('(?P<SEMICOLON>;)', lambda token: Token(Tokens.SEMICOLON, token)),\n (r'(?P<LPAREN>\\()', lambda token: Token(Tokens.LPAREN, token)),\n (r'(?P<RPAREN>\\))', lambda token: Token(Tokens.RPAREN, token)),\n ('(?P<LBRACKET><)', lambda token: Token(Tokens.LBRACKET, token)),\n ('(?P<RBRACKET>>)', lambda token: Token(Tokens.RBRACKET, token)),\n (r'(?P<WHITESPACE>\\s+)', None),\n (\n '(?P<STRARG>{})'.format(_STRING_REGEX),\n lambda token: Token(Tokens.STRARG, token),\n ),\n ]\n)\n\n\n_TYPE_KEYS = tuple(_TYPE_RULES.keys())\n_TYPE_PATTERN = re.compile('|'.join(_TYPE_KEYS), flags=re.IGNORECASE)\n\n\ndef _generate_tokens(pat: GenericAny, text: str) -> Iterator[Token]:\n \"\"\"Generate a sequence of tokens from `text` that match `pat`\n\n Parameters\n ----------\n pat : compiled regex\n The pattern to use for tokenization\n text : str\n The text to tokenize\n\n \"\"\"\n rules = _TYPE_RULES\n keys = _TYPE_KEYS\n groupindex = pat.groupindex\n scanner = pat.scanner(text)\n for m in iter(scanner.match, None):\n lastgroup = m.lastgroup\n func = rules[keys[groupindex[lastgroup] - 1]]\n if func is not None:\n yield func(m.group(lastgroup))\n\n\nclass TypeParser:\n \"\"\"A type parser for complex types.\n\n Parameters\n ----------\n text : str\n The text to parse\n\n Notes\n -----\n Adapted from David Beazley's and Brian Jones's Python Cookbook\n\n \"\"\"\n\n __slots__ = 'text', 'tokens', 'tok', 'nexttok'\n\n def __init__(self, text: str) -> None:\n self.text = text # type: str\n self.tokens = _generate_tokens(_TYPE_PATTERN, text)\n self.tok = None # type: Optional[Token]\n self.nexttok = None # type: Optional[Token]\n\n def _advance(self) -> None:\n self.tok, self.nexttok = self.nexttok, next(self.tokens, None)\n\n def _accept(self, toktype: int) -> bool:\n if self.nexttok is not None and self.nexttok.type == toktype:\n self._advance()\n assert (\n self.tok is not None\n ), 'self.tok should not be None when _accept succeeds'\n return True\n return False\n\n def _expect(self, toktype: int) -> None:\n if not self._accept(toktype):\n raise SyntaxError(\n 'Expected {} after {!r} in {!r}'.format(\n Tokens.name(toktype),\n getattr(self.tok, 'value', self.tok),\n self.text,\n )\n )\n\n def parse(self) -> DataType:\n self._advance()\n\n # any and null types cannot be nested\n if self._accept(Tokens.ANY) or self._accept(Tokens.NULL):\n assert (\n self.tok is not None\n ), 'self.tok was None when parsing ANY or NULL type'\n return self.tok.value\n\n t = self.type()\n if self.nexttok is None:\n return t\n else:\n # additional junk was passed 
at the end, throw an error\n additional_tokens = []\n while self.nexttok is not None:\n additional_tokens.append(self.nexttok.value)\n self._advance()\n raise SyntaxError(\n 'Found additional tokens {}'.format(additional_tokens)\n )\n\n def type(self) -> DataType:\n \"\"\"\n type : primitive\n | decimal\n | array\n | set\n | map\n | struct\n\n primitive : \"any\"\n | \"null\"\n | \"bool\"\n | \"boolean\"\n | \"int8\"\n | \"int16\"\n | \"int32\"\n | \"int64\"\n | \"uint8\"\n | \"uint16\"\n | \"uint32\"\n | \"uint64\"\n | \"halffloat\"\n | \"float\"\n | \"double\"\n | \"float16\"\n | \"float32\"\n | \"float64\"\n | \"string\"\n | \"time\"\n\n timestamp : \"timestamp\"\n | \"timestamp\" \"(\" timezone \")\"\n\n interval : \"interval\"\n | \"interval\" \"(\" unit \")\"\n | \"interval\" \"<\" type \">\" \"(\" unit \")\"\n\n decimal : \"decimal\"\n | \"decimal\" \"(\" integer \",\" integer \")\"\n\n integer : [0-9]+\n\n array : \"array\" \"<\" type \">\"\n\n set : \"set\" \"<\" type \">\"\n\n map : \"map\" \"<\" type \",\" type \">\"\n\n struct : \"struct\" \"<\" field \":\" type (\",\" field \":\" type)* \">\"\n\n field : [a-zA-Z_][a-zA-Z_0-9]*\n\n geography: \"geography\"\n\n geometry: \"geometry\"\n\n point : \"point\"\n | \"point\" \";\" srid\n | \"point\" \":\" geotype\n | \"point\" \";\" srid \":\" geotype\n\n linestring : \"linestring\"\n | \"linestring\" \";\" srid\n | \"linestring\" \":\" geotype\n | \"linestring\" \";\" srid \":\" geotype\n\n polygon : \"polygon\"\n | \"polygon\" \";\" srid\n | \"polygon\" \":\" geotype\n | \"polygon\" \";\" srid \":\" geotype\n\n multilinestring : \"multilinestring\"\n | \"multilinestring\" \";\" srid\n | \"multilinestring\" \":\" geotype\n | \"multilinestring\" \";\" srid \":\" geotype\n\n multipoint : \"multipoint\"\n | \"multipoint\" \";\" srid\n | \"multipoint\" \":\" geotype\n | \"multipoint\" \";\" srid \":\" geotype\n\n multipolygon : \"multipolygon\"\n | \"multipolygon\" \";\" srid\n | \"multipolygon\" \":\" geotype\n | \"multipolygon\" \";\" srid \":\" geotype\n\n json : \"json\"\n\n jsonb : \"jsonb\"\n\n uuid : \"uuid\"\n\n \"\"\"\n if self._accept(Tokens.PRIMITIVE):\n assert self.tok is not None\n return self.tok.value\n\n elif self._accept(Tokens.TIMESTAMP):\n if self._accept(Tokens.LPAREN):\n self._expect(Tokens.STRARG)\n assert self.tok is not None\n timezone = self.tok.value[1:-1] # remove surrounding quotes\n self._expect(Tokens.RPAREN)\n return Timestamp(timezone=timezone)\n return timestamp\n\n elif self._accept(Tokens.TIME):\n return Time()\n\n elif self._accept(Tokens.INTERVAL):\n if self._accept(Tokens.LBRACKET):\n self._expect(Tokens.PRIMITIVE)\n assert self.tok is not None\n value_type = self.tok.value\n self._expect(Tokens.RBRACKET)\n else:\n value_type = int32\n\n if self._accept(Tokens.LPAREN):\n self._expect(Tokens.STRARG)\n assert self.tok is not None\n unit = self.tok.value[1:-1] # remove surrounding quotes\n self._expect(Tokens.RPAREN)\n else:\n unit = 's'\n\n return Interval(unit, value_type)\n\n elif self._accept(Tokens.DECIMAL):\n if self._accept(Tokens.LPAREN):\n self._expect(Tokens.INTEGER)\n assert self.tok is not None\n precision = self.tok.value\n\n self._expect(Tokens.COMMA)\n\n self._expect(Tokens.INTEGER)\n scale = self.tok.value\n\n self._expect(Tokens.RPAREN)\n else:\n precision = 9\n scale = 0\n return Decimal(precision, scale)\n\n elif self._accept(Tokens.VARCHAR) or self._accept(Tokens.CHAR):\n # VARCHAR, VARCHAR(n), CHAR, and CHAR(n) all parse as STRING\n if self._accept(Tokens.LPAREN):\n 
self._expect(Tokens.INTEGER)\n self._expect(Tokens.RPAREN)\n return string\n return string\n\n elif self._accept(Tokens.ARRAY):\n self._expect(Tokens.LBRACKET)\n\n value_type = self.type()\n\n self._expect(Tokens.RBRACKET)\n return Array(value_type)\n\n elif self._accept(Tokens.SET):\n self._expect(Tokens.LBRACKET)\n\n value_type = self.type()\n\n self._expect(Tokens.RBRACKET)\n return Set(value_type)\n\n elif self._accept(Tokens.MAP):\n self._expect(Tokens.LBRACKET)\n\n self._expect(Tokens.PRIMITIVE)\n assert self.tok is not None\n key_type = self.tok.value\n\n self._expect(Tokens.COMMA)\n\n value_type = self.type()\n\n self._expect(Tokens.RBRACKET)\n\n return Map(key_type, value_type)\n\n elif self._accept(Tokens.STRUCT):\n self._expect(Tokens.LBRACKET)\n\n self._expect(Tokens.FIELD)\n assert self.tok is not None\n names = [self.tok.value]\n\n self._expect(Tokens.COLON)\n\n types = [self.type()]\n\n while self._accept(Tokens.COMMA):\n self._expect(Tokens.FIELD)\n names.append(self.tok.value)\n\n self._expect(Tokens.COLON)\n types.append(self.type())\n\n self._expect(Tokens.RBRACKET)\n return Struct(names, types)\n\n # json data types\n elif self._accept(Tokens.JSON):\n return JSON()\n\n elif self._accept(Tokens.JSONB):\n return JSONB()\n\n # geo spatial data type\n elif self._accept(Tokens.GEOMETRY):\n return Geometry()\n\n elif self._accept(Tokens.GEOGRAPHY):\n return Geography()\n\n elif self._accept(Tokens.POINT):\n geotype = None\n srid = None\n\n if self._accept(Tokens.SEMICOLON):\n self._expect(Tokens.INTEGER)\n assert self.tok is not None\n srid = self.tok.value\n\n if self._accept(Tokens.COLON):\n if self._accept(Tokens.GEOGRAPHY):\n geotype = 'geography'\n elif self._accept(Tokens.GEOMETRY):\n geotype = 'geometry'\n\n return Point(geotype=geotype, srid=srid)\n\n elif self._accept(Tokens.LINESTRING):\n geotype = None\n srid = None\n\n if self._accept(Tokens.SEMICOLON):\n self._expect(Tokens.INTEGER)\n assert self.tok is not None\n srid = self.tok.value\n\n if self._accept(Tokens.COLON):\n if self._accept(Tokens.GEOGRAPHY):\n geotype = 'geography'\n elif self._accept(Tokens.GEOMETRY):\n geotype = 'geometry'\n\n return LineString(geotype=geotype, srid=srid)\n\n elif self._accept(Tokens.POLYGON):\n geotype = None\n srid = None\n\n if self._accept(Tokens.SEMICOLON):\n self._expect(Tokens.INTEGER)\n assert self.tok is not None\n srid = self.tok.value\n\n if self._accept(Tokens.COLON):\n if self._accept(Tokens.GEOGRAPHY):\n geotype = 'geography'\n elif self._accept(Tokens.GEOMETRY):\n geotype = 'geometry'\n\n return Polygon(geotype=geotype, srid=srid)\n\n elif self._accept(Tokens.MULTILINESTRING):\n geotype = None\n srid = None\n\n if self._accept(Tokens.SEMICOLON):\n self._expect(Tokens.INTEGER)\n assert self.tok is not None\n srid = self.tok.value\n\n if self._accept(Tokens.COLON):\n if self._accept(Tokens.GEOGRAPHY):\n geotype = 'geography'\n elif self._accept(Tokens.GEOMETRY):\n geotype = 'geometry'\n\n return MultiLineString(geotype=geotype, srid=srid)\n\n elif self._accept(Tokens.MULTIPOINT):\n geotype = None\n srid = None\n\n if self._accept(Tokens.SEMICOLON):\n self._expect(Tokens.INTEGER)\n assert self.tok is not None\n srid = self.tok.value\n\n if self._accept(Tokens.COLON):\n if self._accept(Tokens.GEOGRAPHY):\n geotype = 'geography'\n elif self._accept(Tokens.GEOMETRY):\n geotype = 'geometry'\n\n return MultiPoint(geotype=geotype, srid=srid)\n\n elif self._accept(Tokens.MULTIPOLYGON):\n geotype = None\n srid = None\n\n if self._accept(Tokens.SEMICOLON):\n 
self._expect(Tokens.INTEGER)\n assert self.tok is not None\n srid = self.tok.value\n\n if self._accept(Tokens.COLON):\n if self._accept(Tokens.GEOGRAPHY):\n geotype = 'geography'\n elif self._accept(Tokens.GEOMETRY):\n geotype = 'geometry'\n\n return MultiPolygon(geotype=geotype, srid=srid)\n\n # special string based data types\n elif self._accept(Tokens.UUID):\n return UUID()\n\n else:\n raise SyntaxError('Type cannot be parsed: {}'.format(self.text))\n\n\ndtype = Dispatcher('dtype')\n\nvalidate_type = dtype\n\n\ndef _get_timedelta_units(timedelta: datetime.timedelta) -> List[str]:\n # pandas Timedelta has more granularity\n if hasattr(timedelta, 'components'):\n unit_fields = timedelta.components._fields\n base_object = timedelta.components\n # datetime.timedelta only stores days, seconds, and microseconds internally\n else:\n unit_fields = ['days', 'seconds', 'microseconds']\n base_object = timedelta\n\n time_units = []\n [\n time_units.append(field)\n for field in unit_fields\n if getattr(base_object, field) > 0\n ]\n return time_units\n\n\[email protected](object)\ndef default(value, **kwargs) -> DataType:\n raise com.IbisTypeError('Value {!r} is not a valid datatype'.format(value))\n\n\[email protected](DataType)\ndef from_ibis_dtype(value: DataType) -> DataType:\n return value\n\n\[email protected](str)\ndef from_string(value: str) -> DataType:\n try:\n return TypeParser(value).parse()\n except SyntaxError:\n raise com.IbisTypeError(\n '{!r} cannot be parsed as a datatype'.format(value)\n )\n\n\[email protected](list)\ndef from_list(values: List[GenericAny]) -> Array:\n if not values:\n return Array(null)\n return Array(highest_precedence(map(dtype, values)))\n\n\[email protected](collections.abc.Set)\ndef from_set(values: GenericSet) -> Set:\n if not values:\n return Set(null)\n return Set(highest_precedence(map(dtype, values)))\n\n\ninfer = Dispatcher('infer')\n\n\ndef higher_precedence(left: DataType, right: DataType) -> DataType:\n if castable(left, right, upcast=True):\n return right\n elif castable(right, left, upcast=True):\n return left\n\n raise com.IbisTypeError(\n 'Cannot compute precedence for {} and {} types'.format(left, right)\n )\n\n\ndef highest_precedence(dtypes: Iterator[DataType]) -> DataType:\n \"\"\"Compute the highest precedence of `dtypes`.\"\"\"\n return functools.reduce(higher_precedence, dtypes)\n\n\[email protected](object)\ndef infer_dtype_default(value: GenericAny) -> DataType:\n \"\"\"Default implementation of :func:`~ibis.expr.datatypes.infer`.\"\"\"\n raise com.InputTypeError(value)\n\n\[email protected](collections.OrderedDict)\ndef infer_struct(value: Mapping[str, GenericAny]) -> Struct:\n \"\"\"Infer the :class:`~ibis.expr.datatypes.Struct` type of `value`.\"\"\"\n if not value:\n raise TypeError('Empty struct type not supported')\n return Struct(list(value.keys()), list(map(infer, value.values())))\n\n\[email protected](collections.abc.Mapping)\ndef infer_map(value: Mapping[GenericAny, GenericAny]) -> Map:\n \"\"\"Infer the :class:`~ibis.expr.datatypes.Map` type of `value`.\"\"\"\n if not value:\n return Map(null, null)\n return Map(\n highest_precedence(map(infer, value.keys())),\n highest_precedence(map(infer, value.values())),\n )\n\n\[email protected](list)\ndef infer_list(values: List[GenericAny]) -> Array:\n \"\"\"Infer the :class:`~ibis.expr.datatypes.Array` type of `values`.\"\"\"\n if not values:\n return Array(null)\n return Array(highest_precedence(map(infer, values)))\n\n\[email protected]((set, frozenset))\ndef infer_set(values: 
GenericSet) -> Set:\n \"\"\"Infer the :class:`~ibis.expr.datatypes.Set` type of `values`.\"\"\"\n if not values:\n return Set(null)\n return Set(highest_precedence(map(infer, values)))\n\n\[email protected](datetime.time)\ndef infer_time(value: datetime.time) -> Time:\n return time\n\n\[email protected](datetime.date)\ndef infer_date(value: datetime.date) -> Date:\n return date\n\n\[email protected](datetime.datetime)\ndef infer_timestamp(value: datetime.datetime) -> Timestamp:\n if value.tzinfo:\n return Timestamp(timezone=str(value.tzinfo))\n else:\n return timestamp\n\n\[email protected](datetime.timedelta)\ndef infer_interval(value: datetime.timedelta) -> Interval:\n time_units = _get_timedelta_units(value)\n # we can attempt a conversion in the simplest case, i.e. there is exactly\n # one unit (e.g. pd.Timedelta('2 days') vs. pd.Timedelta('2 days 3 hours')\n if len(time_units) == 1:\n unit = time_units[0]\n return Interval(unit)\n else:\n return interval\n\n\[email protected](str)\ndef infer_string(value: str) -> String:\n return string\n\n\[email protected](builtins.float)\ndef infer_floating(value: builtins.float) -> Double:\n return double\n\n\[email protected](int)\ndef infer_integer(value: int, allow_overflow: bool = False) -> Integer:\n for dtype in (int8, int16, int32, int64):\n if dtype.bounds.lower <= value <= dtype.bounds.upper:\n return dtype\n\n if not allow_overflow:\n raise OverflowError(value)\n\n return int64\n\n\[email protected](bool)\ndef infer_boolean(value: bool) -> Boolean:\n return boolean\n\n\[email protected]((type(None), Null))\ndef infer_null(value: Optional[Null]) -> Null:\n return null\n\n\nif IS_SHAPELY_AVAILABLE:\n\n @infer.register(shapely.geometry.Point)\n def infer_shapely_point(value: shapely.geometry.Point) -> Point:\n return point\n\n @infer.register(shapely.geometry.LineString)\n def infer_shapely_linestring(\n value: shapely.geometry.LineString,\n ) -> LineString:\n return linestring\n\n @infer.register(shapely.geometry.Polygon)\n def infer_shapely_polygon(value: shapely.geometry.Polygon) -> Polygon:\n return polygon\n\n @infer.register(shapely.geometry.MultiLineString)\n def infer_shapely_multilinestring(\n value: shapely.geometry.MultiLineString,\n ) -> MultiLineString:\n return multilinestring\n\n @infer.register(shapely.geometry.MultiPoint)\n def infer_shapely_multipoint(\n value: shapely.geometry.MultiPoint,\n ) -> MultiPoint:\n return multipoint\n\n @infer.register(shapely.geometry.MultiPolygon)\n def infer_shapely_multipolygon(\n value: shapely.geometry.MultiPolygon,\n ) -> MultiPolygon:\n return multipolygon\n\n\ncastable = Dispatcher('castable')\n\n\[email protected](DataType, DataType)\ndef can_cast_subtype(source: DataType, target: DataType, **kwargs) -> bool:\n return isinstance(target, type(source))\n\n\[email protected](Any, DataType)\[email protected](DataType, Any)\[email protected](Any, Any)\[email protected](Null, Any)\[email protected](Integer, Category)\[email protected](Integer, (Floating, Decimal))\[email protected](Floating, Decimal)\[email protected]((Date, Timestamp), (Date, Timestamp))\ndef can_cast_any(source: DataType, target: DataType, **kwargs) -> bool:\n return True\n\n\[email protected](Null, DataType)\ndef can_cast_null(source: DataType, target: DataType, **kwargs) -> bool:\n return target.nullable\n\n\nIntegral = TypeVar('Integral', SignedInteger, UnsignedInteger)\n\n\[email protected](SignedInteger, UnsignedInteger)\[email protected](UnsignedInteger, SignedInteger)\ndef 
can_cast_to_differently_signed_integer_type(\n source: Integral, target: Integral, value: Optional[int] = None, **kwargs\n) -> bool:\n if value is None:\n return False\n bounds = target.bounds\n return bounds.lower <= value <= bounds.upper\n\n\[email protected](SignedInteger, SignedInteger)\[email protected](UnsignedInteger, UnsignedInteger)\ndef can_cast_integers(source: Integral, target: Integral, **kwargs) -> bool:\n return target._nbytes >= source._nbytes\n\n\[email protected](Floating, Floating)\ndef can_cast_floats(\n source: Floating, target: Floating, upcast: bool = False, **kwargs\n) -> bool:\n if upcast:\n return target._nbytes >= source._nbytes\n\n # double -> float must be allowed because\n # float literals are inferred as doubles\n return True\n\n\[email protected](Decimal, Decimal)\ndef can_cast_decimals(source: Decimal, target: Decimal, **kwargs) -> bool:\n return (\n target.precision >= source.precision and target.scale >= source.scale\n )\n\n\[email protected](Interval, Interval)\ndef can_cast_intervals(source: Interval, target: Interval, **kwargs) -> bool:\n return source.unit == target.unit and castable(\n source.value_type, target.value_type\n )\n\n\[email protected](Integer, Boolean)\ndef can_cast_integer_to_boolean(\n source: Integer, target: Boolean, value: Optional[int] = None, **kwargs\n) -> bool:\n return value is not None and (value == 0 or value == 1)\n\n\[email protected](Integer, Interval)\ndef can_cast_integer_to_interval(\n source: Interval, target: Interval, **kwargs\n) -> bool:\n return castable(source, target.value_type)\n\n\[email protected](String, (Date, Time, Timestamp))\ndef can_cast_string_to_temporal(\n source: String,\n target: Union[Date, Time, Timestamp],\n value: Optional[str] = None,\n **kwargs,\n) -> bool:\n if value is None:\n return False\n try:\n pd.Timestamp(value)\n except ValueError:\n return False\n else:\n return True\n\n\nCollection = TypeVar('Collection', Array, Set)\n\n\[email protected](Array, Array)\[email protected](Set, Set)\ndef can_cast_variadic(\n source: Collection, target: Collection, **kwargs\n) -> bool:\n return castable(source.value_type, target.value_type)\n\n\[email protected](JSON, JSON)\ndef can_cast_json(source, target, **kwargs):\n return True\n\n\[email protected](JSONB, JSONB)\ndef can_cast_jsonb(source, target, **kwargs):\n return True\n\n\n# geo spatial data type\n# cast between same type, used to cast from/to geometry and geography\nGEO_TYPES = (\n Point,\n LineString,\n Polygon,\n MultiLineString,\n MultiPoint,\n MultiPolygon,\n)\n\n\[email protected](Array, GEO_TYPES)\[email protected](GEO_TYPES, Geometry)\[email protected](GEO_TYPES, Geography)\ndef can_cast_geospatial(source, target, **kwargs):\n return True\n\n\[email protected](UUID, UUID)\ndef can_cast_special_string(source, target, **kwargs):\n return True\n\n\n# @castable.register(Map, Map)\n# def can_cast_maps(source, target):\n# return (source.equals(target) or\n# source.equals(Map(null, null)) or\n# source.equals(Map(any, any)))\n# TODO cast category\n\n\ndef cast(\n source: Union[DataType, str], target: Union[DataType, str], **kwargs\n) -> DataType:\n \"\"\"Attempts to implicitly cast from source dtype to target dtype\"\"\"\n source, result_target = dtype(source), dtype(target)\n\n if not castable(source, result_target, **kwargs):\n raise com.IbisTypeError(\n 'Datatype {} cannot be implicitly '\n 'casted to {}'.format(source, result_target)\n )\n return result_target\n\n\nsame_kind = Dispatcher(\n 'same_kind',\n doc=\"\"\"\\\nCompute whether two 
:class:`~ibis.expr.datatypes.DataType` instances are the\nsame kind.\n\nParameters\n----------\na : DataType\nb : DataType\n\nReturns\n-------\nbool\n    Whether two :class:`~ibis.expr.datatypes.DataType` instances are the same\n    kind.\n\"\"\",\n)\n\n\n@same_kind.register(DataType, DataType)\ndef same_kind_default(a: DataType, b: DataType) -> bool:\n    \"\"\"Return whether `a` is exactly equivalent to `b`\"\"\"\n    return a.equals(b)\n\n\nNumeric = TypeVar('Numeric', Integer, Floating)\n\n\n@same_kind.register(Integer, Integer)\n@same_kind.register(Floating, Floating)\ndef same_kind_numeric(a: Numeric, b: Numeric) -> bool:\n    \"\"\"Return ``True``.\"\"\"\n    return True\n\n\n@same_kind.register(DataType, Null)\ndef same_kind_right_null(a: DataType, _: Null) -> bool:\n    \"\"\"Return whether `a` is nullable.\"\"\"\n    return a.nullable\n\n\n@same_kind.register(Null, DataType)\ndef same_kind_left_null(_: Null, b: DataType) -> bool:\n    \"\"\"Return whether `b` is nullable.\"\"\"\n    return b.nullable\n\n\n@same_kind.register(Null, Null)\ndef same_kind_both_null(a: Null, b: Null) -> bool:\n    \"\"\"Return ``True``.\"\"\"\n    return True\n"
] | [
[
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
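The `code` field of the record above is ibis's datatype module, built around three `Dispatcher` entry points: `dtype()` accepts a `DataType`, a type string (routed through `TypeParser`), or a Python container; `infer()` maps concrete Python values to datatypes; `castable()` encodes the implicit-cast rules. A minimal usage sketch follows, under the assumption that the module is importable at its usual upstream location `ibis.expr.datatypes`; every call used below is defined in the source captured in this record.

```python
# Minimal sketch of the dtype/infer/castable dispatchers shown in the record
# above. Assumption: the module is installed as ibis.expr.datatypes.
import ibis.expr.datatypes as dt

# TypeParser grammar: primitive, parameterized, and nested types.
arr = dt.dtype('array<int64>')         # Array with value_type int64
mp = dt.dtype('map<string, double>')   # Map with string keys, double values
pt = dt.dtype('point;4326:geometry')   # Point with srid=4326 and a geotype

# infer() returns the narrowest integer type whose bounds contain the value.
assert dt.infer(1).equals(dt.int8)
assert dt.infer(2 ** 20).equals(dt.int32)

# castable() encodes the implicit-cast lattice: Integer -> Floating is
# registered explicitly, while Floating -> Integer falls through to the
# generic subtype check and is rejected.
assert dt.castable(dt.int32, dt.float64)
assert not dt.castable(dt.float64, dt.int32)
```

One design point worth noting: `_TYPE_RULES` is an ordered mapping of named regex groups compiled into a single alternation, and `_generate_tokens()` recovers the matched rule through `pat.groupindex`, so dictionary order resolves overlapping tokens (for example, `jsonb` is registered before `json`).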
Jasonpoh/lenstronomy_sims | [
"dcdfc61ce5351ac94565228c822f1c94392c1ad6"
] | [
"lenstronomy/LensModel/Profiles/sis_truncate.py"
] | [
"__author__ = 'sibirrer'\n\nimport numpy as np\n\nclass SIS_truncate(object):\n \"\"\"\n this class contains the function and the derivatives of the Singular Isothermal Sphere\n \"\"\"\n param_names = ['theta_E', 'r_trunc', 'center_x', 'center_y']\n lower_limit_default = {'theta_E': 0, 'r_trunc': 0, 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'theta_E': 100, 'r_trunc': 100, 'center_x': 100, 'center_y': 100}\n\n def function(self, x, y, theta_E, r_trunc, center_x=0, center_y=0):\n x_shift = x - center_x\n y_shift = y - center_y\n r = np.sqrt(x_shift*x_shift + y_shift*y_shift)\n if isinstance(r, int) or isinstance(r, float):\n if r < r_trunc:\n f_ = theta_E * r\n elif r < 2*r_trunc:\n f_ = theta_E * r_trunc + 1. / 2 * theta_E * (3 - r / r_trunc) * (r - r_trunc)\n else:\n f_ = 3./2 * theta_E * r_trunc\n else:\n f_ = np.zeros_like(r)\n f_[r < r_trunc] = theta_E * r[r < r_trunc]\n r_ = r[(r < 2*r_trunc) & (r > r_trunc)]\n f_[(r < 2*r_trunc) & (r > r_trunc)] = theta_E * r_trunc + 1. / 2 * theta_E * (3 - r_ / r_trunc) * (r_ - r_trunc)\n f_[r > 2*r_trunc] = 3./2 * theta_E * r_trunc\n return f_\n\n def derivatives(self, x, y, theta_E, r_trunc, center_x=0, center_y=0):\n \"\"\"\n returns df/dx and df/dy of the function\n \"\"\"\n x_shift = x - center_x\n y_shift = y - center_y\n\n dphi_dr = self._dphi_dr(x_shift, y_shift, theta_E, r_trunc)\n dr_dx, dr_dy = self._dr_dx(x_shift, y_shift)\n f_x = dphi_dr * dr_dx\n f_y = dphi_dr * dr_dy\n return f_x, f_y\n\n def hessian(self, x, y, theta_E, r_trunc, center_x=0, center_y=0):\n \"\"\"\n returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy\n \"\"\"\n x_shift = x - center_x\n y_shift = y - center_y\n dphi_dr = self._dphi_dr(x_shift, y_shift, theta_E, r_trunc)\n d2phi_dr2 = self._d2phi_dr2(x_shift, y_shift, theta_E, r_trunc)\n dr_dx, dr_dy = self._dr_dx(x, y)\n d2r_dx2, d2r_dy2, d2r_dxy = self._d2r_dx2(x_shift, y_shift)\n f_xx = d2r_dx2*dphi_dr + dr_dx**2*d2phi_dr2\n f_yy = d2r_dy2*dphi_dr + dr_dy**2*d2phi_dr2\n f_xy = d2r_dxy*dphi_dr + dr_dx*dr_dy*d2phi_dr2\n return f_xx, f_yy, f_xy\n\n def _dphi_dr(self, x, y, theta_E, r_trunc):\n \"\"\"\n\n :param x:\n :param y:\n :param r_trunc:\n :return:\n \"\"\"\n r = np.sqrt(x*x + y*y)\n if isinstance(r, int) or isinstance(r, float):\n if r == 0:\n a = 0\n elif r < r_trunc:\n a = theta_E\n elif r < 2*r_trunc:\n a = theta_E * (2 - r / r_trunc)\n else:\n a = 0\n else:\n a = np.zeros_like(r)\n a[(r < r_trunc) & (r > 0)] = theta_E\n r_ = r[(r < 2*r_trunc) & (r >= r_trunc)]\n a[(r < 2*r_trunc) & (r >= r_trunc)] = theta_E * (2 - r_ / r_trunc)\n a[r >= 2*r_trunc] = 0\n return a\n\n def _d2phi_dr2(self, x, y, theta_E, r_trunc):\n \"\"\"\n second derivative of the potential in radial direction\n :param x:\n :param y:\n :param theta_E:\n :param r_trunc:\n :return:\n \"\"\"\n r = np.sqrt(x*x + y*y)\n if isinstance(r, int) or isinstance(r, float):\n if r < r_trunc:\n a = 0\n elif r < 2*r_trunc:\n a = -theta_E / r_trunc\n else:\n a = 0\n else:\n a = np.zeros_like(r)\n a[r < r_trunc] = 0\n a[(r < 2*r_trunc) & (r > r_trunc)] = -theta_E / r_trunc\n a[r > 2*r_trunc] = 0\n return a\n\n def _dr_dx(self, x, y):\n \"\"\"\n derivative of dr/dx, dr/dy\n :param x:\n :param y:\n :return:\n \"\"\"\n\n r = np.sqrt(x**2 + y**2)\n if isinstance(r, int) or isinstance(r, float):\n if r == 0:\n r = 1\n else:\n r[r == 0] = 1\n return x/r, y/r\n\n def _d2r_dx2(self, x, y):\n \"\"\"\n second derivative\n :param x:\n :param y:\n :return:\n \"\"\"\n r = np.sqrt(x**2 + y**2)\n if isinstance(r, int) or isinstance(r, float):\n 
if r == 0:\n r = 1\n else:\n r[r == 0] = 1\n return y**2/r**3, x**2/r**3, -x*y/r**3"
] | [
[
"numpy.zeros_like",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
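The record above implements a softly truncated singular isothermal sphere: the potential grows linearly in `r` inside `r_trunc`, rolls off quadratically between `r_trunc` and `2 * r_trunc`, and saturates at `1.5 * theta_E * r_trunc` beyond. A short sketch follows, assuming the class is importable from the `file_path` stored in the record; radii exactly equal to `r_trunc` or `2 * r_trunc` are avoided on purpose, since the array branches use strict inequalities and leave those boundary entries at zero.

```python
# Sketch exercising the SIS_truncate profile from the record above.
# Assumption: the module is importable at the recorded file_path.
import numpy as np
from lenstronomy.LensModel.Profiles.sis_truncate import SIS_truncate

profile = SIS_truncate()
theta_E, r_trunc = 1.0, 1.0

# Radii straddle the three regimes; the regime boundaries are avoided.
x = np.array([0.3, 0.9, 1.5, 2.5])
y = np.zeros_like(x)

f = profile.function(x, y, theta_E, r_trunc)                # lensing potential
f_x, f_y = profile.derivatives(x, y, theta_E, r_trunc)      # deflection angles
f_xx, f_yy, f_xy = profile.hessian(x, y, theta_E, r_trunc)  # second derivatives

assert np.allclose(f_x[:2], theta_E)               # constant SIS deflection inside
assert np.isclose(f[-1], 1.5 * theta_E * r_trunc)  # saturated potential outside
```

A caveat visible in the source: `hessian` computes `dr_dx, dr_dy` from the unshifted `x, y` while its other terms use `x_shift, y_shift`, so its output is only consistent for the default `center_x = center_y = 0` used here.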
bing-jian/diffusion-mri | [
"3dad34ae002b4435dd6693a2e4b71b46c889e172"
] | [
"Python/test/test_condition_number.py"
] | [
"#!/usr/bin/env python\n#coding=utf-8\n\nimport os\nimport matplotlib\nimport pylab\n\nfrom dwmri.utils import *\n\ndef compute_wishart_A(p):\n g = pylab.load('81vectors.txt')\n B = prepareB(math.sqrt(1500.0)*g)\n ew = [0.0015,0.0004,0.0004]\n ev1 = pylab.load('81vectors.txt')\n ev2 = pylab.load('321vectors.txt')\n A1 = assemble_wishart_matrix(B,ev1,ew,p)\n A2 = assemble_wishart_matrix(B,ev2,ew,p)\n return A1,A2\n\ndef comp_condA_wishart():\n g = pylab.load('81vectors.txt')\n B = prepareB(math.sqrt(1500.0)*g)\n p_list = [(i+1)*(i+1)+1 for i in range(17)]\n ew = [0.0015,0.0004,0.0004]\n ev1 = pylab.load('81vectors.txt')\n ev2 = pylab.load('321vectors.txt')\n condA_list1 = []\n condA_list2 = []\n for p in p_list:\n A = assemble_wishart_matrix(B,ev1,ew,p)\n U,S,V = numpy.linalg.svd(A)\n condA = S[0]/S[-1]\n condA_list1.append(condA)\n\n A = assemble_wishart_matrix(B,ev2,ew,p)\n U,S,V = numpy.linalg.svd(A)\n condA = S[0]/S[-1]\n condA_list2.append(condA)\n\n return p_list,condA_list1,condA_list2\n\ndef comp_condA_spike():\n g = pylab.load('81vectors.txt')\n B = prepareB(math.sqrt(1500.0)*g)\n sigma_list = range(10,110,10)\n ew = [1,0,0]\n ev1 = pylab.load('81vectors.txt')\n ev2 = pylab.load('321vectors.txt')\n tessellation = pylab.load('vertices_iso_3.txt')\n condA_list1 = []\n condA_list2 = []\n for sigma in sigma_list:\n\n A = assemble_spike_matrix(B,ev1,ew,sigma,tessellation)\n U,S,V = numpy.linalg.svd(A)\n condA = S[0]/S[-1]\n condA_list1.append(condA)\n\n A = assemble_spike_matrix(B,ev2,ew,sigma,tessellation)\n U,S,V = numpy.linalg.svd(A)\n condA = S[0]/S[-1]\n condA_list2.append(condA)\n\n return sigma_list,condA_list1,condA_list2\n\ndef test_cond_wishart():\n #from matplotlib import rc\n #rc('text', usetex=True)\n pylab.rcParams['axes.formatter.limits'] = (-5,5)\n p,c1,c2 = comp_condA_wishart()\n pylab.plot(p,c1,'ro--',label='81x81',linewidth=2)\n pylab.plot(p,c2,'bs-',label='81x321',linewidth=2)\n #pylab.title('condition numbers of assembled matrices',fontsize=20)\n pylab.xlabel('p in Wishart distributions',fontsize=24,fontweight='bold')\n pylab.ylabel('condition numbers, cond(A)',fontsize=24,fontweight='bold')\n fontprop = matplotlib.font_manager.FontProperties(size=24,weight='bold')\n pylab.legend(('size(A):81x81', 'size(A): 81x321'),loc='center right',prop = fontprop)\n for label in pylab.axes().get_xticklabels():\n label.set_fontsize(16)\n label.set_fontweight('bold')\n for label in pylab.axes().get_yticklabels():\n label.set_fontsize(16)\n label.set_fontweight('bold')\n\n pylab.grid(False)\n pylab.show()\n if not os.path.isdir('figures'):\n os.mkdir('figures')\n pylab.savefig('./figures/cond_wishart.png',dpi=300)\n pylab.savefig('./figures/cond_wishart.eps',dpi=300)\n\n\ndef test_cond_spike():\n from matplotlib import rc\n rc('text', usetex=True)\n rc('font', weight='bold')\n pylab.rcParams['axes.formatter.limits'] = (-5,5)\n p,c1,c2 = comp_condA_spike()\n pylab.plot(p,c1,'ro--',label='81x81',linewidth=2)\n pylab.plot(p,c2,'bs-',label='81x321',linewidth=2)\n #pylab.title('condition numbers of assembled matrices',fontsize=20)\n pylab.xlabel(r'$\\sigma$ in radial basis functions',fontsize=24,fontweight='bold')\n pylab.ylabel('condition numbers, cond(A)',fontsize=24,fontweight='bold')\n fontprop = matplotlib.font_manager.FontProperties(size=24,weight='bold')\n pylab.legend(('size(A):81x81', 'size(A): 81x321'),loc='upper center',prop = fontprop)\n\n ## try pylab.setp()\n for label in pylab.axes().get_xticklabels():\n label.set_fontsize(16)\n label.set_fontweight('bold')\n for label in 
pylab.axes().get_yticklabels():\n label.set_fontsize(16)\n label.set_fontweight('bold')\n\n pylab.grid(False)\n pylab.show()\n if not os.path.isdir('figures'):\n os.mkdir('figures')\n pylab.savefig('./figures/cond_spike.png',dpi=300)\n pylab.savefig('./figures/cond_spike.eps',dpi=300)\n"
] | [
[
"matplotlib.font_manager.FontProperties",
"matplotlib.rc"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
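Both helpers in the record above, `comp_condA_wishart()` and `comp_condA_spike()`, reduce to the same measurement: assemble a system matrix `A`, take its SVD, and report `cond(A) = S[0] / S[-1]`, the ratio of the largest to the smallest singular value. A self-contained sketch follows, with a random matrix standing in for the assembled system, since the `81vectors.txt` / `321vectors.txt` gradient files the record reads are not part of this dump.

```python
# Self-contained sketch of the condition-number computation used above;
# a random matrix stands in for the assembled Wishart/spike system matrix.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((81, 321))  # the 81x321 shape from the plot legends above

U, S, Vt = np.linalg.svd(A)
cond_A = S[0] / S[-1]  # numpy returns singular values in descending order

# For a full-rank matrix this matches numpy's built-in 2-norm condition number.
assert np.isclose(cond_A, np.linalg.cond(A))
print(cond_A)
```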
raunaqtri1/MINT-Transformation | [
"cdb95ff3ee05b109e5f2a1016d52702819691701"
] | [
"funcs/topoflow/topoflow/components/channels_base.py"
] | [
"\n# See \"d_bankfull\" in update_flow_depth() ######## (2/21/13)\n# NB! update_diversion() is currently COMMENTED OUT.\n# See \"(5/13/10)\" for a temporary fix.\n\n#------------------------------------------------------------------------\n# Copyright (c) 2001-2020, Scott D. Peckham\n#\n# Apr 2020. Added set_new_defaults(), disable_all_output().\n# Oct 2019. Added FLOOD_OPTION and CHECK_STABILITY flags.\n# Sep 2014. Wrote new update_diversions().\n# New standard names and BMI updates and testing.\n# Nov 2013. Converted TopoFlow to a Python package.\n# Feb 2013. Adapted to use EMELI framework.\n# Jan 2013. Shared scalar doubles are now 0D numpy arrays.\n# This makes them mutable and allows components with\n# a reference to them to see them change.\n# So far: Q_outlet, Q_peak, Q_min...\n# Jan 2013. Revised handling of input/output names.\n# Oct 2012. CSDMS Standard Names and BMI.\n# May 2012. Commented out diversions.update() for now. #######\n# May 2012. Shared scalar doubles are now 1-element 1D numpy arrays.\n# This makes them mutable and allows components with\n# a reference to them to see them change.\n# So far: Q_outlet, Q_peak, Q_min...\n# May 2010. Changes to initialize() and read_cfg_file()\n# Mar 2010. Changed codes to code, widths to width,\n# angles to angle, nvals to nval, z0vals to z0val,\n# slopes to slope (for GUI tools and consistency\n# across all process components)\n# Aug 2009. Updates.\n# Jul 2009. Updates.\n# May 2009. Updates.\n# Jan 2009. Converted from IDL.\n\n#-----------------------------------------------------------------------\n# NB! In the CFG file, change MANNING and LAW_OF_WALL flags to\n# a single string entry like \"friction method\". #########\n#-----------------------------------------------------------------------\n# Notes: Set self.u in manning and law_of_wall functions ??\n# Update friction factor in manning() and law_of_wall() ?\n# Double check how Rh is used in law_of_the_wall().\n\n# d8_flow has \"flow_grids\", but this one has \"codes\".\n# Make sure values are not stored twice.\n#-----------------------------------------------------------------------\n\n#-----------------------------------------------------------------------\n# NOTES: This file defines a \"base class\" for channelized flow\n# components as well as functions used by most or\n# all channel flow methods. The methods of this class\n# (especially \"update_velocity\") should be over-ridden as\n# necessary for different methods of modeling channelized\n# flow. 
See channels_kinematic_wave.py,\n# channels_diffusive_wave.py and channels_dynamic_wave.py.\n#-----------------------------------------------------------------------\n# NOTES: update_free_surface_slope() is called by the\n# update_velocity() methods of channels_diffusive_wave.py\n# and channels_dynamic_wave.py.\n#-----------------------------------------------------------------------\n#\n# class channels_component\n#\n# ## get_attribute() # (defined in each channel component)\n# get_input_var_names() # (5/15/12)\n# get_output_var_names() # (5/15/12)\n# get_var_name() # (5/15/12)\n# get_var_units() # (5/15/12)\n#-----------------------------\n# set_constants()\n# set_missing_cfg_options() # (4/29/20)\n# initialize()\n# update()\n# finalize()\n# set_computed_input_vars() # (5/11/10)\n#----------------------------------\n# initialize_d8_vars() ########\n# initialize_computed_vars()\n# initialize_diversion_vars() # (9/22/14)\n# initialize_outlet_values()\n# initialize_peak_values()\n# initialize_min_and_max_values() # (2/3/13)\n#-------------------------------------\n# update_flood_d8_vars() # (9/17/19, for flooding)\n# update_R()\n# update_R_integral()\n# update_discharge()\n# update_flood_discharge() # (9/20/19)\n# update_diversions() # (9/22/14)\n# update_flow_volume()\n# update_flood_volume() # (9/20/19)\n# update_flow_depth_LAST()\n# update_flow_depth() # (9/16/19, update)\n# update_flood_depth() # (9/20/19)\n# update_free_surface_slope()\n# update_shear_stress() # (9/9/14, depth-slope product)\n# update_shear_speed() # (9/9/14)\n# update_trapezoid_Rh()\n# update_friction_factor() # (9/9/14)\n#----------------------------------\n# update_velocity() # (override as needed)\n# update_velocity_on_edges()\n# update_froude_number() # (9/9/14)\n#----------------------------------\n# update_outlet_values()\n# update_peak_values() # (at the main outlet)\n# update_Q_out_integral() # (moved here from basins.py)\n# update_mins_and_maxes() # (don't add into update())\n# update_total_channel_water_volume() # (9/17/19)\n# update_total_land_water_volume() # (9/17/19)\n# check_flow_depth()\n# check_flow_velocity()\n#----------------------------------\n# open_input_files()\n# read_input_files()\n# close_input_files()\n#----------------------------------\n# update_outfile_names()\n# bundle_output_files() # (9/21/14. 
Not used yet)\n# disable_all_output() # (04/29/20)\n# open_output_files()\n# write_output_files()\n# close_output_files()\n# save_grids()\n# save_pixel_values()\n#----------------------------------\n# manning_formula()\n# law_of_the_wall()\n# print_status_report()\n# remove_bad_slopes() \n\n# Functions: # (stand-alone versions of these)\n# Trapezoid_Rh()\n# Manning_Formula()\n# Law_of_the_Wall()\n \n#-----------------------------------------------------------------------\n\nimport numpy as np\nimport os, os.path\nimport copy\n\nfrom topoflow.utils import BMI_base\nfrom topoflow.utils import file_utils ###\nfrom topoflow.utils import model_input\nfrom topoflow.utils import model_output\nfrom topoflow.utils import ncgs_files ###\nfrom topoflow.utils import ncts_files ###\nfrom topoflow.utils import rtg_files ###\nfrom topoflow.utils import text_ts_files ###\nfrom topoflow.utils import tf_utils\n\n#-------------------------------------------------------\n# NOTE: Do not import \"d8_base\" itself, it won't work\n#-------------------------------------------------------\nfrom topoflow.components import d8_global as d8_base # (11/11/16)\n## from topoflow.utils import tf_d8_base as d8_base\n\n#-----------------------------------------------------------------------\nclass channels_component( BMI_base.BMI_component ):\n\n #-----------------------------------------------------------\n # Note: rainfall_volume_flux *must* be liquid-only precip.\n #----------------------------------------------------------- \n _input_var_names = [\n 'atmosphere_water__rainfall_volume_flux', # (P_rain)\n 'glacier_ice__melt_volume_flux', # (MR)\n 'land_surface_water__baseflow_volume_flux', # (GW)\n 'land_surface_water__evaporation_volume_flux', # (ET)\n 'soil_surface_water__infiltration_volume_flux', # (IN)\n 'snowpack__melt_volume_flux', # (SM)\n 'water-liquid__mass-per-volume_density' ] # (rho_H2O)\n #------------------------------------------------------------------\n# 'canals__count', # n_canals\n# 'canals_entrance__x_coordinate', # canals_in_x\n# 'canals_entrance__y_coordinate', # canals_in_y\n# 'canals_entrance_water__volume_fraction', # Q_canals_fraction\n# 'canals_exit__x_coordinate', # canals_out_x\n# 'canals_exit__y_coordinate', # canals_out_y\n# 'canals_exit_water__volume_flow_rate', # Q_canals_out\n# 'sinks__count', # n_sinks\n# 'sinks__x_coordinate', # sinks_x\n# 'sinks__y_coordinate', # sinks_y\n# 'sinks_water__volume_flow_rate', # Q_sinks\n# 'sources__count', # n_sources\n# 'sources__x_coordinate', # sources_x\n# 'sources__y_coordinate', # sources_y\n# 'sources_water__volume_flow_rate' ] # Q_sources\n \n #----------------------------------\n # Maybe add these out_vars later.\n #----------------------------------\n # ['time_sec', 'time_min' ]\n \n _output_var_names = [\n 'basin_outlet_water_flow__half_of_fanning_friction_factor', # f_outlet\n 'basin_outlet_water_x-section__mean_depth', # d_outlet\n 'basin_outlet_water_x-section__peak_time_of_depth', # Td_peak\n 'basin_outlet_water_x-section__peak_time_of_volume_flow_rate', # T_peak\n 'basin_outlet_water_x-section__peak_time_of_volume_flux', # Tu_peak\n 'basin_outlet_water_x-section__time_integral_of_volume_flow_rate', # vol_Q\n 'basin_outlet_water_x-section__time_max_of_mean_depth', # d_peak\n 'basin_outlet_water_x-section__time_max_of_volume_flow_rate', # Q_peak\n 'basin_outlet_water_x-section__time_max_of_volume_flux', # u_peak\n 'basin_outlet_water_x-section__volume_flow_rate', # Q_outlet\n 'basin_outlet_water_x-section__volume_flux', # u_outlet\n 
#--------------------------------------------------\n 'canals_entrance_water__volume_flow_rate', # Q_canals_in \n #-------------------------------------------------- \n 'channel_bottom_surface__slope', # S_bed \n 'channel_bottom_water_flow__domain_max_of_log_law_roughness_length', # z0val_max\n 'channel_bottom_water_flow__domain_min_of_log_law_roughness_length', # z0val_min\n 'channel_bottom_water_flow__log_law_roughness_length', # z0val\n 'channel_bottom_water_flow__magnitude_of_shear_stress', # tau\n 'channel_bottom_water_flow__shear_speed', # u_star\n 'channel_centerline__sinuosity', # sinu\n 'channel_water__volume', # vol\n 'channel_water_flow__froude_number', # froude\n 'channel_water_flow__half_of_fanning_friction_factor', # f\n 'channel_water_flow__domain_max_of_manning_n_parameter', # nval_max\n 'channel_water_flow__domain_min_of_manning_n_parameter', # nval_min\n 'channel_water_flow__manning_n_parameter', # nval\n 'channel_water_surface__slope', # S_free\n #---------------------------------------------------\n # These might only be available at the end of run.\n #---------------------------------------------------\n 'channel_water_x-section__domain_max_of_mean_depth', # d_max\n 'channel_water_x-section__domain_min_of_mean_depth', # d_min\n 'channel_water_x-section__domain_max_of_volume_flow_rate', # Q_max\n 'channel_water_x-section__domain_min_of_volume_flow_rate', # Q_min\n 'channel_water_x-section__domain_max_of_volume_flux', # u_max\n 'channel_water_x-section__domain_min_of_volume_flux', # u_min\n #--------------------------------------------------------------------- \n 'channel_water_x-section__hydraulic_radius', # Rh\n 'channel_water_x-section__initial_mean_depth', # d0\n 'channel_water_x-section__mean_depth', # d\n 'channel_water_x-section__volume_flow_rate', # Q \n 'channel_water_x-section__volume_flux', # u\n 'channel_water_x-section__wetted_area', # A_wet\n 'channel_water_x-section__wetted_perimeter', # P_wet\n ####### Next one added for flooding: 2019-09-16. ########\n 'channel_water_x-section_top__width', # w_top\n 'channel_x-section_trapezoid_bottom__width', # width\n 'channel_x-section_trapezoid_side__flare_angle', # angle\n ####### Next one added for flooding: 2019-09-16. 
########\n 'land_surface_water__depth', # df\n 'land_surface_water__runoff_volume_flux', # R \n 'land_surface_water__domain_time_integral_of_runoff_volume_flux', # vol_R \n 'model__time_step', # dt\n 'model_grid_cell__area', # da\n #---------------------------------------------------------------------\n 'network_channel_water__volume', # vol_chan\n 'land_surface_water__area_integral_of_depth' ] # vol_land\n ################################################\n\n # These come from input files, not from other components\n _config_var_names = [\n 'channel_bottom_water_flow__log_law_roughness_length', # z0val\n 'channel_centerline__sinuosity', # sinu\n 'channel_water_flow__manning_n_parameter', # nval\n 'channel_water_x-section__bankfull_depth', # d_bankfull, NEW\n 'channel_water_x-section__bankfull_width', # w_bankfull, NEW\n 'channel_water_x-section__initial_mean_depth', # d0\n # 'channel_water_x-section_top__width', # w_top\n 'channel_x-section_trapezoid_bottom__width', # width\n 'channel_x-section_trapezoid_side__flare_angle', # angle\n # Next two vars can be obtained from d8 component.\n# 'land_surface__elevation', # DEM\n# 'land_surface__slope', # S_bed\n 'land_surface_water__depth' ] # df\n \n _var_name_map = {\n 'atmosphere_water__rainfall_volume_flux': 'P_rain',\n 'glacier_ice__melt_volume_flux': 'MR',\n# 'land_surface__elevation': 'DEM',\n# 'land_surface__slope': 'S_bed',\n 'land_surface_water__baseflow_volume_flux': 'GW',\n 'land_surface_water__evaporation_volume_flux': 'ET',\n 'soil_surface_water__infiltration_volume_flux': 'IN',\n 'snowpack__melt_volume_flux': 'SM',\n 'water-liquid__mass-per-volume_density': 'rho_H2O',\n #------------------------------------------------------------------------\n 'basin_outlet_water_flow__half_of_fanning_friction_factor':'f_outlet',\n 'basin_outlet_water_x-section__mean_depth': 'd_outlet',\n 'basin_outlet_water_x-section__peak_time_of_depth': 'Td_peak',\n 'basin_outlet_water_x-section__peak_time_of_volume_flow_rate': 'T_peak',\n 'basin_outlet_water_x-section__peak_time_of_volume_flux': 'Tu_peak',\n 'basin_outlet_water_x-section__volume_flow_rate': 'Q_outlet',\n 'basin_outlet_water_x-section__volume_flux': 'u_outlet',\n 'basin_outlet_water_x-section__time_integral_of_volume_flow_rate': 'vol_Q',\n 'basin_outlet_water_x-section__time_max_of_mean_depth': 'd_peak',\n 'basin_outlet_water_x-section__time_max_of_volume_flow_rate':'Q_peak',\n 'basin_outlet_water_x-section__time_max_of_volume_flux': 'u_peak',\n #--------------------------------------------------------------------------\n 'canals_entrance_water__volume_flow_rate': 'Q_canals_in', \n #-------------------------------------------------------------------------- \n 'channel_bottom_surface__slope': 'S_bed',\n 'channel_bottom_water_flow__domain_max_of_log_law_roughness_length': 'z0val_max',\n 'channel_bottom_water_flow__domain_min_of_log_law_roughness_length': 'z0val_min',\n 'channel_bottom_water_flow__log_law_roughness_length': 'z0val',\n 'channel_bottom_water_flow__magnitude_of_shear_stress': 'tau',\n 'channel_bottom_water_flow__shear_speed': 'u_star',\n 'channel_centerline__sinuosity': 'sinu',\n 'channel_water__volume': 'vol',\n 'channel_water_flow__domain_max_of_manning_n_parameter': 'nval_max',\n 'channel_water_flow__domain_min_of_manning_n_parameter': 'nval_min',\n 'channel_water_flow__froude_number': 'froude',\n 'channel_water_flow__half_of_fanning_friction_factor': 'f',\n 'channel_water_flow__manning_n_parameter': 'nval',\n 'channel_water_surface__slope': 'S_free',\n 
#-----------------------------------------------------------------------\n 'channel_water_x-section__domain_max_of_mean_depth': 'd_max',\n 'channel_water_x-section__domain_min_of_mean_depth': 'd_min',\n 'channel_water_x-section__domain_max_of_volume_flow_rate': 'Q_max',\n 'channel_water_x-section__domain_min_of_volume_flow_rate': 'Q_min',\n 'channel_water_x-section__domain_max_of_volume_flux': 'u_max',\n 'channel_water_x-section__domain_min_of_volume_flux': 'u_min',\n #----------------------------------------------------------------------- \n 'channel_water_x-section__hydraulic_radius': 'Rh',\n 'channel_water_x-section__initial_mean_depth': 'd0',\n 'channel_water_x-section__mean_depth': 'd',\n 'channel_water_x-section__volume_flow_rate': 'Q', \n 'channel_water_x-section__volume_flux': 'u',\n 'channel_water_x-section__wetted_area': 'A_wet',\n 'channel_water_x-section__wetted_perimeter': 'P_wet',\n ## 'channel_water_x-section_top__width': # (not used)\n 'channel_x-section_trapezoid_bottom__width': 'width', ####\n 'channel_x-section_trapezoid_side__flare_angle': 'angle', ####\n 'land_surface_water__depth': 'df',\n 'land_surface_water__domain_time_integral_of_runoff_volume_flux': 'vol_R',\n 'land_surface_water__runoff_volume_flux': 'R',\n 'model__time_step': 'dt',\n 'model_grid_cell__area': 'da',\n #------------------------------------------------------------------\n 'canals__count': 'n_canals',\n 'canals_entrance__x_coordinate': 'canals_in_x',\n 'canals_entrance__y_coordinate': 'canals_in_y',\n 'canals_entrance_water__volume_fraction': 'Q_canals_fraction',\n 'canals_exit__x_coordinate': 'canals_out_x',\n 'canals_exit__y_coordinate': 'canals_out_y',\n 'canals_exit_water__volume_flow_rate': 'Q_canals_out',\n 'sinks__count': 'n_sinks',\n 'sinks__x_coordinate': 'sinks_x',\n 'sinks__y_coordinate': 'sinks_y',\n 'sinks_water__volume_flow_rate': 'Q_sinks',\n 'sources__count': 'n_sources',\n 'sources__x_coordinate': 'sources_x',\n 'sources__y_coordinate': 'sources_y',\n 'sources_water__volume_flow_rate': 'Q_sources',\n #------------------------------------------------------------------\n 'network_channel_water__volume': 'vol_chan',\n 'land_surface_water__area_integral_of_depth': 'vol_land' }\n #####################################\n\n _var_units_map = {\n 'atmosphere_water__rainfall_volume_flux': 'm s-1',\n 'glacier_ice__melt_volume_flux': 'm s-1',\n ## 'land_surface__elevation': 'm',\n ## 'land_surface__slope': '1',\n 'land_surface_water__baseflow_volume_flux': 'm s-1',\n 'land_surface_water__evaporation_volume_flux': 'm s-1',\n 'soil_surface_water__infiltration_volume_flux': 'm s-1',\n 'snowpack__melt_volume_flux': 'm s-1',\n 'water-liquid__mass-per-volume_density': 'kg m-3',\n #--------------------------------------------------------------------------- \n 'basin_outlet_water_flow__half_of_fanning_friction_factor': '1', \n 'basin_outlet_water_x-section__mean_depth': 'm',\n 'basin_outlet_water_x-section__peak_time_of_depth': 'min',\n 'basin_outlet_water_x-section__peak_time_of_volume_flow_rate': 'min',\n 'basin_outlet_water_x-section__peak_time_of_volume_flux': 'min', \n 'basin_outlet_water_x-section__time_integral_of_volume_flow_rate': 'm3',\n 'basin_outlet_water_x-section__time_max_of_mean_depth': 'm',\n 'basin_outlet_water_x-section__time_max_of_volume_flow_rate': 'm3 s-1',\n 'basin_outlet_water_x-section__time_max_of_volume_flux': 'm s-1',\n 'basin_outlet_water_x-section__volume_flow_rate': 'm3 s-1',\n 'basin_outlet_water_x-section__volume_flux': 'm s-1',\n 
#---------------------------------------------------------------------------\n 'canals_entrance_water__volume_flow_rate': 'm3 s-1', \n #--------------------------------------------------------------------------- \n 'channel_bottom_surface__slope': '1',\n 'channel_bottom_water_flow__domain_max_of_log_law_roughness_length': 'm',\n 'channel_bottom_water_flow__domain_min_of_log_law_roughness_length': 'm',\n 'channel_bottom_water_flow__log_law_roughness_length': 'm',\n 'channel_bottom_water_flow__magnitude_of_shear_stress': 'kg m-1 s-2',\n 'channel_bottom_water_flow__shear_speed': 'm s-1',\n 'channel_centerline__sinuosity': '1', \n 'channel_water__volume': 'm3', \n 'channel_water_flow__froude_number': '1',\n 'channel_water_flow__half_of_fanning_friction_factor': '1', \n 'channel_water_flow__manning_n_parameter': 'm-1/3 s',\n 'channel_water_flow__domain_max_of_manning_n_parameter': 'm-1/3 s',\n 'channel_water_flow__domain_min_of_manning_n_parameter': 'm-1/3 s',\n 'channel_water_surface__slope': '1',\n #--------------------------------------------------------------------\n 'channel_water_x-section__domain_max_of_mean_depth': 'm',\n 'channel_water_x-section__domain_min_of_mean_depth': 'm',\n 'channel_water_x-section__domain_max_of_volume_flow_rate': 'm3 s-1',\n 'channel_water_x-section__domain_min_of_volume_flow_rate': 'm3 s-1',\n 'channel_water_x-section__domain_max_of_volume_flux': 'm s-1',\n 'channel_water_x-section__domain_min_of_volume_flux': 'm s-1',\n #--------------------------------------------------------------------\n 'channel_water_x-section__hydraulic_radius': 'm',\n 'channel_water_x-section__initial_mean_depth': 'm',\n 'channel_water_x-section__mean_depth': 'm',\n 'channel_water_x-section__volume_flow_rate': 'm3 s-1',\n 'channel_water_x-section__volume_flux': 'm s-1',\n 'channel_water_x-section__wetted_area': 'm2',\n 'channel_water_x-section__wetted_perimeter': 'm',\n 'channel_x-section_trapezoid_bottom__width': 'm',\n 'channel_x-section_trapezoid_side__flare_angle': 'rad', # CHECKED \n 'land_surface_water__depth': 'm',\n 'land_surface_water__domain_time_integral_of_runoff_volume_flux': 'm3', \n 'land_surface_water__runoff_volume_flux': 'm s-1', \n 'model__time_step': 's',\n 'model_grid_cell__area': 'm2',\n #------------------------------------------------------------------\n 'canals__count': '1',\n 'canals_entrance__x_coordinate': 'm',\n 'canals_entrance__y_coordinate': 'm',\n 'canals_entrance_water__volume_fraction': '1',\n 'canals_exit__x_coordinate': 'm',\n 'canals_exit__y_coordinate': 'm',\n 'canals_exit_water__volume_flow_rate': 'm3 s-1',\n 'sinks__count': '1',\n 'sinks__x_coordinate': 'm',\n 'sinks__y_coordinate': 'm',\n 'sinks_water__volume_flow_rate': 'm3 s-1',\n 'sources__count': '1',\n 'sources__x_coordinate': 'm',\n 'sources__y_coordinate': 'm',\n 'sources_water__volume_flow_rate': 'm3 s-1',\n #------------------------------------------------------------\n 'network_channel_water__volume': 'm3',\n 'land_surface_water__area_integral_of_depth': 'm3' }\n #####################################\n\n #------------------------------------------------ \n # Return NumPy string arrays vs. 
Python lists ?\n #------------------------------------------------\n ## _input_var_names = np.array( _input_var_names )\n ## _output_var_names = np.array( _output_var_names )\n \n #-------------------------------------------------------------------\n def get_input_var_names(self):\n\n #--------------------------------------------------------\n # Note: These are currently variables needed from other\n # components vs. those read from files or GUI.\n #-------------------------------------------------------- \n return self._input_var_names\n \n # get_input_var_names()\n #-------------------------------------------------------------------\n def get_output_var_names(self):\n \n return self._output_var_names\n \n # get_output_var_names()\n #-------------------------------------------------------------------\n def get_config_var_names(self):\n \n # New, proposed BMI function\n return self._config_var_names\n \n # get_config_var_names()\n #-------------------------------------------------------------------\n def get_var_name(self, long_var_name):\n \n return self._var_name_map[ long_var_name ]\n\n # get_var_name()\n #-------------------------------------------------------------------\n def get_var_units(self, long_var_name):\n\n return self._var_units_map[ long_var_name ]\n \n # get_var_units()\n #-------------------------------------------------------------------\n## def get_var_type(self, long_var_name):\n##\n## #---------------------------------------\n## # So far, all vars have type \"double\",\n## # but use the one in BMI_base instead.\n## #---------------------------------------\n## return 'float64'\n## \n## # get_var_type()\n #-------------------------------------------------------------------\n def set_constants(self):\n\n #------------------------\n # Define some constants\n #------------------------\n self.g = np.float64(9.81) # (gravitation const.)\n self.aval = np.float64(0.476) # (integration const.)\n self.kappa = np.float64(0.408) # (von Karman's const.)\n self.law_const = np.sqrt(self.g) / self.kappa\n self.one_third = np.float64(1.0) / 3.0 \n self.two_thirds = np.float64(2.0) / 3.0\n self.deg_to_rad = np.pi / 180.0\n self.rad_to_deg = 180.0 / np.pi\n \n # set_constants()\n #-------------------------------------------------------------------\n def set_missing_cfg_options(self): \n\n #------------------------------------------------------\n # (2019-10-08) Added CHECK_STABILITY flag to CFG file\n # so stability check be turned off to increase speed.\n #------------------------------------------------------\n if not(hasattr(self, 'CHECK_STABILITY')):\n self.CHECK_STABILITY = True\n \n #-------------------------------------------------------------- \n # (2019-10-03) Added FLOOD_OPTION flag to CFG file.\n # If not(FLOOD_OPTION), don't write flood depths (all zeros).\n #--------------------------------------------------------------\n if not(hasattr(self, 'FLOOD_OPTION')):\n self.FLOOD_OPTION = False\n self.SAVE_DF_GRIDS = False\n self.SAVE_DF_PIXELS = False\n\n #--------------------------------------------- \n # Also new in 2019, not in older CFG files\n # Not used then, but still need to be set.\n # Need to be set if FLOOD_OPTION is False ??\n #---------------------------------------------\n if not(hasattr(self, 'd_bankfull_type')):\n self.d_bankfull_type = 'Scalar' # or Grid\n self.d_bankfull = 10.0 # [meters]\n self.d_bankfull_file = ''\n\n # set_missing_cfg_options()\n #-------------------------------------------------------------------\n def initialize(self, cfg_file=None, 
mode=\"nondriver\", SILENT=False): \n\n if not(SILENT):\n print(' ')\n print('Channels component: Initializing...')\n \n self.status = 'initializing' # (OpenMI 2.0 convention)\n self.mode = mode\n self.cfg_file = cfg_file\n \n #-----------------------------------------------\n # Load component parameters from a config file\n #-----------------------------------------------\n self.set_constants() # (12/7/09)\n # print 'CHANNELS calling initialize_config_vars()...'\n self.initialize_config_vars()\n self.set_missing_cfg_options() # (2020-04-29)\n\n # New option, see set_new_defaults().\n if not(self.FLOOD_OPTION):\n self.SAVE_DF_GRIDS = False # (still needed here)\n self.SAVE_DF_PIXELS = False\n self.d_flood_gs_file = ''\n self.d_flood_ts_file = ''\n\n #------------------------------------------------------------\n # Must call read_grid_info() after initialize_config_vars()\n #------------------------------------------------------------\n # print 'CHANNELS calling read_grid_info()...'\n #### self.read_grid_info() # NOW IN initialize_config_vars()\n\n #------------------------------------------------------------\n #print 'CHANNELS calling initialize_basin_vars()...'\n self.initialize_basin_vars() # (5/14/10)\n #-----------------------------------------\n # This must come before \"Disabled\" test.\n #-----------------------------------------\n # print 'CHANNELS calling initialize_time_vars()...'\n self.initialize_time_vars()\n \n #----------------------------------\n # Has component been turned off ?\n #----------------------------------\n if (self.comp_status == 'Disabled'):\n if not(SILENT):\n print('Channels component: Disabled in CFG file.')\n self.disable_all_output() # (04/29/2020)\n self.DONE = True\n self.status = 'initialized' # (OpenMI 2.0 convention) \n return\n\n## print '################################################'\n## print 'min(d0), max(d0) =', self.d0.min(), self.d0.max()\n## print '################################################'\n\n #--------------------------------------------------------\n # Since only Grid type is allowed, these are not set in\n # the CFG file, but need to be defined for next part.\n #--------------------------------------------------------\n self.code_type = 'Grid' # (may not need this one)\n self.slope_type = 'Grid'\n \n ##################################################################\n # Move this block into new: \"initialize_input_file_vars()\" ???\n #---------------------------------------------------\n # Initialize vars to be read from files (11/16/16)\n #---------------------------------------------------\n # Need this in order to use \"update_var()\".\n #----------------------------------------------------------\n # NOTE: read_config_file() sets these to '0.0' if they\n # are not type \"Scalar\", so self has the attribute.\n #----------------------------------------------------------\n if (self.slope_type.lower() != 'scalar'):\n self.slope = self.initialize_var(self.slope_type, dtype='float64')\n if (self.width_type.lower() != 'scalar'):\n self.width = self.initialize_var(self.width_type, dtype='float64')\n if (self.angle_type.lower() != 'scalar'):\n self.angle = self.initialize_var(self.angle_type, dtype='float64')\n if (self.sinu_type.lower() != 'scalar'):\n self.sinu = self.initialize_var(self.sinu_type, dtype='float64')\n if (self.d0_type.lower() != 'scalar'):\n self.d0 = self.initialize_var(self.d0_type, dtype='float64')\n #-------------------------------------------------------------------------------- \n if 
(self.d_bankfull_type.lower() != 'scalar'):\n self.d_bankfull = self.initialize_var(self.d_bankfull_type, dtype='float64') \n# if (self.w_bankfull_type.lower() != 'scalar'):\n# self.w_bankfull = self.initialize_var(self.w_bankfull_type, dtype='float64') \n #-------------------------------------------------------------------------------- \n if (self.MANNING):\n if (self.nval_type.lower() != 'scalar'): \n self.nval = self.initialize_var(self.nval_type, dtype='float64')\n if (self.LAW_OF_WALL):\n if (self.z0val_type.lower() != 'scalar'): \n self.z0val = self.initialize_var(self.z0val_type, dtype='float64')\n\n #------------------------------------------------------\n # Must now do this before read_input_files (11/11/16) \n #------------------------------------------------------\n print('CHANNELS calling initialize_d8_vars()...')\n self.initialize_d8_vars() # (depend on D8 flow grid)\n \n #---------------------------------------------\n # Open input files needed to initialize vars \n #---------------------------------------------\n # Can't move read_input_files() to start of\n # update(), since initial values needed here.\n #---------------------------------------------\n # print 'CHANNELS calling open_input_files()...'\n self.open_input_files()\n print('CHANNELS calling read_input_files()...')\n self.read_input_files()\n\n #--------------------------------------------\n # Set any input variables that are computed\n #--------------------------------------------------\n # NOTE: Must be called AFTER read_input_files().\n #--------------------------------------------------\n print('CHANNELS calling set_computed_input_vars()...')\n self.set_computed_input_vars()\n \n #-----------------------\n # Initialize variables\n #-----------------------\n ## print 'CHANNELS calling initialize_d8_vars()...'\n ## self.initialize_d8_vars() # (depend on D8 flow grid)\n print('CHANNELS calling initialize_computed_vars()...')\n self.initialize_computed_vars()\n\n #--------------------------------------------------\n # (5/12/10) I think this is obsolete now.\n #--------------------------------------------------\n # Make sure self.Q_ts_file is not NULL (12/22/05) \n # This is only output file that is set by default\n # and is still NULL if user hasn't opened the\n # output var dialog for the channel process.\n #--------------------------------------------------\n## if (self.SAVE_Q_PIXELS and (self.Q_ts_file == '')): \n## self.Q_ts_file = (self.case_prefix + '_0D-Q.txt') \n\n self.open_output_files()\n self.status = 'initialized' # (OpenMI 2.0 convention) \n \n # initialize()\n #-------------------------------------------------------------------\n def update(self, dt=-1.0):\n\n ## DEBUG = True\n DEBUG = False\n \n #---------------------------------------------\n # Note that u and d from previous time step\n # must be used on RHS of the equations here.\n #---------------------------------------------\n self.status = 'updating' # (OpenMI 2.0 convention)\n\n #-------------------------------------------------------\n # There may be times where we want to call this method\n # even if component is not the driver. 
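        #-------------------------------------------------------------
        # Sketch of a typical stand-alone run cycle for this component
        # (the CFG filename below is hypothetical):
        #-------------------------------------------------------------
        ##   cc = channels_component()
        ##   cc.initialize( cfg_file='Treynor_channels-kinematic.cfg' )
        ##   while not(cc.DONE):
        ##       cc.update()      # (advances by cc.dt seconds)
        ##   cc.finalize()
        #-------------------------------------------------------------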
# But note that
        # the TopoFlow driver also makes this same call.
        #-------------------------------------------------------
        if (self.mode == 'driver'):
            self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s]')
            ### interval=0.5)  # [seconds]

        # For testing (5/19/12)
        # self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s] CHANNEL')

        #-------------------------------------------
        # Read from files as needed to update vars 
        #-----------------------------------------------------
        # NB! This is currently not needed for the "channel
        # process" because values don't change over time, and
        # read_input_files() is already called by initialize().
        #-----------------------------------------------------
        # if (self.time_index > 0):
        #     if (DEBUG): print('#### Calling read_input_files()...')
        #     self.read_input_files()
        
        #-------------------------
        # Update computed values
        #-------------------------
        if (self.FLOOD_OPTION):
            if (DEBUG): print('#### Calling update_flood_d8_vars()...')
            self.update_flood_d8_vars()  ############ (2019-09-17)
        #------------------------------------------------------------ 
        if (DEBUG): print('#### Calling update_R()...')
        self.update_R()
        if (DEBUG): print('#### Calling update_R_integral()...')
        self.update_R_integral()
        if (DEBUG): print('#### Calling update_channel_discharge()...')
        self.update_channel_discharge()
        #------------------------------------------------------------
        if (self.FLOOD_OPTION):
            if (DEBUG): print('#### Calling update_flood_discharge()...')
            self.update_flood_discharge()  ############ (2019-09-20)
        if (DEBUG): print('#### Calling update_discharge()...')
        self.update_discharge()
        if (DEBUG): print('#### Calling update_diversions()...')
        self.update_diversions()
        if (DEBUG): print('#### Calling update_flow_volume()...')
        self.update_flow_volume()
        #------------------------------------------------------------
        if (self.FLOOD_OPTION):
            if (DEBUG): print('#### Calling update_flood_volume()...')
            self.update_flood_volume()  ############ (2019-09-20)
        if (DEBUG): print('#### Calling update_flow_depth()...')
        self.update_flow_depth()
        #------------------------------------------------------------
        if (self.FLOOD_OPTION):
            if (DEBUG): print('#### Calling update_flood_depth()...')
            self.update_flood_depth()  ############ (2019-09-20)
        #-----------------------------------------------------------------
        if not(self.DYNAMIC_WAVE):
            if (DEBUG): print('#### Calling update_trapezoid_Rh()...')
            self.update_trapezoid_Rh()
            # print 'Rhmin, Rhmax =', self.Rh.min(), self.Rh.max()
        #-----------------------------------------------------------------
        # (9/9/14) Moved this here from update_velocity() methods.
        #----------------------------------------------------------------- 
        if not(self.KINEMATIC_WAVE):
            if (DEBUG): print('#### Calling update_free_surface_slope()...') 
            self.update_free_surface_slope()
        if (DEBUG): print('#### Calling update_shear_stress()...')
        self.update_shear_stress()
        if (DEBUG): print('#### Calling update_shear_speed()...')
        self.update_shear_speed() 
        #-----------------------------------------------------------------
        # Must update friction factor before velocity for DYNAMIC_WAVE.
        #----------------------------------------------------------------- 
        if (DEBUG): print('#### Calling update_friction_factor()...')
        self.update_friction_factor() 
        #----------------------------------------------------------------- 
        if (DEBUG): 
print('#### Calling update_velocity()...')
        self.update_velocity()
        self.update_velocity_on_edges()     # (set to zero)
        if (DEBUG): print('#### Calling update_froude_number()...')
        self.update_froude_number()
        #-----------------------------------------------------------------
##        print 'Rmin,  Rmax  =', self.R.min(),  self.R.max()
##        print 'Qmin,  Qmax  =', self.Q.min(),  self.Q.max()
##        print 'umin,  umax  =', self.u.min(),  self.u.max()
##        print 'dmin,  dmax  =', self.d.min(),  self.d.max()
##        print 'nmin,  nmax  =', self.nval.min(), self.nval.max()
##        print 'Rhmin, Rhmax =', self.Rh.min(), self.Rh.max()
##        print 'Smin,  Smax  =', self.S_bed.min(), self.S_bed.max()
        if (DEBUG): print('#### Calling update_outlet_values()...')
        self.update_outlet_values()
        if (DEBUG): print('#### Calling update_peak_values()...')
        self.update_peak_values()
        if (DEBUG): print('#### Calling update_Q_out_integral()...')
        self.update_Q_out_integral()

        #---------------------------------------------
        # This takes extra time and is now done
        # only at the end, in finalize().  (8/19/13)
        #---------------------------------------------
        # But then "topoflow_driver" doesn't get
        # correctly updated values for some reason.
        #---------------------------------------------
        ## self.update_mins_and_maxes()

        #--------------------------------------------------
        # Check computed values (but not if known stable)
        #--------------------------------------------------
        if (self.CHECK_STABILITY):
            D_OK = self.check_flow_depth()
            U_OK = self.check_flow_velocity()
            OK   = (D_OK and U_OK)
        else:
            OK = True

        #----------------------------------------------
        # Write user-specified data to output files ?
        #----------------------------------------------
        # Components use own self.time_sec by default.
        #-----------------------------------------------
        if (DEBUG): print('#### Calling write_output_files()...')
        self.write_output_files()
        ## self.write_output_files( time_seconds )

        #-----------------------------
        # Update internal clock
        # after write_output_files()
        #-----------------------------
        if (DEBUG): print('#### Calling update_time()...')
        self.update_time( dt )
        
        if (OK):
            self.status = 'updated'  # (OpenMI 2.0 convention)
        else:
            self.status = 'failed'
            self.DONE   = True
            
    #   update()   
    #-------------------------------------------------------------------
    def finalize(self):

        #---------------------------------------------------
        # We can compute mins and maxes in the final grids
        # here, but the framework will not then pass them
        # to any component (e.g. 
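        #------------------------------------------------------------
        # Sketch: a driver can watch self.status, which update() sets
        # to 'failed' when a stability check fails, and then call
        # finalize() so that open files are closed cleanly:
        #------------------------------------------------------------
        ##   cc.update()
        ##   if (cc.status == 'failed'):
        ##       cc.finalize()
        #------------------------------------------------------------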
topoflow_driver) that may\n # need them.\n #---------------------------------------------------\n self.update_total_channel_water_volume() ## (9/17/19)\n self.update_total_land_water_volume() ## (9/17/19)\n ## self.update_total_edge_water_volume() ## (5/7/20)\n self.update_mins_and_maxes( REPORT=False ) ## (2/6/13)\n self.print_final_report(comp_name='Channels component')\n \n self.status = 'finalizing' # (OpenMI)\n self.close_input_files() # TopoFlow input \"data streams\"\n self.close_output_files()\n self.status = 'finalized' # (OpenMI)\n \n # finalize()\n #-------------------------------------------------------------------\n def set_computed_input_vars(self):\n\n #--------------------------------------------------------------- \n # Note: The initialize() method calls initialize_config_vars()\n # (in BMI_base.py), which calls this method at the end.\n #--------------------------------------------------------------\n cfg_extension = self.get_attribute( 'cfg_extension' ).lower()\n # cfg_extension = self.get_cfg_extension().lower()\n self.KINEMATIC_WAVE = (\"kinematic\" in cfg_extension)\n self.DIFFUSIVE_WAVE = (\"diffusive\" in cfg_extension)\n self.DYNAMIC_WAVE = (\"dynamic\" in cfg_extension)\n \n #-------------------------------------------\n # These currently can't be set to anything\n # else in the GUI, but need to be defined.\n #-------------------------------------------\n self.code_type = 'Grid'\n self.slope_type = 'Grid' # (shouldn't need this)\n\n #---------------------------------------------------------\n # Make sure that all \"save_dts\" are larger or equal to\n # the specified process dt. There is no point in saving\n # results more often than they change.\n # Issue a message to this effect if any are smaller ??\n #---------------------------------------------------------\n self.save_grid_dt = np.maximum(self.save_grid_dt, self.dt)\n self.save_pixels_dt = np.maximum(self.save_pixels_dt, self.dt)\n \n # set_computed_input_vars()\n #-------------------------------------------------------------------\n def initialize_d8_vars(self):\n\n #---------------------------------------------\n # Compute and store a variety of (static) D8\n # flow grid variables. Embed structure into\n # the \"channel_base\" component.\n #---------------------------------------------\n self.d8 = d8_base.d8_component()\n \n #-------------------------------------------------- \n # D8 component builds its cfg filename from these \n #-------------------------------------------------------------\n # (2/11/2017) The initialize() method in d8_base.py now\n # uses case_prefix (vs. site_prefix) for its CFG file:\n # <site_prefix>_d8_global.cfg. This is to prevent confusion\n # since this was the only CFG file that used site_prefix.\n #-------------------------------------------------------------\n # Note: This D8 component is serving a channels component\n # that has already been instantiated and knows its\n # directory and prefix information. So we can build\n # the correct D8 cfg_file name from that info. 
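        #------------------------------------------------------------
        # Sketch of the D8 CFG filename built just below, using
        # hypothetical prefix and directory values:
        #------------------------------------------------------------
        ##   case_prefix   = 'Treynor'          # (hypothetical)
        ##   cfg_directory = '/data/Treynor/'   # (hypothetical)
        ##   cfg_file = cfg_directory + (case_prefix + '_d8_global.cfg')
        ##   # -> '/data/Treynor/Treynor_d8_global.cfg'
        #------------------------------------------------------------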
It\n # will then read path_info CFG file to get other info.\n #-------------------------------------------------------------\n cfg_file = (self.case_prefix + '_d8_global.cfg')\n cfg_file = (self.cfg_directory + cfg_file)\n self.d8.initialize( cfg_file=cfg_file, SILENT=self.SILENT, \\\n REPORT=self.REPORT ) \n# self.d8.site_prefix = self.site_prefix\n# self.d8.case_prefix = self.case_prefix # (used in d8_base.py)\n# self.d8.in_directory = self.in_directory\n# self.d8.initialize( cfg_file=None, SILENT=self.SILENT, \\\n# REPORT=self.REPORT )\n \n #---------------------------------------------------\n # The next 2 \"update\" calls are needed when we use\n # the new \"d8_base.py\", but are not needed when\n # using the older \"tf_d8_base.py\". \n #---------------------------------------------------\n self.d8.update(self.time, SILENT=False, REPORT=True)\n\n #----------------------------------------------------------- \n # Note: This is also needed, but is not done by default in\n # d8.update() because it hurts performance of Erode.\n #----------------------------------------------------------- \n self.d8.update_noflow_IDs()\n\n #--------------------------------------------------- \n # Initialize separate set of d8 vars for flooding.\n # (2019-09-21)\n #---------------------------------------------------\n if (self.FLOOD_OPTION): \n d8f = copy.copy( self.d8 ) # (or use \"copy.deepcopy\"?)\n d8f.FILL_PITS_IN_Z0 = False\n d8f.LINK_FLATS = False\n self.d8f = d8f\n\n # initialize_d8_vars()\n #-------------------------------------------------------------\n def initialize_computed_vars(self):\n\n #--------------------------------------------------------\n # (5/17/12) If MANNING, we need to set z0vals to -1 so\n # they are always defined for use with EMELI framework.\n #--------------------------------------------------------\n # BMI_base.read_config_file() reads \"float\" scalars as\n # numpy \"float64\" data type. Applying np.float64()\n # will break references.\n #--------------------------------------------------------\n if (self.MANNING):\n if (self.nval is not None):\n self.nval_min = self.nval.min()\n self.nval_max = self.nval.max()\n #---------------------------------------\n print(' min(nval) = ' + str(self.nval_min) )\n print(' max(nval) = ' + str(self.nval_max) )\n #---------------------------------------\n self.z0val = self.initialize_scalar(-1, dtype='float64')\n self.z0val_min = self.initialize_scalar(-1, dtype='float64')\n self.z0val_max = self.initialize_scalar(-1, dtype='float64')\n \n if (self.LAW_OF_WALL):\n if (self.z0val is not None):\n self.z0val_min = self.z0val.min()\n self.z0val_max = self.z0val.max()\n #-----------------------------------------\n print(' min(z0val) = ' + str(self.z0val_min) )\n print(' max(z0val) = ' + str(self.z0val_max) )\n #-----------------------------------------\n self.nval = self.initialize_scalar(-1, dtype='float64')\n self.nval_min = self.initialize_scalar(-1, dtype='float64')\n self.nval_max = self.initialize_scalar(-1, dtype='float64')\n\n #------------------------------------------------------------ \n # If neither set, use a constant velocity? 
(Test: 5/18/15)\n #------------------------------------------------------------\n if not(self.MANNING) and not(self.LAW_OF_WALL):\n print('#### WARNING: In CFG file, MANNING=0 and LAW_OF_WALL=0.')\n #-----------------------------------\n self.z0val = self.initialize_scalar(-1, dtype='float64')\n self.z0val_min = self.initialize_scalar(-1, dtype='float64')\n self.z0val_max = self.initialize_scalar(-1, dtype='float64')\n #-------------------------------------------------------------- \n self.nval = self.initialize_scalar(-1, dtype='float64')\n self.nval_min = self.initialize_scalar(-1, dtype='float64')\n self.nval_max = self.initialize_scalar(-1, dtype='float64')\n\n #-----------------------------------------------\n # Convert bank angles from degrees to radians. \n #-------------------------------------------------\n # When bank angles are given as a GRID, this is\n # done in read_input_files(). Then realized that\n # that conversion didn't occur for SCALAR angle.\n # This caused \"denom\" later to be negative.\n # (Fixed on: 2019-10-08.)\n #-------------------------------------------------\n ### if (np.size( self.angle ) == 1):\n if (self.angle_type.lower() == 'scalar'):\n self.angle *= self.deg_to_rad # [radians] \n\n #-----------------------------------------------\n # Print mins and maxes of some other variables\n # that were initialized by read_input_files().\n #-----------------------------------------------\n# print(' min(slope) = ' + str(self.slope.min()) )\n# print(' max(slope) = ' + str(self.slope.max()) )\n print(' min(width) = ' + str(self.width.min()) )\n print(' max(width) = ' + str(self.width.max()) )\n print(' min(angle) = ' + str(self.angle.min() * self.rad_to_deg) + ' [deg]')\n print(' max(angle) = ' + str(self.angle.max() * self.rad_to_deg) + ' [deg]')\n print(' min(sinuosity) = ' + str(self.sinu.min()) )\n print(' max(sinuosity) = ' + str(self.sinu.max()) )\n print(' min(init_depth) = ' + str(self.d0.min()) )\n print(' max(init_depth) = ' + str(self.d0.max()) )\n\n #------------------------------------------------\n # 8/29/05. Multiply ds by (unitless) sinuosity\n # Orig. ds is used by subsurface flow\n #------------------------------------------------\n # NB! We should also divide slopes in S_bed by\n # the sinuosity, as now done here.\n #----------------------------------------------------\n # NB! This saves a modified version of ds that\n # is only used within the \"channels\" component.\n # The original \"ds\" is stored within the\n # topoflow model component and is used for\n # subsurface flow, etc.\n #----------------------------------------------------\n ### self.d8.ds_chan = (self.sinu * ds)\n ### self.ds = (self.sinu * self.d8.ds)\n self.d8.ds = (self.sinu * self.d8.ds) ### USE LESS MEMORY\n\n ###################################################\n ###################################################\n ### S_bed = (S_bed / self.sinu) #*************\n self.slope = (self.slope / self.sinu)\n self.S_bed = self.slope\n self.S_free = self.S_bed.copy() # (2020-04-29)\n\n ###################################################\n ###################################################\n \n #---------------------------\n # Initialize spatial grids\n #-----------------------------------------------\n # NB! 
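        #--------------------------------------------------------------
        # Sketch: scaling ds by sinuosity while dividing the slope by
        # sinuosity (as done above) preserves the elevation drop in
        # each grid cell, since (sinu * ds) * (S / sinu) = (ds * S).
        # Made-up values:
        #--------------------------------------------------------------
        ##   ds, S, sinu = 100.0, 0.01, 1.3      # [m], [1], [1]
        ##   drop1 = (ds * S)                    # 1.0 [m]
        ##   drop2 = (sinu * ds) * (S / sinu)    # 1.0 [m], unchanged
        #--------------------------------------------------------------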
It is not a good idea to initialize the\n # water depth grid to a nonzero scalar value.\n #-----------------------------------------------\n print('Initializing u, f, d grids...')\n self.u = self.initialize_grid( 0, dtype='float64' )\n self.f = self.initialize_grid( 0, dtype='float64' )\n self.d = self.initialize_grid( 0, dtype='float64' )\n self.d += self.d0 # (Add initial depth, if any.)\n\n #------------------------------------------\n # Use a constant velocity (Test: 5/18/15)\n #------------------------------------------\n # if not(self.MANNING) and not(self.LAW_OF_WALL):\n # ## self.u[:] = 1.5 # [m/s]\n # self.u[:] = 3.0 # [m/s]\n \n #########################################################\n # Add this on (2/3/13) so make the TF driver happy\n # during its initialize when it gets reference to R.\n # But in \"update_R()\", be careful not to break the ref.\n # \"Q\" may be subject to the same issue.\n #########################################################\n self.Qc = self.initialize_grid( 0, dtype='float64' )\n self.R = self.initialize_grid( 0, dtype='float64' )\n \n #-----------------------------------------\n # Added these new variables for flooding\n #-----------------------------------------\n self.d_flood = self.initialize_grid( 0, dtype='float64' ) #(9/16/19)\n if (self.FLOOD_OPTION):\n self.Qf = self.initialize_grid( 0, dtype='float64' ) #(9/20/19)\n self.Q = self.initialize_grid( 0, dtype='float64' ) #(total)\n else:\n self.Q = self.Qc # (2 names for same thing)\n \n ##############################################################################\n # seconds_per_year = 3600 * 24 * 365 = 31,536,000\n # mm_per_meter = 1000\n ##############################################################################\n # baseflow_rate = 250.0 # [mm per year], was 230.0\n # baseflow_rate_mps = baseflow_rate / (31536000.0 * 1000.0) #[m/s]\n # self.GW_init = np.zeros([self.ny, self.nx], dtype='Float64')\n # self.GW_init += baseflow_rate_mps\n ##############################################################################\n\n\n #---------------------------------------------------\n # Initialize new grids. Is this needed? (9/13/14)\n #---------------------------------------------------\n self.tau = self.initialize_grid( 0, dtype='float64' )\n self.u_star = self.initialize_grid( 0, dtype='float64' )\n self.froude = self.initialize_grid( 0, dtype='float64' )\n \n #---------------------------------------\n # These are used to check mass balance\n #---------------------------------------\n self.vol_R = self.initialize_scalar( 0, dtype='float64')\n self.vol_Q = self.initialize_scalar( 0, dtype='float64')\n self.vol_chan = self.initialize_scalar( 0, dtype='float64') \n self.vol_land = self.initialize_scalar( 0, dtype='float64') \n \n #-------------------------------------------\n # Make sure all slopes are valid & nonzero\n # since otherwise flow will accumulate\n #-------------------------------------------\n if (self.KINEMATIC_WAVE): \n self.remove_bad_slopes() #(3/8/07. 
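        #------------------------------------------------------------------
        # Sketch of the unit conversion in the commented baseflow block
        # above, from [mm / year] to [m / s]:
        #------------------------------------------------------------------
        ##   baseflow_rate = 250.0                  # [mm / year]
        ##   secs_per_year = 3600.0 * 24 * 365      # 31,536,000 [s]
        ##   mm_per_meter  = 1000.0
        ##   baseflow_rate_mps = baseflow_rate / (secs_per_year * mm_per_meter)
        ##   # -> about 7.9e-09 [m / s]
        #------------------------------------------------------------------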
Only Kin Wave case)\n #----------------------------------------------\n # Use \"get_new_slope_grid()\" in new_slopes.py\n # instead of \"remove_bad_slopes()\".\n # Or change \"slope_grid\" in the CFG file.\n #----------------------------------------------\n ## self.get_new_slope_grid()\n \n #----------------------------------------\n # Initial volume of water in each pixel\n #-----------------------------------------------------------\n # Note: angles were read as degrees & converted to radians\n #-----------------------------------------------------------\n L2 = self.d * np.tan(self.angle)\n self.A_wet = self.d * (self.width + L2)\n self.P_wet = self.width + (np.float64(2) * self.d / np.cos(self.angle) )\n self.vol = self.A_wet * self.d8.ds # [m3]\n\n #---------------------------------------------------------\n # Volume of water in channel when bankfull (2019-09-16)\n # Note that w_bankfull is not used here, but:\n # w_bankfull = width + (2 * d_bankfull * tan(angle))\n # width = w_bankfull - (2 * d_bankfull * tan(angle))\n #---------------------------------------------------------\n L3 = self.d_bankfull * np.tan(self.angle)\n Ac_bankfull = self.d_bankfull * (self.width + L3)\n self.vol_bankfull = Ac_bankfull * self.d8.ds\n self.vol_flood = self.initialize_grid( 0, dtype='float64') \n\n #------------------------------------------------------- \n # Note: depth is often zero at the start of a run, and\n # both width and then P_wet are also zero in places.\n # Therefore initialize Rh as shown.\n #-------------------------------------------------------\n self.Rh = self.initialize_grid( 0, dtype='float64' )\n ## self.Rh = self.A_wet / self.P_wet # [m]\n ## print 'P_wet.min() =', self.P_wet.min()\n ## print 'width.min() =', self.width.min()\n \n ## self.initialize_diversion_vars() # (9/22/14)\n self.initialize_outlet_values()\n self.initialize_peak_values()\n self.initialize_min_and_max_values() ## (2/3/13)\n \n## w = np.where( self.width <= 0 )\n## nw = np.size( w[0] ) # (This is correct for 1D or 2D.)\n## if (nw > 0):\n## print 'WARNING:'\n## print 'Number of locations where width==0 =', nw\n## if (nw < 10):\n## print 'locations =', w\n## print ' '\n\n # initialize_computed_vars()\n #-------------------------------------------------------------\n def initialize_diversion_vars(self):\n\n #-----------------------------------------\n # Compute source IDs from xy coordinates\n #-----------------------------------------\n source_rows = np.int32( self.sources_y / self.ny )\n source_cols = np.int32( self.sources_x / self.nx )\n self.source_IDs = (source_rows, source_cols)\n ## self.source_IDs = (source_rows * self.nx) + source_cols\n \n #---------------------------------------\n # Compute sink IDs from xy coordinates\n #---------------------------------------\n sink_rows = np.int32( self.sinks_y / self.ny )\n sink_cols = np.int32( self.sinks_x / self.nx )\n self.sink_IDs = (sink_rows, sink_cols)\n ## self.sink_IDs = (sink_rows * self.nx) + sink_cols\n \n #-------------------------------------------------\n # Compute canal entrance IDs from xy coordinates\n #-------------------------------------------------\n canal_in_rows = np.int32( self.canals_in_y / self.ny )\n canal_in_cols = np.int32( self.canals_in_x / self.nx )\n self.canal_in_IDs = (canal_in_rows, canal_in_cols)\n ## self.canal_in_IDs = (canal_in_rows * self.nx) + canal_in_cols\n \n #---------------------------------------------\n # Compute canal exit IDs from xy coordinates\n #---------------------------------------------\n canal_out_rows = 
np.int32( self.canals_out_y / self.ny )\n canal_out_cols = np.int32( self.canals_out_x / self.nx )\n self.canal_out_IDs = (canal_out_rows, canal_out_cols)\n ## self.canal_out_IDs = (canal_out_rows * self.nx) + canal_out_cols\n\n #--------------------------------------------------\n # This will be computed from Q_canal_fraction and\n # self.Q and then passed back to Diversions\n #--------------------------------------------------\n self.Q_canals_in = np.array( self.n_sources, dtype='float64' )\n\n # initialize_diversion_vars()\n #-------------------------------------------------------------------\n def initialize_outlet_values(self):\n\n #---------------------------------------------------\n # Note: These are retrieved and used by TopoFlow\n # for the stopping condition. TopoFlow\n # receives a reference to these, but in\n # order to see the values change they need\n # to be stored as mutable, 1D numpy arrays.\n #---------------------------------------------------\n # Note: Q_last is internal to TopoFlow.\n #--------------------------------------------------- \n # self.Q_outlet = self.Q[ self.outlet_ID ]\n self.Q_outlet = self.initialize_scalar(0, dtype='float64')\n self.u_outlet = self.initialize_scalar(0, dtype='float64')\n self.d_outlet = self.initialize_scalar(0, dtype='float64')\n self.f_outlet = self.initialize_scalar(0, dtype='float64')\n \n # initialize_outlet_values() \n #-------------------------------------------------------------------\n def initialize_peak_values(self):\n\n #-------------------------\n # Initialize peak values\n #-------------------------\n self.Q_peak = self.initialize_scalar(0, dtype='float64')\n self.T_peak = self.initialize_scalar(0, dtype='float64')\n self.u_peak = self.initialize_scalar(0, dtype='float64')\n self.Tu_peak = self.initialize_scalar(0, dtype='float64') \n self.d_peak = self.initialize_scalar(0, dtype='float64')\n self.Td_peak = self.initialize_scalar(0, dtype='float64')\n\n # initialize_peak_values()\n #-------------------------------------------------------------------\n def initialize_min_and_max_values(self):\n\n #-------------------------------\n # Initialize min & max values\n # (2/3/13), for new framework.\n #-------------------------------\n v = 1e6\n self.Q_min = self.initialize_scalar(v, dtype='float64')\n self.Q_max = self.initialize_scalar(-v, dtype='float64')\n self.u_min = self.initialize_scalar(v, dtype='float64')\n self.u_max = self.initialize_scalar(-v, dtype='float64')\n self.d_min = self.initialize_scalar(v, dtype='float64')\n self.d_max = self.initialize_scalar(-v, dtype='float64')\n\n # initialize_min_and_max_values() \n #-------------------------------------------------------------------\n def update_flood_d8_vars(self):\n\n #--------------------------------------------------------- \n # Note: Use free-surface gradient of d_flood to compute\n # flow to neighbors. 
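        #-------------------------------------------------------------------
        # Sketch of the mutable-scalar pattern described above, which lets
        # other components hold a live reference.  (numpy assumed imported
        # as np, as elsewhere in this file.)
        #-------------------------------------------------------------------
        ##   Q_outlet = np.array(0.0, dtype='float64')  # (0-d, mutable)
        ##   ref = Q_outlet              # another component's reference
        ##   Q_outlet.fill( 12.5 )       # in-place; ref now reads 12.5
        ##   # "Q_outlet = 12.5" would rebind the name & break the ref.
        #-------------------------------------------------------------------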
# (2019-09-17)
        #---------------------------------------------------------
        # Note:  self.d_flood is used to compute self.Q.
        #---------------------------------------------------------
        self.FLOODING = (self.d_flood.max() > 0)
        if not(self.FLOODING):
            self.d8f = copy.copy( self.d8 )
            self.d8f.FILL_PITS_IN_Z0 = False
            self.d8f.LINK_FLATS = False
            return

        #-------------------------------------------------------- 
        # Use (DEM + d_flood) to compute a free-surface gradient
        # and update all of the D8 vars.
        #-------------------------------------------------------- 
        z_free = (self.d8.DEM + self.d_flood)
        #---------------------------------------
        self.d8f.update_flow_grid( DEM=z_free )    ######
        self.d8f.update_parent_ID_grid()
        self.d8f.update_parent_IDs()     # (needed for gradients)
        self.d8f.update_flow_from_IDs()
        self.d8f.update_flow_to_IDs()
        self.d8f.update_noflow_IDs()  # (needed to fill depressions naturally)
        self.d8f.update_flow_width_grid()    # (dw)
        self.d8f.update_flow_length_grid()   # (ds)
        ### self.d8f.update_area_grid()
        #----------------------------------------
        # self.d8f.d8_grid gives the D8 flow codes 
    
    #   update_flood_d8_vars()
    #-------------------------------------------------------------------
    # def update_excess_rainrate(self):
    def update_R(self):

        #----------------------------------------
        # Compute the "excess rainrate", R.
        # Each term must have same units: [m/s]
        # Sum = net gain/loss rate over pixel.
        #----------------------------------------------------
        # R can be positive or negative.  If negative, then
        # water is removed from the surface at rate R until
        # surface water is consumed.
        #--------------------------------------------------------------
        # P  = precip_rate   [m/s]  (converted by read_input_data()).
        # SM = snowmelt rate [m/s]
        # GW = seep rate     [m/s]  (water_table intersects surface)
        # ET = evap rate     [m/s]
        # IN = infil rate    [m/s]
        # MR = icemelt rate  [m/s]

        #------------------------------------------------------------
        # Use refs to other comp vars from new framework. (5/18/12)
        #------------------------------------------------------------            
        P  = self.P_rain  # (This is now liquid-only precip. 9/14/14)
        SM = self.SM
        GW = self.GW
        ### GW = self.GW_init
        ET = self.ET
        IN = self.IN
        MR = self.MR
        
##        if (self.DEBUG):
##            print 'At time:', self.time_min, ', P =', P, '[m/s]'

        #--------------
        # For testing
        #--------------  
#         print( '(Pmin,  Pmax)  = ' + str(P.min())  + ', ' + str(P.max()) )
#         print( '(SMmin, SMmax) = ' + str(SM.min()) + ', ' + str(SM.max()) )
#         print( '(GWmin, GWmax) = ' + str(GW.min()) + ', ' + str(GW.max()) )
#         print( '(ETmin, ETmax) = ' + str(ET.min()) + ', ' + str(ET.max()) )
#         print( '(INmin, INmax) = ' + str(IN.min()) + ', ' + str(IN.max()) )
#         print( '(MRmin, MRmax) = ' + str(MR.min()) + ', ' + str(MR.max()) )
#         print( ' ' )
        
        self.R = (P + SM + GW + MR) - (ET + IN)
            
    #   update_R()
    #-------------------------------------------------------------------
    def update_R_integral(self):

        #-----------------------------------------------
        # Update mass total for R, sum over all pixels
        #---------------------------------------------------------------
        # Note: Typically, chan_dt < met_dt, so that vol_R is updated
        #       more frequently than vol_P.  
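        #---------------------------------------------------------------
        # Sketch of the water balance in update_R() above: gains minus
        # losses, every term in [m/s], with made-up values:
        #---------------------------------------------------------------
        ##   P, SM, GW, MR = 1e-5, 2e-6, 1e-8, 0.0    # gains  [m/s]
        ##   ET, IN        = 1e-7, 4e-6               # losses [m/s]
        ##   R = (P + SM + GW + MR) - (ET + IN)       # 7.91e-06 [m/s]
        #---------------------------------------------------------------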
Since EMELI performs linear\n # interpolation in time, integrals may be slightly different.\n #--------------------------------------------------------------- \n volume = np.double(self.R * self.da * self.dt) # [m^3]\n if (np.size(volume) == 1):\n self.vol_R += (volume * self.rti.n_pixels)\n else:\n self.vol_R += np.sum(volume)\n\n # update_R_integral() \n #------------------------------------------------------------------- \n def update_channel_discharge(self):\n\n #---------------------------------------------------------\n # The discharge grid, Q, gives the flux of water _out_\n # of each grid cell. This entire amount then flows\n # into one of the 8 neighbor grid cells, as indicated\n # by the D8 flow code. The update_flow_volume() function\n # is called right after this one in update() and uses\n # the Q grid.\n #---------------------------------------------------------\n # 7/15/05. The cross-sectional area of a trapezoid is\n # given by: Ac = d * (w + (d * tan(theta))),\n # where w is the bottom width. If we were to\n # use: Ac = w * d, then we'd have Ac=0 when w=0.\n # We also need angle units to be radians.\n #---------------------------------------------------------\n \n #-----------------------------\n # Compute the discharge grid\n #------------------------------------------------------ \n # A_wet is initialized in initialize_computed_vars().\n # A_wet is updated in update_trapezoid_Rh().\n #------------------------------------------------------ \n self.Qc[:] = self.u * self.A_wet ## (2/19/13, in place)\n\n #--------------\n # For testing\n #-------------- \n## print '(umin, umax) =', self.u.min(), self.u.max()\n## print '(d0min, d0max) =', self.d0.min(), self.d0.max()\n## print '(dmin, dmax) =', self.d.min(), self.d.max()\n## print '(amin, amax) =', self.angle.min(), self.angle.max()\n## print '(wmin, wmax) =', self.width.min(), self.width.max()\n## print '(Qmin, Qmax) =', self.Q.min(), self.Q.max()\n## print '(L2min, L2max) =', L2.min(), L2.max()\n## print '(Qmin, Qmax) =', self.Q.min(), self.Q.max()\n \n #--------------\n # For testing\n #--------------\n # print 'dmin, dmax =', self.d.min(), self.d.max()\n # print 'umin, umax =', self.u.min(), self.u.max()\n # print 'Qmin, Qmax =', self.Q.min(), self.Q.max()\n # print ' ' \n # print 'u(outlet) =', self.u[self.outlet_ID]\n # print 'Q(outlet) =', self.Q[self.outlet_ID] ########\n \n #----------------------------------------------------\n # Wherever depth is less than z0, assume that water\n # is not flowing and set u and Q to zero.\n # However, we also need (d gt 0) to avoid a divide\n # by zero problem, even when numerators are zero.\n #----------------------------------------------------\n # FLOWING = (d > (z0/aval))\n #*** FLOWING[self.d8.noflow_IDs] = False ;******\n # u = (u * FLOWING)\n # Q = (Q * FLOWING)\n # d = np.maximum(d, 0.0) ;(allow depths lt z0, if gt 0.)\n\n # update_channel_discharge()\n #------------------------------------------------------------------- \n def update_flood_discharge(self):\n \n ### if not(self.FLOODING):\n ### return\n \n #------------------------------------------\n # Find grid cells with & without flooding\n #------------------------------------------\n w1 = (self.d_flood > 0) # (array of True or False)\n w2 = np.invert( w1 )\n\n #---------------------------------------------------\n # (2019-09-16) Add discharge due to overbank flow\n # See manning_formula() function in this file.\n #---------------------------------------------------\n uf = (self.u / 5.0)\n Af = (self.d8f.dw * 
self.d_flood) ###### CHECK dw\n\n self.Qf[ w1 ] = uf[ w1 ] * Af[ w1 ] # (in place) \n self.Qf[ w2 ] = 0.0\n\n # update_flood_discharge()\n #------------------------------------------------------------------- \n def update_discharge(self):\n\n #------------------------------------------------------------ \n # Note: This is not finished yet. The fact that channel\n # flow and overbank flooding flow can have different\n # D8 flow directions, with the flooding flow direction\n # switching back and forth, can result in an oscillation\n # or spikiness in the hydrograph. It is not yet\n # clear how to best handle this. Reducing the timestep\n # does not seem to resolve the issue. However, flood\n # depths seem to be well-behaved.\n #------------------------------------------------------------\n if (self.FLOOD_OPTION):\n #------------------------------------------ \n # Look at where the 2 D8 flow grids align\n # First part here with w1 is solid.\n #------------------------------------------ \n # w1 = (self.d8.d8_grid == self.d8f.d8_grid)\n # w2 = np.invert( w1 )\n ## self.Q[ w1 ] = self.Qc[ w1 ] + self.Qf[ w1 ]\n #--------------------------------------------------\n # Not sure how to handle w2 grid cells. This\n # just makes it easy to see where the D8 flow\n # directions differ, at places in main channels.\n #--------------------------------------------------\n ## self.Q[ w2 ] = 0.0\n \n # This part with w1 is also solid.\n w1 = (self.Qf == 0)\n w2 = np.invert( w1 )\n self.Q[ w1 ] = self.Qc[ w1 ]\n \n \n #----------------------------------------------------\n # This is not 100% correct, since the D8 flow grids\n # are not the same for the channel and flood flows.\n #----------------------------------------------------\n ## self.Q[:] = self.Qc + self.Qf\n \n #--------------------------------------------------------- \n # This gives smoother hydrographs in main channels (with\n # some spikes still), but has Q=0 for most grid cells.\n #--------------------------------------------------------- \n ## self.Q[:] = self.Qf\n\n #-------------------------------------------------\n # A compromise, but hydrograph still has spikes, \n # even with timestep of 1 second for Treynor.\n #-------------------------------------------------\n # np.maximum( self.Qc, self.Qf, self.Q) # (in place)\n \n #---------------------------------------------------\n # Average with previous time step to smooth spikes,\n # thought due to switching of flow direction.\n # Hydrographs are much smoother.\n #----------------------------------------------------\n Q2 = (self.Qc[ w2 ] + self.Qf[ w2 ])\n Q3 = (self.Q[ w2 ] + Q2) / 2.0\n self.Q[ w2 ] = Q3\n ### self.Q[ w2 ] = (self.Qc + self.Qf) / 2.0 # (in place)\n\n # For another idea \n ## self.Q[ self.d8f.parent_IDs ]\n\n ### self.Q[ w2 ] = self.Qc[ w2 ] + self.Qf[ w2 ]/2.0 ###############\n ### self.Q[ w2 ] = self.Qc[ w2 ] ####################\n \n ## self.Q[:] = self.Qc + self.Qf\n else:\n # Set self.Q = self.Qc in initialize().\n dum = 0\n\n # update_discharge()\n #-------------------------------------------------------------------\n def update_diversions(self):\n\n #-------------------------------------------------------------- \n # Note: The Channel component requests the following input\n # vars from the Diversions component by including\n # them in its \"get_input_vars()\":\n # (1) Q_sources, Q_sources_x, Q_sources_y\n # (2) Q_sinks, Q_sinks_x, Q_sinks_y\n # (3) Q_canals_out, Q_canals_out_x, Q_canals_out_y\n # (4) Q_canals_fraction, Q_canals_in_x, Q_canals_in_y.\n \n # source_IDs 
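        #-------------------------------------------------------------------
        # Sketch of the blending in update_discharge() above: cells with
        # no overbank flow carry pure channel discharge, while cells with
        # overbank flow are time-averaged to damp the flow-direction
        # oscillation (numpy assumed imported as np):
        #-------------------------------------------------------------------
        ##   w1 = (Qf == 0);  w2 = np.invert( w1 )
        ##   Q[ w1 ] = Qc[ w1 ]
        ##   Q[ w2 ] = (Q[ w2 ] + Qc[ w2 ] + Qf[ w2 ]) / 2.0
        #-------------------------------------------------------------------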
are computed from (x,y) coordinates during\n # initialize().\n #\n # Diversions component needs to get Q_canals_in from the\n # Channel component.\n #--------------------------------------------------------------\n # Note: This *must* be called after update_discharge() and\n # before update_flow_volume().\n #--------------------------------------------------------------\n # Note: The Q grid stores the volume flow rate *leaving* each\n # grid cell in the domain. For sources, an extra amount\n # is leaving the cell which can flow into its D8 parent\n # cell. For sinks, a lesser amount is leaving the cell\n # toward the D8 parent.\n #--------------------------------------------------------------\n # Note: It is not enough to just update Q and then call the\n # update_flow_volume() method. This is because it\n # won't update the volume in the channels in the grid\n # cells that the extra discharge is leaving from.\n #--------------------------------------------------------------\n # If a grid cell contains a \"source\", then an additional Q\n # will flow *into* that grid cell and increase flow volume.\n #-------------------------------------------------------------- \n\n #------------------------------------------------------------- \n # This is not fully tested but runs. However, the Diversion\n # vars are still computed even when Diversions component is\n # disabled. So it slows things down somewhat.\n #------------------------------------------------------------- \n return\n ########################\n ########################\n \n #---------------------------------------- \n # Update Q and vol due to point sources\n #----------------------------------------\n ## if (hasattr(self, 'source_IDs')): \n if (self.n_sources > 0): \n self.Q[ self.source_IDs ] += self.Q_sources\n self.vol[ self.source_IDs ] += (self.Q_sources * self.dt)\n\n #-------------------------------------- \n # Update Q and vol due to point sinks\n #--------------------------------------\n ## if (hasattr(self, 'sink_IDs')):\n if (self.n_sinks > 0): \n self.Q[ self.sink_IDs ] -= self.Q_sinks\n self.vol[ self.sink_IDs ] -= (self.Q_sinks * self.dt)\n \n #--------------------------------------- \n # Update Q and vol due to point canals\n #--------------------------------------- \n ## if (hasattr(self, 'canal_in_IDs')):\n if (self.n_canals > 0): \n #-----------------------------------------------------------------\n # Q grid was just modified. 
Apply the canal diversion fractions\n # to compute the volume flow rate into upstream ends of canals.\n #-----------------------------------------------------------------\n Q_canals_in = self.Q_canals_fraction * self.Q[ self.canal_in_IDs ]\n self.Q_canals_in = Q_canals_in\n\n #---------------------------------------------------- \n # Update Q and vol due to losses at canal entrances\n #----------------------------------------------------\n self.Q[ self.canal_in_IDs ] -= Q_canals_in\n self.vol[ self.canal_in_IDs ] -= (Q_canals_in * self.dt) \n\n #------------------------------------------------- \n # Update Q and vol due to gains at canal exits.\n # Diversions component accounts for travel time.\n #------------------------------------------------- \n self.Q[ self.canal_out_IDs ] += self.Q_canals_out\n self.vol[ self.canal_out_IDs ] += (self.Q_canals_out * self.dt) \n \n # update_diversions()\n #-------------------------------------------------------------------\n def update_flow_volume(self):\n\n #-----------------------------------------------------------\n # Notes: This function must be called after\n # update_discharge() and update_diversions().\n #----------------------------------------------------------- \n # Notes: Q = surface discharge [m^3/s]\n # R = excess precip. rate [m/s]\n # da = pixel area [m^2]\n # dt = channel flow timestep [s]\n # vol = total volume of water in pixel [m^3]\n # v2 = temp version of vol\n # w1 = IDs of pixels that...\n # p1 = IDs of parent pixels that...\n #-----------------------------------------------------------\n dt = self.dt # [seconds]\n\n #----------------------------------------------------\n # Add contribution (or loss ?) from excess rainrate\n #----------------------------------------------------\n # Contributions over entire grid cell from rainfall,\n # snowmelt, icemelt and baseflow (minus losses from\n # evaporation and infiltration) are assumed to flow\n # into the channel within the grid cell.\n # Note that R is allowed to be negative.\n #---------------------------------------------------- \n self.vol += (self.R * self.da) * dt # (in place)\n \n #-----------------------------------------\n # Add contributions from neighbor pixels\n #-------------------------------------------------------------\n # Each grid cell passes flow to *one* downstream neighbor.\n # Note that multiple grid cells can flow toward a given grid\n # cell, so a grid cell ID may occur in d8.p1 and d8.p2, etc.\n #-------------------------------------------------------------\n # (2/16/10) RETEST THIS. 
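        #------------------------------------------------------------------
        # Sketch: for one cell, the explicit update in this method is
        #   vol(t+dt) = vol(t) + [ R*da + sum(Q_in) - Q_out ] * dt,
        # where sum(Q_in) is over the cell's D8 children.  Made-up values:
        #------------------------------------------------------------------
        ##   vol, R, da, dt = 50.0, 1e-5, 900.0, 6.0   # [m3],[m/s],[m2],[s]
        ##   Q_in, Q_out    = 2.0, 1.5                 # [m3/s]
        ##   vol += (R * da + Q_in - Q_out) * dt       # -> 53.054 [m3]
        #------------------------------------------------------------------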
Before, a copy called \"v2\" was\n # used but this doesn't seem to be necessary.\n #------------------------------------------------------------- \n if (self.d8.p1_OK): \n self.vol[ self.d8.p1 ] += (dt * self.Qc[self.d8.w1])\n if (self.d8.p2_OK): \n self.vol[ self.d8.p2 ] += (dt * self.Qc[self.d8.w2])\n if (self.d8.p3_OK): \n self.vol[ self.d8.p3 ] += (dt * self.Qc[self.d8.w3])\n if (self.d8.p4_OK): \n self.vol[ self.d8.p4 ] += (dt * self.Qc[self.d8.w4])\n if (self.d8.p5_OK): \n self.vol[ self.d8.p5 ] += (dt * self.Qc[self.d8.w5])\n if (self.d8.p6_OK): \n self.vol[ self.d8.p6 ] += (dt * self.Qc[self.d8.w6])\n if (self.d8.p7_OK): \n self.vol[ self.d8.p7 ] += (dt * self.Qc[self.d8.w7])\n if (self.d8.p8_OK): \n self.vol[ self.d8.p8 ] += (dt * self.Qc[self.d8.w8])\n\n #----------------------------------------------------\n # Subtract the amount that flows out to D8 neighbor\n #----------------------------------------------------\n self.vol -= (self.Qc * dt) # (in place)\n \n #--------------------------------------------------------\n # While R can be positive or negative, the surface flow\n # volume must always be nonnegative. This also ensures\n # that the flow depth is nonnegative. (7/13/06)\n #--------------------------------------------------------\n ## self.vol = np.maximum(self.vol, 0.0)\n ## self.vol[:] = np.maximum(self.vol, 0.0) # (2/19/13)\n np.maximum( self.vol, 0.0, self.vol ) # (in place)\n \n # update_flow_volume\n #-------------------------------------------------------------------\n def update_flood_volume(self):\n\n ### if not(self.FLOODING):\n ### return\n\n dt = self.dt # [seconds]\n\n #---------------------------------------------------------\n # Excess water volume from overbank flow acts as a source\n # of water in the cell, that adds to whatever volume of\n # water is already there. Channel volume at bankfull,\n # called vol_bankfull, is computed in initialize(). \n # D8 child cells with a higher free-surface may add to\n # the amount in a cell, and this total is reduced by\n # whatever amount flows to the D8 parent cell.\n #----------------------------------------------------------\n dvol = (self.vol - self.vol_bankfull)\n self.vol_flood += np.maximum(dvol, 0.0)\n ### np.maximum( dvol, 0.0, self.vol_flood) # (in place)\n\n #--------------------------------------------------------------\n # Wherever vol > vol_bankfull, the channel volume computed\n # by update_flow_volume() is wrong and should instead be\n # the bankfull volume. 
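        #-------------------------------------------------------------------
        # Note: the inflow updates in this file are split into 8 separate
        # per-direction steps, apparently because each D8 direction maps
        # children to distinct parents, which makes fancy-indexed "+=" safe.
        # With repeated indices, "+=" applies only once per index; np.add.at
        # is the general-purpose alternative.  Sketch:
        #-------------------------------------------------------------------
        ##   v1 = np.zeros(4);  v2 = np.zeros(4)
        ##   ids = np.array([1, 1, 2])
        ##   v1[ids] += 1.0             # v1[1] == 1.0 (duplicate dropped)
        ##   np.add.at(v2, ids, 1.0)    # v2[1] == 2.0 (both applied)
        #-------------------------------------------------------------------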
Extra water volume is put into d_flood.\n #--------------------------------------------------------------\n np.minimum(self.vol, self.vol_bankfull, self.vol ) # (in place)\n \n #-----------------------------------------\n # Add contributions from neighbor pixels\n #-------------------------------------------------------------\n # Each grid cell passes flow to *one* downstream neighbor.\n # Note that multiple grid cells can flow toward a given grid\n # cell, so a grid cell ID may occur in d8.p1 and d8.p2, etc.\n #------------------------------------------------------------- \n if (self.d8f.p1_OK): \n self.vol_flood[ self.d8f.p1 ] += (dt * self.Qf[self.d8f.w1])\n if (self.d8f.p2_OK): \n self.vol_flood[ self.d8f.p2 ] += (dt * self.Qf[self.d8f.w2])\n if (self.d8f.p3_OK): \n self.vol_flood[ self.d8f.p3 ] += (dt * self.Qf[self.d8f.w3])\n if (self.d8f.p4_OK): \n self.vol_flood[ self.d8f.p4 ] += (dt * self.Qf[self.d8f.w4])\n if (self.d8f.p5_OK): \n self.vol_flood[ self.d8f.p5 ] += (dt * self.Qf[self.d8f.w5])\n if (self.d8f.p6_OK): \n self.vol_flood[ self.d8f.p6 ] += (dt * self.Qf[self.d8f.w6])\n if (self.d8f.p7_OK): \n self.vol_flood[ self.d8f.p7 ] += (dt * self.Qf[self.d8f.w7])\n if (self.d8f.p8_OK): \n self.vol_flood[ self.d8f.p8 ] += (dt * self.Qf[self.d8f.w8])\n\n #----------------------------------------------------\n # Subtract the amount that flows out to D8 neighbor\n #----------------------------------------------------\n self.vol_flood -= (self.Qf * dt) # (in place)\n \n #--------------------------------------------------------\n # While R can be positive or negative, the surface flow\n # volume must always be nonnegative. This also ensures\n # that the flow depth is nonnegative. (7/13/06)\n #--------------------------------------------------------\n np.maximum( self.vol_flood, 0.0, self.vol_flood ) # (in place)\n \n # update_flood_volume()\n #-------------------------------------------------------------------\n def update_flow_depth_LAST(self):\n\n #-----------------------------------------------------------\n # Notes: 7/18/05. Modified to use the equation for volume\n # of a trapezoidal channel: vol = Ac * ds, where\n # Ac=d*[w + d*tan(t)], and to solve the resulting\n # quadratic (discarding neg. root) for new depth, d.\n\n # 8/29/05. Now original ds is used for subsurface\n # flow and there is a ds_chan which can include a\n # sinuosity greater than 1. This may be especially\n # important for larger pixel sizes.\n\n # Removed (ds > 1) here which was only meant to\n # avoid a \"divide by zero\" error at pixels where\n # (ds eq 0). This isn't necessary since the\n # Flow_Lengths function in utils_TF.pro never\n # returns a value of zero.\n #----------------------------------------------------------\n # Modified to avoid double where calls, which\n # reduced cProfile run time for this method from\n # 1.391 to 0.644. (9/23/14)\n #----------------------------------------------------------\n # Commented this out on (2/18/10) because it doesn't\n # seem to be used anywhere now. 
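        #------------------------------------------------------------------
        # Sketch of the overbank split in update_flood_volume() above:
        # channel volume is clamped at bankfull and the excess goes to
        # the floodplain store:
        #------------------------------------------------------------------
        ##   dvol       = vol - vol_bankfull
        ##   vol_flood += np.maximum(dvol, 0.0)     # excess only
        ##   np.minimum(vol, vol_bankfull, vol)     # clamp, in place
        #------------------------------------------------------------------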
Checked all\n # of the Channels components.\n #---------------------------------------------------------- \n # self.d_last = self.d.copy()\n\n #----------------------------------- \n # Make some local aliases and vars\n #-----------------------------------------------------------\n # Note: angles were read as degrees & converted to radians\n #-----------------------------------------------------------\n d = self.d\n d_flood = self.d_flood ##### (2019-09-16)\n width = self.width ###\n angle = self.angle\n SCALAR_ANGLES = (np.size(angle) == 1)\n \n #------------------------------------------------------\n # (2/18/10) New code to deal with case where the flow\n # depth exceeds a bankfull depth.\n # For now, d_bankfull is hard-coded.\n #\n # CHANGE Manning's n here, too?\n #------------------------------------------------------\n d_bankfull = 4.0 # [meters]\n ################################\n wb = (self.d > d_bankfull) # (array of True or False)\n self.width[ wb ] = self.d8.dw[ wb ]\n if not(SCALAR_ANGLES):\n self.angle[ wb ] = 0.0\n\n# w_overbank = np.where( d > d_bankfull )\n# n_overbank = np.size( w_overbank[0] )\n# if (n_overbank != 0):\n# width[ w_overbank ] = self.d8.dw[ w_overbank ]\n# if not(SCALAR_ANGLES): angle[w_overbank] = 0.0\n\n #------------------------------------------------------\n # (2/18/10) New code to deal with case where the top\n # width exceeds the grid cell width, dw.\n #------------------------------------------------------ \n top_width = width + (2.0 * d * np.sin(self.angle))\n wb = (top_width > self.d8.dw) # (array of True or False)\n self.width[ wb ] = self.d8.dw[ wb ]\n if not(SCALAR_ANGLES):\n self.angle[ wb ] = 0.0\n\n# wb = np.where(top_width > self.d8.dw)\n# nb = np.size(w_bad[0])\n# if (nb != 0):\n# width[ wb ] = self.d8.dw[ wb ]\n# if not(SCALAR_ANGLES): angle[ wb ] = 0.0\n \n #----------------------------------\n # Is \"angle\" a scalar or a grid ?\n #----------------------------------\n if (SCALAR_ANGLES):\n if (angle == 0.0): \n d = self.vol / (width * self.d8.ds)\n else:\n denom = 2.0 * np.tan(angle)\n arg = 2.0 * denom * self.vol / self.d8.ds\n arg += width**(2.0)\n d = (np.sqrt(arg) - width) / denom\n else:\n #-----------------------------------------------------\n # Pixels where angle is 0 must be handled separately\n #-----------------------------------------------------\n w1 = ( angle == 0 ) # (arrays of True or False)\n w2 = np.invert( w1 )\n #-----------------------------------\n A_top = width[w1] * self.d8.ds[w1] \n d[w1] = self.vol[w1] / A_top\n #----------------------------------- \n denom = 2.0 * np.tan(angle[w2])\n arg = 2.0 * denom * self.vol[w2] / self.d8.ds[w2]\n arg += width[w2]**(2.0)\n d[w2] = (np.sqrt(arg) - width[w2]) / denom\n \n #-----------------------------------------------------\n # Pixels where angle is 0 must be handled separately\n #-----------------------------------------------------\n# wz = np.where( angle == 0 )\n# nwz = np.size( wz[0] )\n# wzc = np.where( angle != 0 )\n# nwzc = np.size( wzc[0] )\n# \n# if (nwz != 0):\n# A_top = width[wz] * self.d8.ds[wz]\n# ## A_top = self.width[wz] * self.d8.ds_chan[wz] \n# d[wz] = self.vol[wz] / A_top\n# \n# if (nwzc != 0): \n# term1 = 2.0 * np.tan(angle[wzc])\n# arg = 2.0 * term1 * self.vol[wzc] / self.d8.ds[wzc]\n# arg += width[wzc]**(2.0)\n# d[wzc] = (np.sqrt(arg) - width[wzc]) / term1\n\n #------------------------------------------\n # Set depth values on edges to zero since\n # they become spikes (no outflow) 7/15/06\n #------------------------------------------ \n d[ 
self.d8.noflow_IDs ] = 0.0\n\n #------------------------------------------------\n # 4/19/06. Force flow depth to be positive ?\n #------------------------------------------------\n # This seems to be needed with the non-Richards\n # infiltration routines when starting with zero\n # depth everywhere, since all water infiltrates\n # for some period of time. It also seems to be\n # needed more for short rainfall records to\n # avoid a negative flow depth error.\n #------------------------------------------------\n # 7/13/06. Still needed for Richards method\n #------------------------------------------------\n ## self.d = np.maximum(d, 0.0)\n np.maximum(d, 0.0, self.d) # (2/19/13, in place)\n\n #------------------------------------------------- \n # Find where d <= 0 and save for later (9/23/14)\n #-------------------------------------------------\n self.d_is_pos = (self.d > 0)\n self.d_is_neg = np.invert( self.d_is_pos )\n \n # update_flow_depth_LAST\n #-------------------------------------------------------------------\n def update_flow_depth(self):\n\n #------------------------------------------------------------\n # Notes: 2019-09/16. This function replaces the one above\n # now called \"update_flow_depth_LAST(). This version\n # allows overbank flow and flooding.\n #------------------------------------------------------------ \n # Notes: 7/18/05. Modified to use the equation for volume\n # of a trapezoidal channel: vol = Ac * ds, where\n # Ac=d*[w + d*tan(t)], and to solve the resulting\n # quadratic (discarding neg. root) for new depth, d.\n\n # 8/29/05. Now original ds is used for subsurface\n # flow and there is a ds_chan which can include a\n # sinuosity greater than 1. This may be especially\n # important for larger pixel sizes.\n\n # Removed (ds > 1) here which was only meant to\n # avoid a \"divide by zero\" error at pixels where\n # (ds eq 0). This isn't necessary since the\n # Flow_Lengths function in utils_TF.pro never\n # returns a value of zero.\n #----------------------------------------------------------\n # Modified to avoid double where calls, which\n # reduced cProfile run time for this method from\n # 1.391 to 0.644. (9/23/14)\n #----------------------------------------------------------\n # Commented this out on (2/18/10) because it doesn't\n # seem to be used anywhere now. 
Checked all\n # of the Channels components.\n #---------------------------------------------------------- \n # self.d_last = self.d.copy()\n\n #----------------------------------- \n # Make some local aliases and vars\n #-----------------------------------------------------------\n # Note: angles were read as degrees & converted to radians\n #-----------------------------------------------------------\n d = self.d\n width = self.width ###\n angle = self.angle\n SCALAR_ANGLES = (np.size(angle) == 1) \n\n #----------------------------------------------- \n # Now compute the water depth in the channels.\n #-----------------------------------------------\n # Is \"angle\" a scalar or a grid ?\n #----------------------------------\n if (SCALAR_ANGLES):\n if (angle == 0.0): \n d = self.vol / (width * self.d8.ds)\n else:\n denom = 2.0 * np.tan(angle)\n arg = 2.0 * denom * self.vol / self.d8.ds\n arg += width**(2.0)\n d = (np.sqrt(arg) - width) / denom\n \n # For debugging\n# print('angle = ' + str(angle) )\n# print('denom.min() = ' + str(denom.min()) ) \n# print('denom.max() = ' + str(denom.max()) ) \n# print('ds.min() = ' + str(self.d8.ds.min()) ) \n# print('ds.max() = ' + str(self.d8.ds.max()) ) \n# print('arg.min() = ' + str(arg.min()) )\n# print('arg.max() = ' + str(arg.max()) )\n# d = (np.sqrt(arg) - width) / denom\n else:\n #-----------------------------------------------------\n # Pixels where angle is 0 must be handled separately\n #-----------------------------------------------------\n w1 = ( angle == 0 ) # (arrays of True or False)\n w2 = np.invert( w1 )\n #-----------------------------------\n A_top = width[w1] * self.d8.ds[w1] \n d[w1] = self.vol[w1] / A_top\n #----------------------------------- \n denom = 2.0 * np.tan(angle[w2])\n arg = 2.0 * denom * self.vol[w2] / self.d8.ds[w2]\n arg += width[w2]**(2.0)\n d[w2] = (np.sqrt(arg) - width[w2]) / denom\n\n #------------------------------------------------------------\n # Wherever vol > vol_bankfull, the flow depth just computed\n # is wrong and should instead be the bankfull depth.\n # Extra water volume has already been put into d_flood.\n #------------------------------------------------------------\n #### d[ wb1 ] = self.d_bankfull[ wb1 ]\n\n #------------------------------------------\n # Set depth values on edges to zero since\n # they become spikes (no outflow) 7/15/06\n #-----------------------------------------------------------\n # NB! This destroys mass, and will have a small effect on\n # mass balance calculations. Since flooding now uses the\n # free-surface gradient (DEM + d_flood), we should not\n # set it to zero at interior noflow_IDs.\n #----------------------------------------------------------- \n d[ self.d8.noflow_IDs ] = 0.0 # (was needed for Baro)\n ## d[ self.d8.edge_IDs ] = 0.0\n\n #------------------------------------------------\n # 4/19/06. Force flow depth to be positive ?\n #------------------------------------------------\n # This seems to be needed with the non-Richards\n # infiltration routines when starting with zero\n # depth everywhere, since all water infiltrates\n # for some period of time. It also seems to be\n # needed more for short rainfall records to\n # avoid a negative flow depth error.\n #------------------------------------------------\n # 7/13/06. 
Still needed for Richards method\n #------------------------------------------------\n ## self.d = np.maximum(d, 0.0)\n np.maximum(d, 0.0, self.d) # (2/19/13, in place)\n\n #------------------------------------------------- \n # Find where d <= 0 and save for later (9/23/14)\n #-------------------------------------------------\n self.d_is_pos = (self.d > 0)\n self.d_is_neg = np.invert( self.d_is_pos )\n \n # update_flow_depth\n #-------------------------------------------------------------------\n def update_flood_depth(self):\n\n #-----------------------------------------------------------\n # Wherever vol > vol_bankfull, the flow depth computed by\n # update_flow_depth() is wrong and should instead be the\n # bankfull depth. Extra water volume is put into d_flood.\n #-----------------------------------------------------------\n # Note: This shouldn't be necessary now.\n #-----------------------------------------------------------\n # np.minimum(self.d, self.d_bankfull, self.d ) # (in place)\n\n #----------------------------------------------------------\n # (2019-09-16) Compute the overbank/flooding depth. \n # Channel volume at bankfull is computed in initialize().\n #----------------------------------------------------------\n # Remember that \"width\" is the trapezoid bottom width.\n # In addition, w_bankfull is not used here, but:\n # w_bankfull = width + (2 * d_bankfull * tan(angle))\n # width = w_bankfull - (2 * d_bankfull * tan(angle))\n # If we know any 3 of these 4 vars, we can compute the\n # 4th one. So assume d_bankfull, angle & width are known.\n # HOWEVER, values of w_bankfull found by remote sensing\n # may be more accurate than values of d_bankfull.\n #----------------------------------------------------------\n SCALAR_DA = (np.size(self.d8.da) == 1) \n d_flood = self.d_flood\n vol_flood = self.vol_flood ###################\n\n w1 = (vol_flood > 0) # (array of True or False)\n w2 = np.invert( w1 )\n if (SCALAR_DA):\n d_flood[ w1 ] = vol_flood[ w1 ] / self.d8.da\n else:\n d_flood[ w1 ] = vol_flood[ w1 ] / self.d8.da[ w1 ]\n d_flood[ w2 ] = 0.0\n\n #-------------------------------------------\n # Set depth values on edges to zero since\n # otherwise they become spikes (no outflow)\n #-----------------------------------------------------------\n # NB! This destroys mass, and will have a small effect on\n # mass balance calculations. Since flooding uses the\n # free-surface gradient (DEM + d_flood), we should not\n # set it to zero at interior noflow_IDs.\n #-----------------------------------------------------------\n d_flood[ self.d8.noflow_IDs ] = 0.0 \n ## d_flood[ self.d8.edge_IDs ] = 0.0 \n self.d_flood[:] = d_flood # write in place \n\n # update_flood_depth()\n #-------------------------------------------------------------------\n def update_free_surface_slope(self):\n\n #-----------------------------------------------------------\n # Notes: It is assumed that the flow directions don't\n # change even though the free surface is changing.\n #-----------------------------------------------------------\n # NB! 
This only applies to water in the channels, and\n # cannot be used when there is overbank flow.\n # See \"z_free\" above instead.\n #-----------------------------------------------------------\n delta_d = (self.d - self.d[self.d8.parent_IDs])\n self.S_free[:] = self.S_bed + (delta_d / self.d8.ds)\n \n #--------------------------------------------\n # Don't do this; negative slopes are needed\n # to decelerate flow in dynamic wave case\n # and for backwater effects.\n #--------------------------------------------\n # Set negative slopes to zero\n #------------------------------\n ### self.S_free = np.maximum(self.S_free, 0)\n\n # update_free_surface_slope()\n #-------------------------------------------------------------------\n def update_shear_stress(self):\n\n #--------------------------------------------------------\n # Notes: 9/9/14. Added so shear stress could be shared.\n # This uses the depth-slope product.\n #--------------------------------------------------------\n if (self.KINEMATIC_WAVE):\n slope = self.S_bed\n else:\n slope = self.S_free\n self.tau[:] = self.rho_H2O * self.g * self.d * slope\n \n # update_shear_stress()\n #-------------------------------------------------------------------\n def update_shear_speed(self):\n\n #--------------------------------------------------------\n # Notes: 9/9/14. Added so shear speed could be shared.\n #--------------------------------------------------------\n self.u_star[:] = np.sqrt( self.tau / self.rho_H2O )\n \n # update_shear_speed()\n #-------------------------------------------------------------------\n def update_trapezoid_Rh(self):\n\n #-------------------------------------------------------------\n # Notes: Compute the hydraulic radius of a trapezoid that:\n # (1) has a bed width of wb >= 0 (0 for triangular)\n # (2) has a bank angle of theta (0 for rectangular)\n # (3) is filled with water to a depth of d.\n # The units of wb and d are meters. The units of\n # theta are assumed to be degrees and are converted.\n #-------------------------------------------------------------\n # NB! wb should never be zero, so P_wet can never be 0,\n # which would produce a NaN (divide by zero).\n #-------------------------------------------------------------\n # See Notes for TF_Tan function in utils_TF.pro\n # AW = d * (wb + (d * TF_Tan(theta_rad)) )\n #-------------------------------------------------------------\n # 9/9/14. Bug fix. Angles were already in radians but\n # were converted to radians again.\n #--------------------------------------------------------------\n\n #---------------------------------------------------------\n # Compute hydraulic radius grid for trapezoidal channels\n #-----------------------------------------------------------\n # Note: angles were read as degrees & converted to radians\n #-----------------------------------------------------------\n d = self.d # (local synonyms)\n wb = self.width # (trapezoid bottom width)\n L2 = d * np.tan( self.angle ) \n A_wet = d * (wb + L2) \n P_wet = wb + (np.float64(2) * d / np.cos(self.angle) )\n\n #---------------------------------------------------\n # At noflow_IDs (e.g. edges) P_wet may be zero\n # so do this to avoid \"divide by zero\". 
(10/29/11)\n #---------------------------------------------------\n P_wet[ self.d8.noflow_IDs ] = np.float64(1)\n Rh = (A_wet / P_wet)\n #--------------------------------\n # w = np.where(P_wet == 0)\n # print 'In update_trapezoid_Rh():'\n # print ' P_wet= 0 at', w[0].size, 'cells'\n\n #---------------------------------------------------\n # Override Rh for overland flow, where d_flood > 0\n # (2019-09-18)\n #---------------------------------------------------\n# w1 = (self.d_flood > 0) # (array of True or False)\n# Rh[ w1 ] = self.d_flood[ w1 ] #########################################\n\n #------------------------------------\n # Force edge pixels to have Rh = 0.\n # This will make u = 0 there also.\n #------------------------------------\n Rh[ self.d8.noflow_IDs ] = np.float64(0) \n## w = np.where(wb <= 0)\n## nw = np.size(w[0])\n## if (nw > 0): Rh[w] = np.float64(0)\n \n self.Rh[:] = Rh\n self.A_wet[:] = A_wet ## (Now shared: 9/9/14)\n self.P_wet[:] = P_wet ## (Now shared: 9/9/14)\n\n #---------------\n # For testing\n #--------------\n## print 'dmin, dmax =', d.min(), d.max()\n## print 'wmin, wmax =', wb.min(), wb.max()\n## print 'amin, amax =', self.angle.min(), self.angle.max()\n\n # update_trapezoid_Rh()\n #-------------------------------------------------------------------\n def update_friction_factor(self): \n\n #---------------------------------------- \n # Note: Added on 9/9/14 to streamline.\n #----------------------------------------------------------\n # Note: f = half of the Fanning friction factor\n # d = flow depth [m]\n # z0 = roughness length\n # S = bed slope (assumed equal to friction slope)\n # g = 9.81 = gravitation constant [m/s^2]\n #--------------------------------------------------------- \n # For law of the wall:\n # kappa = 0.41 = von Karman's constant\n # aval = 0.48 = integration constant\n\n # law_const = sqrt(g)/kappa = 7.6393d\n # smoothness = (aval / z0) * d\n # f = (kappa / alog(smoothness))^2d\n # tau_bed = rho_w * f * u^2 = rho_w * g * d * S\n\n # d, S, and z0 can be arrays.\n\n # To make default z0 correspond to default\n # Manning's n, can use this approximation:\n # z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6d\n # For n=0.03, this gives: z0 = 0.011417\n #########################################################\n # However, for n=0.3, it gives: z0 = 11417.413\n # which is 11.4 km! So the approximation only\n # holds within some range of values.\n #--------------------------------------------------------\n\n ###############################################################\n # cProfile: This method took: 0.369 secs for topoflow_test()\n ############################################################### \n #--------------------------------------\n # Find where (d <= 0). 
g=good, b=bad\n #-------------------------------------- \n wg = self.d_is_pos\n wb = self.d_is_neg\n# wg = ( self.d > 0 )\n# wb = np.invert( wg )\n \n #-----------------------------\n # Compute f for Manning case\n #-----------------------------------------\n # This makes f=0 and du=0 where (d <= 0)\n #-----------------------------------------\n if (self.MANNING):\n n2 = self.nval ** np.float64(2) \n self.f[ wg ] = self.g * (n2[wg] / (self.d[wg] ** self.one_third))\n self.f[ wb ] = np.float64(0)\n \n #---------------------------------\n # Compute f for Law of Wall case\n #---------------------------------\n if (self.LAW_OF_WALL):\n #------------------------------------------------\n # Make sure (smoothness > 1) before taking log.\n # Should issue a warning if this is used.\n #------------------------------------------------\n smoothness = (self.aval / self.z0val) * self.d\n np.maximum(smoothness, np.float64(1.1), smoothness) # (in place)\n self.f[wg] = (self.kappa / np.log(smoothness[wg])) ** np.float64(2)\n self.f[wb] = np.float64(0)\n\n ##############################################################\n # cProfile: This method took: 0.93 secs for topoflow_test()\n ############################################################## \n# #--------------------------------------\n# # Find where (d <= 0). g=good, b=bad\n# #-------------------------------------- \n# wg = np.where( self.d > 0 )\n# ng = np.size( wg[0])\n# wb = np.where( self.d <= 0 )\n# nb = np.size( wb[0] )\n# \n# #-----------------------------\n# # Compute f for Manning case\n# #-----------------------------------------\n# # This makes f=0 and du=0 where (d <= 0)\n# #-----------------------------------------\n# if (self.MANNING):\n# n2 = self.nval ** np.float64(2) \n# if (ng != 0):\n# self.f[wg] = self.g * (n2[wg] / (self.d[wg] ** self.one_third))\n# if (nb != 0):\n# self.f[wb] = np.float64(0)\n# \n# #---------------------------------\n# # Compute f for Law of Wall case\n# #---------------------------------\n# if (self.LAW_OF_WALL):\n# #------------------------------------------------\n# # Make sure (smoothness > 1) before taking log.\n# # Should issue a warning if this is used.\n# #------------------------------------------------\n# smoothness = (self.aval / self.z0val) * self.d\n# np.maximum(smoothness, np.float64(1.1), smoothness) # (in place)\n# ## smoothness = np.maximum(smoothness, np.float64(1.1))\n# if (ng != 0):\n# self.f[wg] = (self.kappa / np.log(smoothness[wg])) ** np.float64(2)\n# if (nb != 0):\n# self.f[wb] = np.float64(0) \n\n #---------------------------------------------\n # We could share the Fanning friction factor\n #---------------------------------------------\n ### self.fanning = (np.float64(2) * self.f)\n\n # update_friction_factor() \n #-------------------------------------------------------------------\n def update_velocity(self):\n\n #---------------------------------------------------------\n # Note: Do nothing now unless this method is overridden\n # by a particular method of computing velocity.\n #---------------------------------------------------------\n print(\"Warning: update_velocity() method is inactive.\")\n \n # print 'KINEMATIC WAVE =', self.KINEMATIC_WAVE\n # print 'DIFFUSIVE WAVE =', self.DIFFUSIVE_WAVE\n # print 'DYNAMIC WAVE =', self.DYNAMIC_WAVE\n\n # update_velocity()\n #-------------------------------------------------------------------\n def update_velocity_on_edges(self):\n\n #---------------------------------\n # Force edge pixels to have u=0.\n 
#----------------------------------------\n        # Large slope around 1 flows into small\n        # slope & leads to a negative velocity.\n        #------------------------------------------------------\n        # Whenever flow direction is undefined (i.e. noflow),\n        # the velocity should be zero.  Not just on edges.\n        #------------------------------------------------------\n        self.u[ self.d8.noflow_IDs ] = np.float64(0)\n        ### self.u[ self.d8.edge_IDs ] = np.float64(0)\n        \n    #   update_velocity_on_edges()\n    #-------------------------------------------------------------------\n    def update_froude_number(self):\n\n        #----------------------------------------------------------\n        # Notes: 9/9/14.  Added so Froude number could be shared.\n        # This use of wg & wb reduced cProfile time from:\n        # 0.644 sec to:  0.121.  (9/23/14)\n        #----------------------------------------------------------\n        # g = good, b = bad\n        #-------------------- \n        wg = self.d_is_pos\n        wb = self.d_is_neg\n\n        self.froude[ wg ] = self.u[wg] / np.sqrt( self.g * self.d[wg] ) \n        self.froude[ wb ] = np.float64(0)\n        \n    #   update_froude_number()\n    #-------------------------------------------------------------\n    def update_outlet_values(self):\n        \n        #-------------------------------------------------\n        # Save computed values at outlet, which are used\n        # by the TopoFlow driver.\n        #-----------------------------------------------------\n        # Note that Q_outlet, etc. are defined as 0D numpy\n        # arrays to make them \"mutable scalars\"; i.e. this\n        # allows changes to be seen by other components that\n        # hold a reference.  To preserve the reference,\n        # however, we must use fill() to assign a new value.\n        #-----------------------------------------------------\n        Q_outlet = self.Q[ self.outlet_ID ]\n        u_outlet = self.u[ self.outlet_ID ]\n        d_outlet = self.d[ self.outlet_ID ]\n        f_outlet = self.f[ self.outlet_ID ]\n        \n        self.Q_outlet.fill( Q_outlet )\n        self.u_outlet.fill( u_outlet )\n        self.d_outlet.fill( d_outlet )\n        self.f_outlet.fill( f_outlet )\n        \n##         self.Q_outlet.fill( self.Q[ self.outlet_ID ] )\n##         self.u_outlet.fill( self.u[ self.outlet_ID ] )\n##         self.d_outlet.fill( self.d[ self.outlet_ID ] )\n##         self.f_outlet.fill( self.f[ self.outlet_ID ] )\n        \n##         self.Q_outlet = self.Q[ self.outlet_ID ]\n##         self.u_outlet = self.u[ self.outlet_ID ]\n##         self.d_outlet = self.d[ self.outlet_ID ]\n##         self.f_outlet = self.f[ self.outlet_ID ]\n        \n##         self.Q_outlet = self.Q.flat[self.outlet_ID]\n##         self.u_outlet = self.u.flat[self.outlet_ID]\n##         self.d_outlet = self.d.flat[self.outlet_ID]\n##         self.f_outlet = self.f.flat[self.outlet_ID]\n        \n    #   update_outlet_values()\n    #-------------------------------------------------------------\n    def update_peak_values(self):\n\n        #-------------------------------------------\n        # Using \"fill\" saves new values \"in-place\"\n        # and preserves \"mutable scalars\".\n        #-------------------------------------------\n        if (self.Q_outlet > self.Q_peak):    \n            self.Q_peak.fill( self.Q_outlet )\n            self.T_peak.fill( self.time_min )  # (time to peak)\n        #---------------------------------------\n        if (self.u_outlet > self.u_peak):\n            self.u_peak.fill( self.u_outlet )\n            self.Tu_peak.fill( self.time_min )\n        #---------------------------------------\n        if (self.d_outlet > self.d_peak):    \n            self.d_peak.fill( self.d_outlet )\n            self.Td_peak.fill( self.time_min )\n        
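\n        #----------------------------------------------------------\n        # Minimal sketch (comments only) of the mutable-scalar\n        # idea, assuming Q_peak was created as a 0D array, e.g.\n        # np.array(0.0, dtype='float64'):\n        #\n        #   ref = self.Q_peak            # held by another component\n        #   self.Q_peak.fill( 5.0 )     # ref now also reads 5.0\n        #   self.Q_peak = np.float64(5) # rebinds the name and\n        #                               # breaks the shared link\n        #----------------------------------------------------------\n##         if (self.Q_outlet > self.Q_peak):    \n##             self.Q_peak  = self.Q_outlet\n##             self.T_peak  = self.time_min    # (time to peak)\n##         #-----------------------------------\n##         if (self.u_outlet > self.u_peak):\n##             self.u_peak  = self.u_outlet\n##             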
self.Tu_peak = self.time_min\n## #-----------------------------------\n## if (self.d_outlet > self.d_peak): \n## self.d_peak = self.d_outlet\n## self.Td_peak = self.time_min\n\n # update_peak_values()\n #-------------------------------------------------------------\n def update_Q_out_integral(self):\n\n #--------------------------------------------------------\n # Note: Renamed \"volume_out\" to \"vol_Q\" for consistency\n # with vol_P, vol_SM, vol_IN, vol_ET, etc. (5/18/12)\n #--------------------------------------------------------\n self.vol_Q += (self.Q_outlet * self.dt) ## 5/19/12.\n ## self.vol_Q += (self.Q[self.outlet_ID] * self.dt)\n\n # update_Q_out_integral()\n #-------------------------------------------------------------\n def update_mins_and_maxes(self, REPORT=False):\n\n #-------------------------------------------------------\n # Note: Only call this at the end, not from update().\n #-------------------------------------------------------\n \n #--------------------------------------\n # Get mins and max over entire domain\n #--------------------------------------\n## Q_min = self.Q.min()\n## Q_max = self.Q.max()\n## #---------------------\n## u_min = self.u.min()\n## u_max = self.u.max() \n## #---------------------\n## d_min = self.d.min()\n## d_max = self.d.max()\n \n #--------------------------------------------\n # Exclude edges where mins are always zero.\n #--------------------------------------------\n nx = self.nx\n ny = self.ny\n Q_min = self.Q[1:(ny - 2)+1,1:(nx - 2)+1].min()\n Q_max = self.Q[1:(ny - 2)+1,1:(nx - 2)+1].max()\n #-------------------------------------------------\n u_min = self.u[1:(ny - 2)+1,1:(nx - 2)+1].min()\n u_max = self.u[1:(ny - 2)+1,1:(nx - 2)+1].max() \n #-------------------------------------------------\n d_min = self.d[1:(ny - 2)+1,1:(nx - 2)+1].min()\n d_max = self.d[1:(ny - 2)+1,1:(nx - 2)+1].max()\n\n #-------------------------------------------------\n # (2/6/13) This preserves \"mutable scalars\" that\n # can be accessed as refs by other components.\n #-------------------------------------------------\n if (Q_min < self.Q_min):\n self.Q_min.fill( Q_min )\n if (Q_max > self.Q_max):\n self.Q_max.fill( Q_max )\n #------------------------------\n if (u_min < self.u_min):\n self.u_min.fill( u_min )\n if (u_max > self.u_max):\n self.u_max.fill( u_max )\n #------------------------------\n if (d_min < self.d_min):\n self.d_min.fill( d_min )\n if (d_max > self.d_max):\n self.d_max.fill( d_max )\n \n #-------------------------------------------------\n # (2/6/13) This preserves \"mutable scalars\" that\n # can be accessed as refs by other components.\n #------------------------------------------------- \n## self.Q_min.fill( np.minimum( self.Q_min, Q_min ) )\n## self.Q_max.fill( np.maximum( self.Q_max, Q_max ) )\n## #---------------------------------------------------\n## self.u_min.fill( np.minimum( self.u_min, u_min ) )\n## self.u_max.fill( np.maximum( self.u_max, u_max ) )\n## #---------------------------------------------------\n## self.d_min.fill( np.minimum( self.d_min, d_min ) )\n## self.d_max.fill( np.maximum( self.d_max, d_max ) )\n\n #-------------------------------------------------\n # (2/6/13) This preserves \"mutable scalars\" that\n # can be accessed as refs by other components.\n #------------------------------------------------- \n## self.Q_min.fill( min( self.Q_min, Q_min ) )\n## self.Q_max.fill( max( self.Q_max, Q_max ) )\n## #---------------------------------------------------\n## self.u_min.fill( min( self.u_min, u_min ) 
)\n## self.u_max.fill( max( self.u_max, u_max ) )\n## #---------------------------------------------------\n## self.d_min.fill( min( self.d_min, d_min ) )\n## self.d_max.fill( max( self.d_max, d_max ) )\n \n #----------------------------------------------\n # (2/6/13) This produces \"immutable scalars\".\n #----------------------------------------------\n## self.Q_min = self.Q.min()\n## self.Q_max = self.Q.max()\n## self.u_min = self.u.min()\n## self.u_max = self.u.max()\n## self.d_min = self.d.min()\n## self.d_max = self.d.max()\n\n if (REPORT):\n print('In channels_base.update_mins_and_maxes():')\n print('(dmin, dmax) =', self.d_min, self.d_max)\n print('(umin, umax) =', self.u_min, self.u_max)\n print('(Qmin, Qmax) =', self.Q_min, self.Q_max)\n print(' ')\n \n # update_mins_and_maxes()\n #-------------------------------------------------------------\n def update_total_channel_water_volume(self, REPORT=False):\n\n #---------------------------------------------------- \n # Note: Compute the total volume of water in all\n # channels for the entire DEM. Can use this\n # in the final mass balance reporting.\n # (2019-09-17)\n #----------------------------------------------------\n # Note: This should be called from finalize().\n #----------------------------------------------------\n vol = self.vol\n vol[ self.d8.noflow_IDs ] = 0.0\n ## vol[ self.d8.edge_IDs ] = 0.0 \n vol_chan = np.sum( vol )\n self.vol_chan.fill( vol_chan )\n\n #-------------------------------------\n # Exclude values on edges of the DEM?\n #-------------------------------------\n# nx = self.nx\n# ny = self.ny\n# vol = self.vol[1:(ny - 2)+1,1:(nx - 2)+1].min()\n\n # update_total_channel_water_volume()\n #-------------------------------------------------------------\n def update_total_land_water_volume(self, REPORT=False):\n\n #---------------------------------------------------- \n # Note: Compute the total volume of land water in\n # all grid cells for the entire DEM. 
Use\n        #       this in the final mass balance reporting.\n        #       (2019-09-17)\n        #----------------------------------------------------\n\n        #-------------------------------------\n        # Exclude values on edges of the DEM?\n        #-------------------------------------\n#         nx = self.nx\n#         ny = self.ny\n#         d_flood = self.d_flood[1:(ny - 2)+1,1:(nx - 2)+1].min()\n\n        d_flood  = self.d_flood\n        vol_land = np.sum( d_flood * self.da )\n        self.vol_land.fill( vol_land )   \n        \n    #   update_total_land_water_volume()\n    #-------------------------------------------------------------------\n    def check_flow_depth_LAST(self):\n\n        OK = True\n        d  = self.d\n        dt = self.dt\n        nx = self.nx  #################\n        \n        #---------------------------------\n        # Are all flow depths positive ?\n        #---------------------------------   \n        wbad = np.where( np.logical_or( d < 0.0, np.logical_not(np.isfinite(d)) ))\n        nbad = np.size( wbad[0] )       \n        if (nbad == 0):    \n            return OK\n\n        OK = False\n        dmin = d[wbad].min()\n        star_line = '*******************************************'\n        \n        msg = [ star_line, \\\n               'ERROR: Simulation aborted.', ' ', \\\n               'Negative or NaN depth found: ' + str(dmin), \\\n               'Time step may be too large.', \\\n               'Time step:      ' + str(dt) + ' [s]' ]\n\n        for k in range(len(msg)):\n            print(msg[k])\n        \n        #-------------------------------------------\n        # If not too many, print actual depths\n        #-------------------------------------------\n        if (nbad < 30):          \n            brow = wbad[0][0]\n            bcol = wbad[1][0]\n##             badi = wbad[0]\n##             bcol = (badi % nx)\n##             brow = (badi / nx)\n            crstr = str(bcol) + ', ' + str(brow)\n\n            msg = [' ', '(Column, Row):  ' + crstr, \\\n                   'Flow depth:     ' + str(d[brow, bcol])]\n            for k in range(len(msg)):\n                print(msg[k])\n\n        print(star_line) \n        print(' ')\n        raise RuntimeError('Negative depth found.')  # (11/16/16)\n\n        return OK\n\n    #   check_flow_depth_LAST()\n    #-------------------------------------------------------------------\n    def check_flow_depth(self):\n\n        OK = True\n        d  = self.d\n        dt = self.dt\n        nx = self.nx  #################\n        \n        #---------------------------------\n        # Are any flow depths negative ?\n        #---------------------------------\n        wneg = np.where( d < 0.0 )\n        nneg = np.size( wneg[0] )\n        #-----------------------------\n        # Are any flow depths NaNs ?\n        #-----------------------------\n        wnan = np.where( np.isnan(d) )\n        nnan = np.size( wnan[0] )\n        #-----------------------------\n        # Are any flow depths Infs ?\n        #-----------------------------\n        winf = np.where( np.isinf(d) )\n        ninf = np.size( winf[0] )\n        #----------------------------------\n        # Option to allow NaN but not Inf\n        #----------------------------------\n        if (nneg == 0) and (ninf == 0):\n            return OK\n        OK = False\n        #-------------------------------------------------- \n#         if (nneg == 0) and (nnan == 0) and (ninf == 0):\n#             return OK\n#         OK = False\n\n        #----------------------------------\n        # Print informative error message\n        #----------------------------------\n        star_line = '*******************************************'\n        print( star_line ) \n        print('ERROR: Simulation aborted.')\n        print(' ')\n        #-------------------------------------------------------- \n        if (nneg > 0):\n            dmin = d[ wneg ].min()\n            str1 = 'Found ' + str(nneg) + ' negative depths.'\n            str2 = '  Smallest negative depth = ' + str(dmin)\n            print( str1 )\n            print( str2 )\n        #--------------------------------------------------------\n        if (nnan > 0):\n            str3 = 'Found ' + str(nnan) + ' NaN depths.'\n            print( str3 )\n        #--------------------------------------------------------\n        if (ninf > 0):\n            str4 = 'Found ' + str(ninf) + ' infinite depths.'\n            print( str4 )\n
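        #--------------------------------------------------------\n        # Illustrative sketch (comments only): one way to list\n        # the offending cells, assuming d is a 2D grid:\n        #\n        #   w = np.where( np.logical_or( d < 0, np.isinf(d) ) )\n        #   for (row, col) in zip( w[0], w[1] ):\n        #       print( (row, col), d[row, col] )\n        #--------------------------------------------------------\n        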
#------------------------------------\n        # Option to allow NaNs on the edges\n        #------------------------------------\n        print( 'Time step may be too large for stability.' )\n        print( 'Time step:      ' + str(dt) + ' [s]' )\n        print( 'Try reducing timestep in channels CFG file.' )\n        print( star_line )\n        print( ' ' )\n        \n        #-------------------------------------------\n        # If not too many, print actual depths\n        #-------------------------------------------\n#         if (nbad < 30):          \n#             brow = wbad[0][0]\n#             bcol = wbad[1][0]\n#             ## badi = wbad[0]\n#             ## bcol = (badi % nx)\n#             ## brow = (badi / nx)\n#             crstr = str(bcol) + ', ' + str(brow)\n# \n#             msg = [' ', '(Column, Row):  ' + crstr, \\\n#                    'Flow depth:     ' + str(d[brow, bcol])]\n#             for k in range(len(msg)):\n#                 print(msg[k])\n#             print(star_line) \n#             print(' ')\n\n        raise RuntimeError('Negative or NaN depth found.')  # (11/16/16)\n\n        return OK\n\n    #   check_flow_depth()\n    #-------------------------------------------------------------------\n    def check_flow_velocity_LAST(self):\n\n        OK = True\n        u  = self.u\n        dt = self.dt\n        nx = self.nx\n        \n        #--------------------------------\n        # Are all velocities positive ?\n        #--------------------------------\n        wbad = np.where( np.logical_or( u < 0.0, np.logical_not(np.isfinite(u)) ))\n        nbad = np.size( wbad[0] )\n        if (nbad == 0):    \n            return OK\n\n        OK = False\n        umin = u[wbad].min()\n        star_line = '*******************************************'\n        msg = [ star_line, \\\n               'ERROR: Simulation aborted.', ' ', \\\n               'Negative or NaN velocity found: ' + str(umin), \\\n               'Time step may be too large.', \\\n               'Time step:      ' + str(dt) + ' [s]']\n        for k in range(len(msg)):\n            print(msg[k])\n\n        #-------------------------------------------\n        # If not too many, print actual velocities\n        #-------------------------------------------\n        if (nbad < 30):\n            brow = wbad[0][0]\n            bcol = wbad[1][0]\n##             badi = wbad[0]\n##             bcol = (badi % nx)\n##             brow = (badi / nx)\n            crstr = str(bcol) + ', ' + str(brow)\n\n            msg = [' ', '(Column, Row):  ' + crstr, \\\n                   'Velocity:       ' + str(u[brow, bcol])]\n            for k in range(len(msg)):\n                print(msg[k])\n\n        print(star_line)\n        print(' ')\n        raise RuntimeError('Negative or NaN velocity found.')  # (11/16/16)\n\n        return OK\n\n        \n##         umin = u[wbad].min()\n##         badi = wbad[0]\n##         bcol = (badi % nx)\n##         brow = (badi / nx)\n##         crstr = str(bcol) + ', ' + str(brow)\n##         msg = np.array([' ', \\\n##                         '*******************************************', \\\n##                         'ERROR: Simulation aborted.', ' ', \\\n##                         'Negative velocity found: ' + str(umin), \\\n##                         'Time step may be too large.', ' ', \\\n##                         '(Column, Row):  ' + crstr, \\\n##                         'Velocity:  ' + str(u[badi]), \\\n##                         'Time step: ' + str(dt) + ' [s]', \\\n##                         '*******************************************', ' '])\n##         for k in xrange( np.size(msg) ):\n##             print msg[k]\n\n##         return OK  \n\n\n    #   check_flow_velocity_LAST()\n    #-------------------------------------------------------------------\n    def check_flow_velocity(self):\n\n        OK = True\n        u  = self.u\n        dt = self.dt\n        nx = self.nx\n        \n        #---------------------------------\n        # Are any velocities negative ?\n        #---------------------------------\n        wneg = np.where( u < 0.0 )\n        nneg = np.size( wneg[0] )\n        #-----------------------------\n        # Are any velocities NaNs ?\n        #-----------------------------\n        wnan = np.where( np.isnan(u) )\n        nnan = np.size( wnan[0] )\n        #-----------------------------\n        # Are any velocities Infs ?\n        #-----------------------------\n        winf = np.where( np.isinf(u) )\n        ninf = np.size( winf[0] )\n        #----------------------------------\n        # Option to allow NaN but not Inf\n        
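# (As in check_flow_depth() above, NaN velocities can be\n        # tolerated, e.g. on edge cells, but negative or infinite\n        # values always abort the run.)\n        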
#----------------------------------\n if (nneg == 0) and (ninf == 0):\n return OK\n OK = False\n #-------------------------------------------------- \n# if (nneg == 0) and (nnan == 0) and (ninf == 0):\n# return OK\n# OK = False\n\n #----------------------------------\n # Print informative error message\n #----------------------------------\n star_line = '*******************************************'\n print( star_line ) \n print('ERROR: Simulation aborted.')\n print(' ')\n #-------------------------------------------------------- \n if (nneg > 0):\n umin = u[ wneg ].min()\n str1 = 'Found ' + str(nneg) + ' negative velocities.'\n str2 = ' Smallest negative velocity = ' + str(umin)\n print( str1 )\n print( str2 )\n #--------------------------------------------------------\n if (nnan > 0):\n str3 = 'Found ' + str(nnan) + ' NaN velocities.'\n print( str3 )\n #--------------------------------------------------------\n if (ninf > 0):\n str4 = 'Found ' + str(ninf) + ' infinite velocities.'\n print( str4 )\n #------------------------------------\n # Option to allow NaNs on the edges\n #------------------------------------\n print( 'Time step may be too large for stability.' )\n print( 'Time step: ' + str(dt) + ' [s]' )\n print( 'Try reducing timestep in channels CFG file.' )\n print( star_line )\n print( ' ' )\n \n raise RuntimeError('Negative or NaN velocity found.') # (11/16/16)\n\n return OK\n\n\n## umin = u[wbad].min()\n## badi = wbad[0]\n## bcol = (badi % nx)\n## brow = (badi / nx)\n## crstr = str(bcol) + ', ' + str(brow)\n## msg = np.array([' ', \\\n## '*******************************************', \\\n## 'ERROR: Simulation aborted.', ' ', \\\n## 'Negative velocity found: ' + str(umin), \\\n## 'Time step may be too large.', ' ', \\\n## '(Column, Row): ' + crstr, \\\n## 'Velocity: ' + str(u[badi]), \\\n## 'Time step: ' + str(dt) + ' [s]', \\\n## '*******************************************', ' '])\n## for k in xrange( np.size(msg) ):\n## print msg[k]\n\n## return OK \n\n\n # check_flow_velocity()\n #------------------------------------------------------------------- \n def open_input_files(self):\n\n #------------------------------------------------------\n # This method uses prepend_directory() in BMI_base.py\n # which uses both eval and exec.\n #------------------------------------------------------\n# in_files = ['slope_file', 'nval_file', 'z0val_file',\n# 'width_file', 'angle_file', 'sinu_file',\n# 'd0_file', 'd_bankfull_file' ]\n# self.prepend_directory( in_files, INPUT=True )\n\n #------------------------------------------------------\n # This avoids eval/exec, but is brute-force\n # 2020-05-03. 
Changed in_directory to topo_directory.\n # See set_directories() in BMI_base.py.\n #------------------------------------------------------\n self.slope_file = (self.topo_directory + self.slope_file)\n self.nval_file = (self.topo_directory + self.nval_file)\n self.z0val_file = (self.topo_directory + self.z0val_file) \n self.width_file = (self.topo_directory + self.width_file)\n self.angle_file = (self.topo_directory + self.angle_file)\n self.sinu_file = (self.topo_directory + self.sinu_file) \n self.d0_file = (self.topo_directory + self.d0_file)\n self.d_bankfull_file = (self.topo_directory + self.d_bankfull_file) \n\n #---------------------------------------------- \n # Open all input files and store file objects\n #---------------------------------------------- \n #self.code_unit = model_input.open_file(self.code_type, self.code_file)\n self.slope_unit = model_input.open_file(self.slope_type, self.slope_file)\n if (self.MANNING):\n self.nval_unit = model_input.open_file(self.nval_type, self.nval_file)\n if (self.LAW_OF_WALL):\n self.z0val_unit = model_input.open_file(self.z0val_type, self.z0val_file)\n self.width_unit = model_input.open_file(self.width_type, self.width_file)\n self.angle_unit = model_input.open_file(self.angle_type, self.angle_file)\n self.sinu_unit = model_input.open_file(self.sinu_type, self.sinu_file)\n self.d0_unit = model_input.open_file(self.d0_type, self.d0_file)\n self.d_bankfull_unit = model_input.open_file(self.d_bankfull_type, self.d_bankfull_file)\n\n # open_input_files()\n #------------------------------------------------------------------- \n def read_input_files(self):\n\n #------------------------------------------------------- \n # Note: All grids are assumed to have same dimensions\n # as the DEM.\n #-------------------------------------------------------\n rti = self.rti\n \n #-------------------------------------------------------\n # All grids are assumed to have a data type of Float32\n # as stored in their binary grid file.\n #-------------------------------------------------------\n # If EOF is reached, model_input.read_next() does not\n # change the value of the scalar or grid.\n #-------------------------------------------------------\n slope = model_input.read_next(self.slope_unit, self.slope_type, rti)\n if (slope is not None):\n self.update_var( 'slope', slope )\n\n if (self.MANNING):\n nval = model_input.read_next(self.nval_unit, self.nval_type, rti)\n if (nval is not None):\n self.update_var( 'nval', nval )\n\n if (self.LAW_OF_WALL):\n z0val = model_input.read_next(self.z0val_unit, self.z0val_type, rti)\n if (z0val is not None):\n self.update_var( 'z0val', z0val )\n \n width = model_input.read_next(self.width_unit, self.width_type, rti)\n if (width is not None):\n #-------------------------------------------------------\n # Width can be zero on 4 edges, but this can result in\n # a \"divide by zero\" error later on, so need to adjust.\n #-------------------------------------------------------\n w1 = ( width == 0 ) # (arrays of True or False)\n width[w1] = self.d8.dw[w1]\n self.update_var( 'width', width )\n\n angle = model_input.read_next(self.angle_unit, self.angle_type, rti)\n if (angle is not None):\n #------------------------------------------------------------\n # Convert bank angles from degrees to radians. For a\n # SCALAR angle, this is done in initialize_computed_vars().\n # To support general case this is done here for angle GRID. 
\n #------------------------------------------------------------\n angle *= self.deg_to_rad # [radians]\n self.update_var( 'angle', angle )\n\n sinu = model_input.read_next(self.sinu_unit, self.sinu_type, rti)\n if (sinu is not None):\n self.update_var( 'sinu', sinu )\n \n d0 = model_input.read_next(self.d0_unit, self.d0_type, rti)\n if (d0 is not None):\n self.update_var( 'd0', d0 )\n\n # (2019-09-16) ##############################\n d_bankfull = model_input.read_next(self.d_bankfull_unit, self.d_bankfull_type, rti)\n if (d_bankfull is not None):\n self.update_var( 'd_bankfull', d_bankfull )\n \n # read_input_files() \n #------------------------------------------------------------------- \n# def read_input_files_last(self):\n# \n# #----------------------------------------------------\n# # The D8 flow codes are always a grid, size of DEM.\n# #----------------------------------------------------\n# # NB! model_input.py also has a read_grid() function.\n# #---------------------------------------------------- \n# rti = self.rti\n# ## print 'Reading D8 flow grid (in CHANNELS)...'\n# ## self.code = rtg_files.read_grid(self.code_file, rti,\n# ## RTG_type='BYTE')\n# ## print ' '\n# \n# #-------------------------------------------------------\n# # All grids are assumed to have a data type of Float32.\n# #-------------------------------------------------------\n# slope = model_input.read_next(self.slope_unit, self.slope_type, rti)\n# if (slope is not None):\n# self.slope = slope\n# ## print ' min(slope) =', slope.min()\n# ## print ' max(slope) =', slope.max()\n# \n# # If EOF was reached, hopefully numpy's \"fromfile\"\n# # returns None, so that the stored value will be\n# # the last value that was read.\n# \n# if (self.MANNING):\n# nval = model_input.read_next(self.nval_unit, self.nval_type, rti)\n# if (nval is not None):\n# # if (self.nval_type.lower() == 'scalar'):\n# # self.update_scalar( 'nval', nval )\n# # else:\n# # self.nval = nval\n# self.nval = nval\n# self.nval_min = nval.min()\n# self.nval_max = nval.max()\n# print ' min(nval) =', self.nval_min\n# print ' max(nval) =', self.nval_max\n# \n# if (self.LAW_OF_WALL):\n# z0val = model_input.read_next(self.z0val_unit, self.z0val_type, rti)\n# if (z0val is not None):\n# self.z0val = z0val\n# self.z0val_min = z0val.min()\n# self.z0val_max = z0val.max()\n# print ' min(z0val) =', self.z0val_min\n# print ' max(z0val) =', self.z0val_max\n# \n# width = model_input.read_next(self.width_unit, self.width_type, rti)\n# if (width is not None):\n# #-------------------------------------------------------\n# # Width can be zero on 4 edges, but this can result in\n# # a \"divide by zero\" error later on, so need to adjust.\n# #-------------------------------------------------------\n# w1 = ( width == 0 ) # (arrays of True or False)\n# width[w1] = self.d8.dw[w1]\n# self.width = width\n# print ' min(width) =', width.min()\n# print ' max(width) =', width.max()\n# \n# angle = model_input.read_next(self.angle_unit, self.angle_type, rti)\n# if (angle is not None):\n# print ' min(angle) =', angle.min(), ' [deg]'\n# print ' max(angle) =', angle.max(), ' [deg]'\n# #-----------------------------------------------\n# # Convert bank angles from degrees to radians. 
\n# #-----------------------------------------------\n# self.angle = angle * self.deg_to_rad # [radians]\n# ### self.angle = angle # (before 9/9/14)\n# \n# sinu = model_input.read_next(self.sinu_unit, self.sinu_type, rti)\n# if (sinu is not None):\n# self.sinu = sinu\n# print ' min(sinuosity) =', sinu.min()\n# print ' max(sinuosity) =', sinu.max()\n# \n# d0 = model_input.read_next(self.d0_unit, self.d0_type, rti)\n# if (d0 is not None):\n# self.d0 = d0\n# print ' min(d0) =', d0.min()\n# print ' max(d0) =', d0.max()\n# \n# ## code = model_input.read_grid(self.code_unit, \\\n# ## self.code_type, rti, dtype='UInt8')\n# ## if (code is not None): self.code = code\n# \n# # read_input_files_last() \n #------------------------------------------------------------------- \n def close_input_files(self):\n\n # if not(self.slope_unit.closed):\n # if (self.slope_unit is not None):\n\n #-------------------------------------------------\n # NB! self.code_unit was never defined as read.\n #-------------------------------------------------\n # if (self.code_type != 'scalar'): self.code_unit.close()\n\n if (self.slope_type != 'Scalar'): self.slope_unit.close()\n if (self.MANNING):\n if (self.nval_type != 'Scalar'): self.nval_unit.close()\n if (self.LAW_OF_WALL):\n if (self.z0val_type != 'Scalar'): self.z0val_unit.close()\n if (self.width_type != 'Scalar'): self.width_unit.close()\n if (self.angle_type != 'Scalar'): self.angle_unit.close()\n if (self.sinu_type != 'Scalar'): self.sinu_unit.close()\n if (self.d0_type != 'Scalar'): self.d0_unit.close()\n if (self.d_bankfull_type != 'Scalar'): self.d_bankfull_unit.close()\n \n## if (self.slope_file != ''): self.slope_unit.close()\n## if (self.MANNING):\n## if (self.nval_file != ''): self.nval_unit.close()\n## if (self.LAW_OF_WALL):\n## if (self.z0val_file != ''): self.z0val_unit.close()\n## if (self.width_file != ''): self.width_unit.close()\n## if (self.angle_file != ''): self.angle_unit.close()\n## if (self.sinu_file != ''): self.sinu_unit.close()\n## if (self.d0_file != ''): self.d0_unit.close()\n\n # close_input_files() \n #------------------------------------------------------------------- \n def update_outfile_names(self):\n\n #-------------------------------------------------\n # Notes: Append out_directory to outfile names.\n #-------------------------------------------------\n self.Q_gs_file = (self.out_directory + self.Q_gs_file)\n self.u_gs_file = (self.out_directory + self.u_gs_file)\n self.d_gs_file = (self.out_directory + self.d_gs_file) \n self.f_gs_file = (self.out_directory + self.f_gs_file)\n self.d_flood_gs_file = (self.out_directory + self.d_flood_gs_file) \n #--------------------------------------------------------\n self.Q_ts_file = (self.out_directory + self.Q_ts_file)\n self.u_ts_file = (self.out_directory + self.u_ts_file) \n self.d_ts_file = (self.out_directory + self.d_ts_file) \n self.f_ts_file = (self.out_directory + self.f_ts_file) \n self.d_flood_ts_file = (self.out_directory + self.d_flood_ts_file) \n \n # update_outfile_names()\n #------------------------------------------------------------------- \n def bundle_output_files(self): \n\n ###################################################\n # NOT READY YET. Need \"get_long_name()\" and a new\n # version of \"get_var_units\". 
(9/21/14)\n        ###################################################\n        \n        #------------------------------------------------------------- \n        # Bundle the output file info into an array for convenience.\n        # Then we just need one open_output_files(), in BMI_base.py,\n        # and one close_output_files().  Less to maintain.  (9/21/14)\n        #------------------------------------------------------------- \n        # gs = grid stack, ts = time series, ps = profile series.\n        #-------------------------------------------------------------\n        self.out_files = [\n           {'var_name':'Q', \n            'save_gs':self.SAVE_Q_GRIDS,  'gs_file':self.Q_gs_file,\n            'save_ts':self.SAVE_Q_PIXELS, 'ts_file':self.Q_ts_file, \n            'long_name':get_long_name('Q'), 'units_name':get_var_units('Q')}, \n           #-----------------------------------------------------------------\n           {'var_name':'u',\n            'save_gs':self.SAVE_U_GRIDS,  'gs_file':self.u_gs_file,\n            'save_ts':self.SAVE_U_PIXELS, 'ts_file':self.u_ts_file,\n            'long_name':get_long_name('u'), 'units_name':get_var_units('u')},\n           #-----------------------------------------------------------------\n           {'var_name':'d',\n            'save_gs':self.SAVE_D_GRIDS,  'gs_file':self.d_gs_file,\n            'save_ts':self.SAVE_D_PIXELS, 'ts_file':self.d_ts_file,\n            'long_name':get_long_name('d'), 'units_name':get_var_units('d')}, \n           #-----------------------------------------------------------------\n           {'var_name':'f',\n            'save_gs':self.SAVE_F_GRIDS,  'gs_file':self.f_gs_file,\n            'save_ts':self.SAVE_F_PIXELS, 'ts_file':self.f_ts_file,\n            'long_name':get_long_name('f'), 'units_name':get_var_units('f')},\n           #-----------------------------------------------------------------\n           {'var_name':'d_flood',\n            'save_gs':self.SAVE_DF_GRIDS,  'gs_file':self.d_flood_gs_file,\n            'save_ts':self.SAVE_DF_PIXELS, 'ts_file':self.d_flood_ts_file,\n            'long_name':get_long_name('d_flood'), 'units_name':get_var_units('d_flood')} ]\n        \n    #   bundle_output_files\n    #------------------------------------------------------------------- \n    def disable_all_output(self):\n    \n        self.SAVE_Q_GRIDS  = False\n        self.SAVE_U_GRIDS  = False\n        self.SAVE_D_GRIDS  = False\n        self.SAVE_F_GRIDS  = False\n        self.SAVE_DF_GRIDS = False\n        #----------------------------\n        self.SAVE_Q_PIXELS  = False\n        self.SAVE_U_PIXELS  = False\n        self.SAVE_D_PIXELS  = False\n        self.SAVE_F_PIXELS  = False\n        self.SAVE_DF_PIXELS = False\n        \n    #   disable_all_output()\n    #------------------------------------------------------------------- \n    def open_output_files(self):\n\n        model_output.check_netcdf()\n        self.update_outfile_names()\n        ## self.bundle_output_files()\n        \n\n##         print 'self.SAVE_Q_GRIDS =', self.SAVE_Q_GRIDS\n##         print 'self.SAVE_U_GRIDS =', self.SAVE_U_GRIDS\n##         print 'self.SAVE_D_GRIDS =', self.SAVE_D_GRIDS\n##         print 'self.SAVE_F_GRIDS =', self.SAVE_F_GRIDS\n##         #---------------------------------------------------\n##         print 'self.SAVE_Q_PIXELS =', self.SAVE_Q_PIXELS\n##         print 'self.SAVE_U_PIXELS =', self.SAVE_U_PIXELS\n##         print 'self.SAVE_D_PIXELS =', self.SAVE_D_PIXELS\n##         print 'self.SAVE_F_PIXELS =', self.SAVE_F_PIXELS\n\n#         IDs = self.outlet_IDs\n#         for k in range( len(self.out_files) ):\n#             #--------------------------------------\n#             # Open new files to write grid stacks\n#             #--------------------------------------\n#             if (self.out_files[k]['save_gs']):\n#                 model_output.open_new_gs_file( self, self.out_files[k], self.rti )\n#             #--------------------------------------\n#             # Open new files to write time series\n#             #--------------------------------------\n#             if (self.out_files[k]['save_ts']):\n#                 model_output.open_new_ts_file( self, self.out_files[k], IDs )\n        \n        #--------------------------------------\n        # Open new files to write grid 
stacks\n #--------------------------------------\n if (self.SAVE_Q_GRIDS): \n model_output.open_new_gs_file( self, self.Q_gs_file, self.rti,\n var_name='Q',\n long_name='volumetric_discharge',\n units_name='m^3/s')\n \n if (self.SAVE_U_GRIDS): \n model_output.open_new_gs_file( self, self.u_gs_file, self.rti,\n var_name='u',\n long_name='mean_channel_flow_velocity',\n units_name='m/s')\n \n if (self.SAVE_D_GRIDS): \n model_output.open_new_gs_file( self, self.d_gs_file, self.rti,\n var_name='d',\n long_name='max_channel_flow_depth',\n units_name='m')\n\n if (self.SAVE_F_GRIDS): \n model_output.open_new_gs_file( self, self.f_gs_file, self.rti,\n var_name='f',\n long_name='friction_factor',\n units_name='none')\n \n if (self.SAVE_DF_GRIDS): \n model_output.open_new_gs_file( self, self.d_flood_gs_file, self.rti,\n var_name='d_flood',\n long_name='land_surface_water__depth',\n units_name='m')\n \n #--------------------------------------\n # Open new files to write time series\n #--------------------------------------\n IDs = self.outlet_IDs\n if (self.SAVE_Q_PIXELS): \n model_output.open_new_ts_file( self, self.Q_ts_file, IDs,\n var_name='Q',\n long_name='volumetric_discharge',\n units_name='m^3/s')\n \n if (self.SAVE_U_PIXELS):\n model_output.open_new_ts_file( self, self.u_ts_file, IDs,\n var_name='u',\n long_name='mean_channel_flow_velocity',\n units_name='m/s')\n \n if (self.SAVE_D_PIXELS): \n model_output.open_new_ts_file( self, self.d_ts_file, IDs,\n var_name='d',\n long_name='max_channel_flow_depth',\n units_name='m')\n \n if (self.SAVE_F_PIXELS): \n model_output.open_new_ts_file( self, self.f_ts_file, IDs,\n var_name='f',\n long_name='friction_factor',\n units_name='none')\n\n if (self.SAVE_DF_PIXELS): \n model_output.open_new_ts_file( self, self.d_flood_ts_file, IDs,\n var_name='d_flood',\n long_name='land_surface_water__depth',\n units_name='m')\n \n # open_output_files()\n #------------------------------------------------------------------- \n def write_output_files(self, time_seconds=None):\n\n #---------------------------------------------------------\n # Notes: This function was written to use only model\n # time (maybe from a caller) in seconds, and\n # the save_grid_dt and save_pixels_dt parameters\n # read by read_cfg_file().\n #\n # read_cfg_file() makes sure that all of\n # the \"save_dts\" are larger than or equal to the\n # process dt.\n #---------------------------------------------------------\n \n #-----------------------------------------\n # Allows time to be passed from a caller\n #-----------------------------------------\n if (time_seconds is None):\n time_seconds = self.time_sec\n model_time = int(time_seconds)\n \n #----------------------------------------\n # Save computed values at sampled times\n #----------------------------------------\n if (model_time % int(self.save_grid_dt) == 0):\n self.save_grids()\n if (model_time % int(self.save_pixels_dt) == 0):\n self.save_pixel_values()\n\n #----------------------------------------\n # Save computed values at sampled times\n #----------------------------------------\n## if ((self.time_index % self.grid_save_step) == 0):\n## self.save_grids()\n## if ((self.time_index % self.pixel_save_step) == 0):\n## self.save_pixel_values()\n \n # write_output_files()\n #------------------------------------------------------------------- \n def close_output_files(self):\n\n if (self.SAVE_Q_GRIDS): model_output.close_gs_file( self, 'Q') \n if (self.SAVE_U_GRIDS): model_output.close_gs_file( self, 'u') \n if (self.SAVE_D_GRIDS): 
model_output.close_gs_file( self, 'd') \n if (self.SAVE_F_GRIDS): model_output.close_gs_file( self, 'f')\n if (self.SAVE_DF_GRIDS): model_output.close_gs_file( self, 'd_flood')\n #---------------------------------------------------------------\n if (self.SAVE_Q_PIXELS): model_output.close_ts_file( self, 'Q') \n if (self.SAVE_U_PIXELS): model_output.close_ts_file( self, 'u') \n if (self.SAVE_D_PIXELS): model_output.close_ts_file( self, 'd') \n if (self.SAVE_F_PIXELS): model_output.close_ts_file( self, 'f')\n if (self.SAVE_DF_PIXELS): model_output.close_ts_file( self, 'd_flood')\n \n # close_output_files() \n #------------------------------------------------------------------- \n def save_grids(self):\n \n #-----------------------------------\n # Save grid stack to a netCDF file\n #---------------------------------------------\n # Note that add_grid() methods will convert\n # var from scalar to grid now, if necessary.\n #--------------------------------------------- \n if (self.SAVE_Q_GRIDS):\n model_output.add_grid( self, self.Q, 'Q', self.time_min )\n \n if (self.SAVE_U_GRIDS):\n model_output.add_grid( self, self.u, 'u', self.time_min )\n \n if (self.SAVE_D_GRIDS):\n model_output.add_grid( self, self.d, 'd', self.time_min )\n\n if (self.SAVE_F_GRIDS):\n model_output.add_grid( self, self.f, 'f', self.time_min ) \n\n if (self.SAVE_DF_GRIDS):\n model_output.add_grid( self, self.d_flood, 'd_flood', self.time_min ) \n \n # save_grids()\n #------------------------------------------------------------------- \n def save_pixel_values(self): ##### save_time_series_data(self) #######\n \n IDs = self.outlet_IDs\n time = self.time_min #####\n\n #-------------\n # New method\n #-------------\n if (self.SAVE_Q_PIXELS):\n model_output.add_values_at_IDs( self, time, self.Q, 'Q', IDs )\n \n if (self.SAVE_U_PIXELS):\n model_output.add_values_at_IDs( self, time, self.u, 'u', IDs )\n \n if (self.SAVE_D_PIXELS):\n model_output.add_values_at_IDs( self, time, self.d, 'd', IDs )\n \n if (self.SAVE_F_PIXELS):\n model_output.add_values_at_IDs( self, time, self.f, 'f', IDs )\n\n if (self.SAVE_DF_PIXELS):\n model_output.add_values_at_IDs( self, time, self.d_flood, 'd_flood', IDs )\n \n # save_pixel_values()\n #-------------------------------------------------------------------\n def manning_formula(self):\n\n #---------------------------------------------------------\n # Notes: R = (A/P) = hydraulic radius [m]\n # N = Manning's roughness coefficient\n # (usually in the range 0.012 to 0.035)\n # S = bed slope or free slope\n\n # R,S, and N may be 2D arrays.\n\n # If length units are all *feet*, then an extra\n # factor of 1.49 must be applied. If units are\n # meters, no such factor is needed.\n\n # Note that Q = Ac * u, where Ac is cross-section\n # area. 
For a trapezoid, Ac does not equal w*d.\n        #---------------------------------------------------------\n        if (self.KINEMATIC_WAVE):    \n            S = self.S_bed\n        else:    \n            S = self.S_free\n\n        u = (self.Rh ** self.two_thirds) * np.sqrt(S) / self.nval\n        \n        #--------------------------------------------------------\n        # Add a hydraulic jump option for when u gets too big ?\n        #--------------------------------------------------------\n        \n        return u\n        \n    #   manning_formula()\n    #-------------------------------------------------------------------\n    def law_of_the_wall(self):\n\n        #---------------------------------------------------------\n        # Notes: u  = flow velocity  [m/s]\n        #        d  = flow depth [m]\n        #        z0 = roughness length\n        #        S  = bed slope or free slope\n\n        #        g     = 9.81 = gravitation constant [m/s^2]\n        #        kappa = 0.41 = von Karman's constant\n        #        aval  = 0.48 = integration constant\n\n        #        law_const  = sqrt(g)/kappa = 7.6393d\n        #        smoothness = (aval / z0) * d\n        #        f = (kappa / alog(smoothness))^2d\n        #        tau_bed = rho_w * f * u^2 = rho_w * g * d * S\n\n        #        d, S, and z0 can be arrays.\n\n        #        To make default z0 correspond to default\n        #        Manning's n, can use this approximation:\n        #        z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6d\n        #        For n=0.03, this gives: z0 = 0.011417\n        #########################################################\n        #        However, for n=0.3, it gives: z0 = 11417.413\n        #        which is 11.4 km!  So the approximation only\n        #        holds within some range of values.\n        #--------------------------------------------------------\n        if (self.KINEMATIC_WAVE):    \n            S = self.S_bed\n        else:    \n            S = self.S_free\n\n        smoothness = (self.aval / self.z0val) * self.d\n        \n        #------------------------------------------------\n        # Make sure (smoothness > 1) before taking log.\n        # Should issue a warning if this is used.\n        #------------------------------------------------\n        smoothness = np.maximum(smoothness, np.float64(1.1))\n\n        u = self.law_const * np.sqrt(self.Rh * S) * np.log(smoothness)\n        \n        #--------------------------------------------------------\n        # Add a hydraulic jump option for when u gets too big ?\n        #--------------------------------------------------------\n        \n        return u\n        \n    #   law_of_the_wall()\n    #-------------------------------------------------------------------\n    def print_status_report(self): \n\n        #----------------------------------------------------\n        # Wherever depth is less than z0, assume that water\n        # is not flowing and set u and Q to zero.\n        # However, we also need (d gt 0) to avoid a divide\n        # by zero problem, even when numerators are zero.\n        #----------------------------------------------------\n        if (self.LAW_OF_WALL):\n            FLOWING = (self.d > (self.z0val / self.aval))\n        else:\n            FLOWING = (self.d > 0)   # (see note above)\n        #*** FLOWING[noflow_IDs] = False    ;******\n        \n        wflow    = np.where( FLOWING != 0 )\n        n_flow   = np.size( wflow[0] )\n        n_pixels = self.rti.n_pixels\n        percent  = np.float64(100.0) * (np.float64(n_flow) / n_pixels)\n        fstr = ('%5.1f' % percent) + '%'\n        # fstr = idl_func.string(percent, format='(F5.1)').strip() + '%'\n        print(' Percentage of pixels with flow = ' + fstr)\n        print(' ')\n\n        self.update_mins_and_maxes(REPORT=True)\n        \n        wmax  = np.where(self.Q == self.Q_max)\n        nwmax = np.size(wmax[0])\n        print(' Max(Q) occurs at: ' + str( wmax[0] ))\n        #print,' Max attained at ', nwmax, ' pixels.'\n        print(' ')\n        print('-------------------------------------------------')\n\n    #   print_status_report() \n    #-------------------------------------------------------------------
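\n    # Worked check (comments only) of the z0 <-> Manning's n\n    # approximation quoted in law_of_the_wall() above, using\n    # a = 0.48 and kappa = 0.41:\n    #   n = 0.03:  2.34 * sqrt(9.81) * 0.03 / 0.41 = 0.5363\n    #              (0.5363)^6 = 0.02379;  * 0.48  ->  0.011419\n    # which agrees with the quoted z0 = 0.011417 to rounding.\n    #-------------------------------------------------------------------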
that have nonpositive slopes and replace\n# # then with the smallest value that occurs anywhere\n# # in the input slope grid. For example, pixels on\n# # the edges of the DEM will have a slope of zero.\n# \n# # With the Kinematic Wave option, flow cannot leave\n# # a pixel that has a slope of zero and the depth\n# # increases in an unrealistic manner to create a\n# # spike in the depth grid.\n# \n# # It would be better, of course, if there were\n# # no zero-slope pixels in the DEM. We could use\n# # an \"Imposed gradient DEM\" to get slopes or some\n# # method of \"profile smoothing\".\n# \n# # It is possible for the flow code to be nonzero\n# # at a pixel that has NaN for its slope. For these\n# # pixels, we also set the slope to our min value.\n# \n# # 7/18/05. Broke this out into separate procedure.\n# #------------------------------------------------------------\n# \n# #-----------------------------------\n# # Are there any \"bad\" pixels ?\n# # If not, return with no messages.\n# #----------------------------------- \n# wb = np.where(np.logical_or((self.slope <= 0.0), \\\n# np.logical_not(np.isfinite(self.slope))))\n# nbad = np.size(wb[0])\n# print('size(slope) = ' + str(np.size(self.slope)) )\n# print('size(wb) = ' + str(nbad) )\n# \n# wg = np.where(np.invert(np.logical_or((self.slope <= 0.0), \\\n# np.logical_not(np.isfinite(self.slope)))))\n# ngood = np.size(wg[0])\n# if (nbad == 0) or (ngood == 0):\n# return\n# \n# #---------------------------------------------\n# # Find smallest positive value in slope grid\n# # and replace the \"bad\" values with smin.\n# #---------------------------------------------\n# print('-------------------------------------------------')\n# print('WARNING: Zero or negative slopes found.')\n# print(' Replacing them with smallest slope.')\n# print(' Use \"Profile smoothing tool\" instead.')\n# S_min = self.slope[wg].min()\n# S_max = self.slope[wg].max()\n# print(' min(S) = ' + str(S_min))\n# print(' max(S) = ' + str(S_max))\n# print('-------------------------------------------------')\n# print(' ')\n# self.slope[wb] = S_min\n# \n# #--------------------------------\n# # Convert data type to double ?\n# #--------------------------------\n# if (FLOAT): \n# self.slope = np.float32(self.slope)\n# else: \n# self.slope = np.float64(self.slope)\n# \n# # remove_bad_slopes0()\n #-------------------------------------------------------------------\n def remove_bad_slopes(self, FLOAT=False):\n\n #------------------------------------------------------------\n # Notes: The main purpose of this routine is to find\n # pixels that have nonpositive slopes and replace\n # then with the smallest value that occurs anywhere\n # in the input slope grid. For example, pixels on\n # the edges of the DEM will have a slope of zero.\n\n # With the Kinematic Wave option, flow cannot leave\n # a pixel that has a slope of zero and the depth\n # increases in an unrealistic manner to create a\n # spike in the depth grid.\n\n # It would be better, of course, if there were\n # no zero-slope pixels in the DEM. We could use\n # an \"Imposed gradient DEM\" to get slopes or some\n # method of \"profile smoothing\".\n\n # It is possible for the flow code to be nonzero\n # at a pixel that has NaN for its slope. For these\n # pixels, we also set the slope to our min value.\n\n # 7/18/05. 
Broke this out into separate procedure.\n #------------------------------------------------------------\n\n #------------------------\n # Are any slopes Nans ?\n #------------------------\n wnan = np.where( np.isnan( self.slope ) )\n nnan = np.size( wnan[0] )\n #-------------------------------\n # Are any slopes nonpositive ?\n #-------------------------------\n wneg = np.where( self.slope <= 0.0 )\n nneg = np.size( wneg[0] )\n #-------------------------------\n # Are any slopes infinite ?\n #-------------------------------\n winf = np.where( np.isinf( self.slope ) )\n ninf = np.size( winf[0] )\n #----------------------------\n nbad = (nnan + nneg + ninf)\n if (nbad == 0):\n return\n\n #--------------------------- \n # Merge \"wheres\" into wbad\n #---------------------------\n S_shape = self.slope.shape\n bad = np.zeros( S_shape, dtype='bool' )\n if (nnan > 0): bad[ wnan ] = True\n if (nneg > 0): bad[ wneg ] = True\n if (ninf > 0): bad[ winf ] = True\n good = np.invert( bad )\n \n #--------------------\n # Print information\n #--------------------\n print('Total number of slope values = ' + str(np.size(self.slope)) )\n print('Number of nonpositive values = ' + str(nneg) )\n print('Number of NaN values = ' + str(nnan) )\n print('Number of infinite values = ' + str(ninf) )\n\n #---------------------------------------------\n # Find smallest positive value in slope grid\n # and replace the \"bad\" values with smin.\n #---------------------------------------------\n print('-------------------------------------------------')\n print('WARNING: Zero, negative or NaN slopes found.')\n print(' Replacing them with smallest slope.')\n print(' Use \"new_slopes.py\" instead.')\n S_min = self.slope[ good ].min()\n S_max = self.slope[ good ].max()\n print(' min(S) = ' + str(S_min))\n print(' max(S) = ' + str(S_max))\n print('-------------------------------------------------')\n print(' ')\n self.slope[ bad ] = S_min\n\n #--------------------------------\n # Convert data type to double ?\n #--------------------------------\n if (FLOAT): \n self.slope = np.float32(self.slope)\n else: \n self.slope = np.float64(self.slope)\n \n # remove_bad_slopes\n #-------------------------------------------------------------------\n \n#-------------------------------------------------------------------\ndef Trapezoid_Rh(d, wb, theta):\n\n #-------------------------------------------------------------\n # Notes: Compute the hydraulic radius of a trapezoid that:\n # (1) has a bed width of wb >= 0 (0 for triangular)\n # (2) has a bank angle of theta (0 for rectangular)\n # (3) is filled with water to a depth of d.\n # The units of wb and d are meters. The units of\n # theta are assumed to be degrees and are converted.\n #-------------------------------------------------------------\n # NB! 
wb should never be zero, so PW can never be 0,\n # which would produce a NaN (divide by zero).\n #-------------------------------------------------------------\n # See Notes for TF_Tan function in utils_TF.pro\n # AW = d * (wb + (d * TF_Tan(theta_rad)) )\n #------------------------------------------------------------- \n theta_rad = (theta * np.pi / 180.0)\n \n AW = d * (wb + (d * np.tan(theta_rad)) ) \n PW = wb + (np.float64(2) * d / np.cos(theta_rad) )\n Rh = (AW / PW)\n\n w = np.where(wb <= 0)\n nw = np.size(w[0])\n \n return Rh\n\n# Trapezoid_Rh()\n#-------------------------------------------------------------------\ndef Manning_Formula(Rh, S, nval):\n\n #---------------------------------------------------------\n # Notes: R = (A/P) = hydraulic radius [m]\n # N = Manning's roughness coefficient\n # (usually in the range 0.012 to 0.035)\n # S = bed slope (assumed equal to friction slope)\n\n # R,S, and N may be 2D arrays.\n\n # If length units are all *feet*, then an extra\n # factor of 1.49 must be applied. If units are\n # meters, no such factor is needed.\n\n # Note that Q = Ac * u, where Ac is cross-section\n # area. For a trapezoid, Ac does not equal w*d.\n #---------------------------------------------------------\n ## if (N is None): N = np.float64(0.03)\n\n two_thirds = np.float64(2) / 3.0\n \n u = (Rh ** two_thirds) * np.sqrt(S) / nval\n \n #------------------------------\n # Add a hydraulic jump option\n # for when u gets too big ??\n #------------------------------\n \n return u\n\n# Manning_Formula()\n#-------------------------------------------------------------------\ndef Law_of_the_Wall(d, Rh, S, z0val):\n\n #---------------------------------------------------------\n # Notes: u = flow velocity [m/s]\n # d = flow depth [m]\n # z0 = roughness height\n # S = bed slope (assumed equal to friction slope)\n\n # g = 9.81 = gravitation constant [m/s^2]\n # kappa = 0.41 = von Karman's constant\n # aval = 0.48 = integration constant\n\n # sqrt(g)/kappa = 7.6393d\n # smoothness = (aval / z0) * d\n # f = (kappa / alog(smoothness))^2d\n # tau_bed = rho_w * f * u^2 = rho_w * g * d * S\n\n # d, S, and z0 can be arrays.\n\n # To make default z0 correspond to default\n # Manning's n, can use this approximation:\n # z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6d\n # For n=0.03, this gives: z0 = 0.011417\n # However, for n=0.3, it gives: z0 = 11417.413\n # which is 11.4 km! So the approximation only\n # holds within some range of values.\n #--------------------------------------------------------\n## if (self.z0val is None): \n## self.z0val = np.float64(0.011417) # (about 1 cm)\n\n #------------------------\n # Define some constants\n #------------------------\n g = np.float64(9.81) # (gravitation const.)\n aval = np.float64(0.476) # (integration const.)\n kappa = np.float64(0.408) # (von Karman's const.)\n law_const = np.sqrt(g) / kappa\n \n smoothness = (aval / z0val) * d\n \n #-----------------------------\n # Make sure (smoothness > 1)\n #-----------------------------\n smoothness = np.maximum(smoothness, np.float64(1.1))\n\n u = law_const * np.sqrt(Rh * S) * np.log(smoothness)\n \n #------------------------------\n # Add a hydraulic jump option\n # for when u gets too big ??\n #------------------------------\n \n return u\n\n# Law_of_the_Wall()\n#------------------------------------------------------------------- \n"
] | [
[
"numpy.minimum",
"numpy.sqrt",
"numpy.where",
"numpy.double",
"numpy.sin",
"numpy.size",
"numpy.float32",
"numpy.zeros",
"numpy.log",
"numpy.invert",
"numpy.isnan",
"numpy.tan",
"numpy.array",
"numpy.sum",
"numpy.maximum",
"numpy.isfinite",
"numpy.int32",
"numpy.cos",
"numpy.float64",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
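
The `Manning_Formula` and `Law_of_the_Wall` helpers at the end of the file above are self-contained enough to exercise directly. Below is a minimal sketch of both velocity laws; `Rh`, `S`, `nval`, `z0`, and `d` are placeholder values chosen only for illustration, while the constants match the function bodies above:

```python
import numpy as np

# Placeholder hydraulic inputs (illustration only).
Rh = np.float64(0.5)       # hydraulic radius [m]
S = np.float64(0.01)       # friction slope [-]
nval = np.float64(0.03)    # Manning's n
z0 = np.float64(0.011417)  # roughness length [m] (the default noted above)
d = np.float64(0.5)        # flow depth [m]

# Manning formula: u = Rh^(2/3) * sqrt(S) / n   (SI units, no 1.49 factor)
u_manning = (Rh ** (np.float64(2) / 3.0)) * np.sqrt(S) / nval

# Law of the wall, with the same constants as the function above.
g, aval, kappa = np.float64(9.81), np.float64(0.476), np.float64(0.408)
smoothness = np.maximum((aval / z0) * d, np.float64(1.1))
u_wall = (np.sqrt(g) / kappa) * np.sqrt(Rh * S) * np.log(smoothness)

print(u_manning, u_wall)  # roughly 2.1 m/s and 1.6 m/s for these inputs
```
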
eshanmherath/computer_vision_expert_nd | [
"46b4c4377af497fe6b6316ecf954d492148c0d01"
] | [
"06.classifying_day_and_night/01.pixel_based_run.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport helpers\n\nimage_dir_train = 'day_night_images/training'\nimage_dir_test = 'day_night_images/test'\n\nIMAGE_LIST = helpers.load_dataset(image_dir_train)\n\n# Visualize one image\n\nimage_index = 0\nselected_image = IMAGE_LIST[image_index][0]\nselected_label = IMAGE_LIST[image_index][1]\n\nprint(\"Image Shape\", selected_image.shape)\nprint(\"Image Label\", selected_label)\n\nplt.imshow(selected_image)\nplt.show()\n\n# Display a night image using label\nnight_image_index = None\nfor index, image in enumerate(IMAGE_LIST):\n if image[1] == 'night':\n night_image = image[0]\n plt.imshow(night_image)\n plt.show()\n night_image_index = index\n break\n\n\n# Display a night image using image data\n\ndef _average_pixel(im):\n return np.average(im)\n\n\nday_images_pixels = [_average_pixel(image[0]) for image in IMAGE_LIST if image[1] == 'day']\naverage_day_images = np.average(day_images_pixels)\nminimum_day_image = np.min(day_images_pixels)\nmaximum_day_image = np.max(day_images_pixels)\n\nnight_images_pixels = [_average_pixel(image[0]) for image in IMAGE_LIST if image[1] == 'night']\naverage_night_images = np.average(night_images_pixels)\nminimum_night_images = np.min(night_images_pixels)\nmaximum_night_images = np.max(night_images_pixels)\n\nprint(\"\\naverage_day_images\", average_day_images)\nprint(\"minimum_day_image\", minimum_day_image)\nprint(\"maximum_day_image\", maximum_day_image)\nprint(\"\\naverage_night_images\", average_night_images)\nprint(\"minimum_night_images\", minimum_night_images)\nprint(\"maximum_night_images\", maximum_day_image)\n\n# Crude method. There will be night images not captured\nfor image in IMAGE_LIST:\n if _average_pixel(image[0]) <= minimum_day_image:\n night_image = image[0]\n plt.imshow(night_image)\n plt.show()\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.min",
"numpy.max",
"numpy.average",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
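
The script above separates day from night frames purely by average pixel intensity, and its final loop boils down to a single brightness threshold. A tiny self-contained version of that rule follows; the arrays and the midpoint threshold are synthetic stand-ins, not the course images:

```python
import numpy as np

rng = np.random.default_rng(0)
day = rng.uniform(120, 200, size=(10, 8, 8))    # bright synthetic "images"
night = rng.uniform(10, 90, size=(10, 8, 8))    # dark synthetic "images"

# Midpoint between the dimmest day image and the brightest night image.
threshold = 0.5 * (day.mean(axis=(1, 2)).min() + night.mean(axis=(1, 2)).max())

labels = ["night" if img.mean() < threshold else "day"
          for img in np.concatenate([day, night])]
print(labels.count("night"))  # 10: the two groups are fully separated
```
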
Nozdi/masters | [
"eb4fe26ce14a731145f128b59884ba9d80000bf6"
] | [
"ladder/train.py"
] | [
"#!/usr/bin/env python\n\nimport logging\nimport os\nfrom collections import OrderedDict\nimport sys\n\nimport numpy\nimport theano\nfrom theano.tensor.type import TensorType\n\nfrom blocks.algorithms import GradientDescent, Adam\nfrom blocks.extensions import FinishAfter\nfrom blocks.extensions.monitoring import TrainingDataMonitoring\nfrom blocks.filter import VariableFilter\nfrom blocks.graph import ComputationGraph\nfrom blocks.main_loop import MainLoop\nfrom blocks.model import Model\nfrom blocks.roles import PARAMETER\nfrom fuel.schemes import ShuffledScheme, SequentialScheme\nfrom fuel.streams import DataStream\nfrom fuel.transformers import Transformer\n\nfrom picklable_itertools import cycle, imap\n\nfrom utils import ShortPrinting, prepare_dir, load_df\nfrom utils import AttributeDict\nfrom nn import ApproxTestMonitoring, FinalTestMonitoring, TestMonitoring\nfrom nn import LRDecay\nfrom ladder import LadderAE\n\nlogger = logging.getLogger('main')\n\n\nclass Whitening(Transformer):\n \"\"\" Makes a copy of the examples in the underlying dataset and whitens it\n if necessary.\n \"\"\"\n def __init__(self, data_stream, iteration_scheme, whiten, cnorm=None,\n **kwargs):\n super(Whitening, self).__init__(data_stream,\n iteration_scheme=iteration_scheme,\n **kwargs)\n data = data_stream.get_data(slice(data_stream.dataset.num_examples))\n self.data = []\n for s, d in zip(self.sources, data):\n if 'features' == s:\n # Fuel provides Cifar in uint8, convert to float32\n d = numpy.require(d, dtype=numpy.float32)\n if cnorm is not None:\n d = cnorm.apply(d)\n if whiten is not None:\n d = whiten.apply(d)\n self.data += [d]\n elif 'targets' == s:\n d = unify_labels(d)\n self.data += [d]\n else:\n raise Exception(\"Unsupported Fuel target: %s\" % s)\n\n def get_data(self, request=None):\n return (s[request] for s in self.data)\n\n\nclass SemiDataStream(Transformer):\n \"\"\" Combines two datastreams into one such that 'target' source (labels)\n is used only from the first one. The second one is renamed\n to avoid collision. Upon iteration, the first one is repeated until\n the second one depletes.\n \"\"\"\n def __init__(self, data_stream_labeled, data_stream_unlabeled, **kwargs):\n super(Transformer, self).__init__(**kwargs)\n self.ds_labeled = data_stream_labeled\n self.ds_unlabeled = data_stream_unlabeled\n # Rename the sources for clarity\n self.ds_labeled.sources = ('features_labeled', 'targets_labeled')\n # Rename the source for input pixels and hide its labels!\n self.ds_unlabeled.sources = ('features_unlabeled',)\n\n @property\n def sources(self):\n if hasattr(self, '_sources'):\n return self._sources\n return self.ds_labeled.sources + self.ds_unlabeled.sources\n\n @sources.setter\n def sources(self, value):\n self._sources = value\n\n def close(self):\n self.ds_labeled.close()\n self.ds_unlabeled.close()\n\n def reset(self):\n self.ds_labeled.reset()\n self.ds_unlabeled.reset()\n\n def next_epoch(self):\n self.ds_labeled.next_epoch()\n self.ds_unlabeled.next_epoch()\n\n def get_epoch_iterator(self, **kwargs):\n unlabeled = self.ds_unlabeled.get_epoch_iterator(**kwargs)\n labeled = self.ds_labeled.get_epoch_iterator(**kwargs)\n assert type(labeled) == type(unlabeled)\n\n return imap(self.mergedicts, cycle(labeled), unlabeled)\n\n def mergedicts(self, x, y):\n return dict(list(x.items()) + list(y.items()))\n\n\ndef unify_labels(y):\n \"\"\" Work-around for Fuel bug where MNIST and Cifar-10\n datasets have different dimensionalities for the targets:\n e.g. 
(50000, 1) vs (60000,) \"\"\"\n yshape = y.shape\n y = y.flatten()\n assert y.shape[0] == yshape[0]\n return y\n\n\ndef load_and_log_params(cli_params):\n cli_params = AttributeDict(cli_params)\n if cli_params.get('load_from'):\n p = load_df(cli_params.load_from, 'params').to_dict()[0]\n p = AttributeDict(p)\n for key in cli_params.iterkeys():\n if key not in p:\n p[key] = None\n new_params = cli_params\n loaded = True\n else:\n p = cli_params\n new_params = {}\n loaded = False\n\n # Make dseed seed unless specified explicitly\n if p.get('dseed') is None and p.get('seed') is not None:\n p['dseed'] = p['seed']\n\n logger.info('== COMMAND LINE ==')\n logger.info(' '.join(sys.argv))\n\n logger.info('== PARAMETERS ==')\n for k, v in p.iteritems():\n if new_params.get(k) is not None:\n p[k] = new_params[k]\n replace_str = \"<- \" + str(new_params.get(k))\n else:\n replace_str = \"\"\n logger.info(\" {:20}: {:<20} {}\".format(k, v, replace_str))\n return p, loaded\n\n\ndef make_datastream(dataset, indices, batch_size,\n scheme=SequentialScheme):\n\n return SemiDataStream(\n data_stream_labeled=Whitening(\n DataStream(dataset),\n iteration_scheme=scheme(indices, batch_size),\n whiten=None, cnorm=None),\n data_stream_unlabeled=Whitening(\n DataStream(dataset),\n iteration_scheme=scheme(indices, batch_size),\n whiten=None, cnorm=None)\n )\n\n\ndef setup_model(p):\n ladder = LadderAE(p)\n # Setup inputs\n input_type = TensorType('float32', [False] * (1 + 1))\n x_only = input_type('features_unlabeled')\n x = input_type('features_labeled')\n y = theano.tensor.lvector('targets_labeled')\n ladder.apply(x, y, x_only)\n\n # Load parameters if requested\n if p.get('load_from'):\n with open(p.load_from + '/trained_params.npz') as f:\n loaded = numpy.load(f)\n cg = ComputationGraph([ladder.costs.total])\n current_params = VariableFilter(roles=[PARAMETER])(cg.variables)\n logger.info('Loading parameters: %s' % ', '.join(loaded.keys()))\n for param in current_params:\n assert param.get_value().shape == loaded[param.name].shape\n param.set_value(loaded[param.name])\n\n return ladder\n\n\ndef train_ladder(cli_params, dataset=None, save_to='results/ova_all_full'):\n cli_params['save_dir'] = prepare_dir(save_to)\n logfile = os.path.join(cli_params['save_dir'], 'log.txt')\n\n # Log also DEBUG to a file\n fh = logging.FileHandler(filename=logfile)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n\n logger.info('Logging into %s' % logfile)\n\n p, loaded = load_and_log_params(cli_params)\n\n ladder = setup_model(p)\n\n # Training\n all_params = ComputationGraph([ladder.costs.total]).parameters\n logger.info('Found the following parameters: %s' % str(all_params))\n\n # Fetch all batch normalization updates. 
They are in the clean path.\n bn_updates = ComputationGraph([ladder.costs.class_clean]).updates\n assert 'counter' in [u.name for u in bn_updates.keys()], \\\n 'No batch norm params in graph - the graph has been cut?'\n\n training_algorithm = GradientDescent(\n cost=ladder.costs.total, params=all_params,\n step_rule=Adam(learning_rate=ladder.lr))\n # In addition to actual training, also do BN variable approximations\n training_algorithm.add_updates(bn_updates)\n\n short_prints = {\n \"train\": {\n 'T_C_class': ladder.costs.class_corr,\n 'T_C_de': ladder.costs.denois.values(),\n },\n \"valid_approx\": OrderedDict([\n ('V_C_class', ladder.costs.class_clean),\n ('V_E', ladder.error.clean),\n ('V_C_de', ladder.costs.denois.values()),\n ]),\n \"valid_final\": OrderedDict([\n ('VF_C_class', ladder.costs.class_clean),\n ('VF_E', ladder.error.clean),\n ('VF_C_de', ladder.costs.denois.values()),\n ]),\n }\n\n ovadataset = dataset['ovadataset']\n train_indexes = dataset['train_indexes']\n val_indexes = dataset['val_indexes']\n\n main_loop = MainLoop(\n training_algorithm,\n # Datastream used for training\n make_datastream(ovadataset, train_indexes,\n p.batch_size, scheme=ShuffledScheme),\n model=Model(ladder.costs.total),\n extensions=[\n FinishAfter(after_n_epochs=p.num_epochs),\n\n # This will estimate the validation error using\n # running average estimates of the batch normalization\n # parameters, mean and variance\n ApproxTestMonitoring(\n [ladder.costs.class_clean, ladder.error.clean] +\n ladder.costs.denois.values(),\n make_datastream(ovadataset, val_indexes,\n p.batch_size),\n prefix=\"valid_approx\"),\n\n # This Monitor is slower, but more accurate since it will first\n # estimate batch normalization parameters from training data and\n # then do another pass to calculate the validation error.\n FinalTestMonitoring(\n [ladder.costs.class_clean, ladder.error.clean_mc] +\n ladder.costs.denois.values(),\n make_datastream(ovadataset, train_indexes,\n p.batch_size),\n make_datastream(ovadataset, val_indexes,\n p.batch_size),\n prefix=\"valid_final\",\n after_n_epochs=p.num_epochs),\n\n TrainingDataMonitoring(\n [ladder.costs.total, ladder.costs.class_corr,\n training_algorithm.total_gradient_norm] +\n ladder.costs.denois.values(),\n prefix=\"train\", after_epoch=True),\n\n ShortPrinting(short_prints),\n LRDecay(ladder.lr, p.num_epochs * p.lrate_decay, p.num_epochs,\n after_epoch=True),\n ])\n main_loop.run()\n\n # Get results\n df = main_loop.log.to_dataframe()\n col = 'valid_final_error_matrix_cost'\n logger.info('%s %g' % (col, df[col].iloc[-1]))\n\n ds = make_datastream(ovadataset, val_indexes,\n p.batch_size)\n outputs = ladder.act.clean.labeled.h[len(ladder.layers) - 1]\n outputreplacer = TestMonitoring()\n _, _, outputs = outputreplacer._get_bn_params(outputs)\n\n cg = ComputationGraph(outputs)\n f = cg.get_theano_function()\n\n it = ds.get_epoch_iterator(as_dict=True)\n res = []\n inputs = {'features_labeled': [],\n 'targets_labeled': [],\n 'features_unlabeled': []}\n # Loop over one epoch\n for d in it:\n # Store all inputs\n for k, v in d.iteritems():\n inputs[k] += [v]\n # Store outputs\n res += [f(*[d[str(inp)] for inp in cg.inputs])]\n\n # Concatenate all minibatches\n res = [numpy.vstack(minibatches) for minibatches in zip(*res)]\n inputs = {k: numpy.vstack(v) for k, v in inputs.iteritems()}\n\n if main_loop.log.status['epoch_interrupt_received']:\n return None\n return res[0], inputs\n"
] | [
[
"numpy.load",
"numpy.require",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
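
`SemiDataStream` above merges a labeled and an unlabeled stream by cycling the labeled one until the unlabeled one depletes (`imap(self.mergedicts, cycle(labeled), unlabeled)`). The same pairing behaviour in plain Python, as a quick sketch with toy dictionaries:

```python
from itertools import cycle

labeled = [{"features_labeled": i, "targets_labeled": i % 2} for i in range(2)]
unlabeled = [{"features_unlabeled": 10 + i} for i in range(5)]

# zip() stops at the shorter iterator, so cycling the labeled stream means
# the iteration length is set entirely by the unlabeled stream.
merged = [{**lab, **unl} for lab, unl in zip(cycle(labeled), unlabeled)]
print(len(merged))  # 5
print(merged[2])    # labeled batch 0 paired with unlabeled batch 12
```
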
mrkwjc/cupy | [
"ae9705dcc8b59ed05a6c91fdfb401b71f7c2b224"
] | [
"tests/cupy_tests/test_cublas.py"
] | [
"import unittest\n\nimport numpy\n\nimport cupy\nfrom cupy import cublas\nfrom cupy import testing\nfrom cupy.testing import attr\n\n\[email protected](*testing.product({\n 'dtype': ['float32', 'float64', 'complex64', 'complex128'],\n 'n': [10, 33, 100],\n 'bs': [None, 1, 10],\n 'nrhs': [None, 1, 10],\n}))\[email protected]\nclass TestBatchedGesv(unittest.TestCase):\n _tol = {'f': 5e-5, 'd': 1e-12}\n\n def _make_random_matrices(self, shape, xp):\n a = testing.shaped_random(shape, xp, dtype=self.r_dtype, scale=1)\n if self.dtype.char in 'FD':\n a = a + 1j * testing.shaped_random(shape, xp, dtype=self.r_dtype,\n scale=1)\n return a\n\n def _make_well_conditioned_matrices(self, shape):\n a = self._make_random_matrices(shape, numpy)\n u, s, vh = numpy.linalg.svd(a)\n s = testing.shaped_random(s.shape, numpy, dtype=self.r_dtype,\n scale=1) + 1\n a = numpy.einsum('...ik,...k,...kj->...ij', u, s, vh)\n return cupy.array(a)\n\n def setUp(self):\n self.dtype = numpy.dtype(self.dtype)\n if self.dtype.char in 'fF':\n self.r_dtype = numpy.float32\n else:\n self.r_dtype = numpy.float64\n n = self.n\n bs = 1 if self.bs is None else self.bs\n nrhs = 1 if self.nrhs is None else self.nrhs\n a = self._make_well_conditioned_matrices((bs, n, n))\n x = self._make_random_matrices((bs, n, nrhs), cupy)\n b = cupy.matmul(a, x)\n a_shape = (n, n) if self.bs is None else (bs, n, n)\n b_shape = [n]\n if self.bs is not None:\n b_shape.insert(0, bs)\n if self.nrhs is not None:\n b_shape.append(nrhs)\n self.a = a.reshape(a_shape)\n self.b = b.reshape(b_shape)\n self.x_ref = x.reshape(b_shape)\n if self.r_dtype == numpy.float32:\n self.tol = self._tol['f']\n elif self.r_dtype == numpy.float64:\n self.tol = self._tol['d']\n\n def test_batched_gesv(self):\n x = cublas.batched_gesv(self.a, self.b)\n cupy.testing.assert_allclose(x, self.x_ref,\n rtol=self.tol, atol=self.tol)\n"
] | [
[
"numpy.linalg.svd",
"numpy.dtype",
"numpy.einsum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
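
`_make_well_conditioned_matrices` above controls the conditioning of random test matrices by replacing their singular values with draws from [1, 2). A standalone sketch of that trick; the shapes and seed are arbitrary:

```python
import numpy as np

rng = np.random.default_rng(1)
a = rng.standard_normal((4, 6, 6))   # batch of random square matrices

u, s, vh = np.linalg.svd(a)          # batched SVD
s = rng.random(s.shape) + 1          # new singular values in [1, 2)
a_wc = np.einsum('...ik,...k,...kj->...ij', u, s, vh)

# The condition number of each matrix is max(s)/min(s) < 2 by construction.
print((s.max(axis=-1) / s.min(axis=-1)).max())
```
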
FizzBuzzCoder/dowhy | [
"4e93c0d04a74c9ea8f29265dcd0c9cc3fc7f8112"
] | [
"dowhy/causal_estimators/instrumental_variable_estimator.py"
] | [
"import numpy as np\nimport sympy as sp\nimport sympy.stats as spstats\n\nfrom dowhy.causal_estimator import CausalEstimate\nfrom dowhy.causal_estimator import CausalEstimator\nfrom dowhy.causal_estimator import RealizedEstimand\n\n\nclass InstrumentalVariableEstimator(CausalEstimator):\n \"\"\"Compute effect of treatment using the instrumental variables method.\n\n This is a superclass that is inherited by other specific methods.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.logger.debug(\"Instrumental Variables used:\" +\n \",\".join(self._target_estimand.instrumental_variables))\n self._instrument_names = self._target_estimand.instrumental_variables\n\n # choosing the instrumental variable to use\n if getattr(self, 'iv_instrument_name', None) is None:\n self._instruments = self._data[self._instrument_names]\n self.estimating_instrument = self._instruments[self._instrument_names[0]]\n else:\n self.estimating_instrument = self._data[self.iv_instrument_name]\n self.logger.info(\"INFO: Using Instrumental Variable Estimator\")\n\n self.symbolic_estimator = self.construct_symbolic_estimator(self._target_estimand)\n self.logger.info(self.symbolic_estimator)\n\n def _estimate_effect(self):\n instrument = self.estimating_instrument\n self.logger.debug(\"Instrument Variable values: {0}\".format(instrument))\n num_unique_values = len(np.unique(instrument))\n instrument_is_binary = (num_unique_values <= 2)\n if instrument_is_binary:\n # Obtain estimate by Wald Estimator\n y1_z = np.mean(self._outcome[instrument == 1])\n y0_z = np.mean(self._outcome[instrument == 0])\n x1_z = np.mean(self._treatment[instrument == 1])\n x0_z = np.mean(self._treatment[instrument == 0])\n num = y1_z - y0_z\n deno = x1_z - x0_z\n iv_est = num / deno\n else:\n # Obtain estimate by Pearl (1995) ratio estimator.\n # y = x+ u; multiply both sides by z and take expectation.\n num_yz = np.dot(self._outcome, instrument)\n deno_xz = np.dot(self._treatment, instrument)\n iv_est = num_yz / deno_xz\n\n estimate = CausalEstimate(estimate=iv_est,\n target_estimand=self._target_estimand,\n realized_estimand_expr=self.symbolic_estimator)\n return estimate\n\n def construct_symbolic_estimator(self, estimand):\n sym_outcome = (spstats.Normal(estimand.outcome_variable, 0, 1))\n sym_treatment = (spstats.Normal(estimand.treatment_variable, 0, 1))\n sym_instrument = sp.Symbol(estimand.instrumental_variables[0])\n sym_outcome_derivative = sp.Derivative(sym_outcome, sym_instrument)\n sym_treatment_derivative = sp.Derivative(sym_treatment, sym_instrument)\n sym_effect = (\n spstats.Expectation(sym_outcome_derivative) /\n sp.stats.Expectation(sym_treatment_derivative)\n )\n estimator_assumptions = {\n \"treatment_effect_homogeneity\": (\n \"Each unit's treatment {0} is\".format(self._treatment_name) +\n \"affected in the same way by common causes of \"\n \"{0} and {1}\".format(self._treatment_name, self._outcome_name)\n ),\n \"outcome_effect_homogeneity\": (\n \"Each unit's outcome {0} is\".format(self._outcome_name) +\n \"affected in the same way by common causes of \"\n \"{0} and {1}\".format(self._treatment_name, self._outcome_name)\n ),\n }\n sym_assumptions = {**estimand.estimands[\"iv\"][\"assumptions\"],\n **estimator_assumptions}\n\n symbolic_estimand = RealizedEstimand(estimand,\n estimator_name=\"Wald Estimator\")\n symbolic_estimand.update_assumptions(sym_assumptions)\n symbolic_estimand.update_estimand_expression(sym_effect)\n return symbolic_estimand\n"
] | [
[
"numpy.dot",
"numpy.mean",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
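
For a binary instrument, `_estimate_effect` above reduces to the Wald ratio (E[Y|Z=1] - E[Y|Z=0]) / (E[X|Z=1] - E[X|Z=0]). A minimal sketch of that branch on fabricated data (the encouragement design and true effect of 2.0 below are invented purely for illustration):

```python
import numpy as np

rng = np.random.default_rng(0)
n = 10_000

z = rng.integers(0, 2, size=n)                     # binary instrument
x = (rng.random(n) < 0.2 + 0.5 * z).astype(float)  # treatment encouraged by z
y = 2.0 * x + rng.normal(size=n)                   # true effect = 2.0

# Wald estimator: reduced-form contrast over first-stage contrast.
num = y[z == 1].mean() - y[z == 0].mean()
den = x[z == 1].mean() - x[z == 0].mean()
print(num / den)  # close to 2.0
```
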
RyanElliott10/slope | [
"3247b04181cb6696978ce544382d97fa570b862d"
] | [
"slope/utils/preco_parser.py"
] | [
"from enum import Enum\n\nimport pandas as pd\nfrom pandas import DataFrame, read_json\n\nfrom slope.utils.decorators import setter\n\n\nclass PreCoFileType(Enum):\n DEV = 'dev.json'\n TRAIN = 'train.json'\n\n\nclass PreCoParser(object):\n '''\n Parses data from PreCo formatted files. Allows filtration of mentions on the number of referents\n are associated with a given mention.\n '''\n\n def __init__(self, file_type: PreCoFileType, singletons: bool = True):\n self.file_type = file_type\n self.singletons = singletons\n self.df = read_json(self.filepath, lines=True, encoding='ascii')\n\n def data(self) -> DataFrame:\n if not self.singletons:\n self._filter_singleton()\n return self.df\n\n def _filter_singleton(self):\n filtered = list()\n for mentions in self.df.mention_clusters:\n filtered.append([clusters for clusters in mentions if len(clusters) > 1])\n\n self.df.mention_clusters = filtered\n\n def debug_ents(self, num: int = None, show_sent: bool = False):\n ''' Prints clusters of `num` datapoints. '''\n sents = self.df.sentences\n mention_clusters = self.df.mention_clusters\n for i, sent in enumerate(sents[:num]):\n if show_sent:\n print(sent)\n print('********\\tClusters\\t********\\n')\n ent = list()\n for cluster in mention_clusters[i]:\n ent = [' '.join(sent[sent_idx][start:end]) for sent_idx, start, end in cluster]\n print(ent)\n\n @setter\n def file_type(self, f: PreCoFileType):\n vars(self)['file_type'] = f.value\n self.df = read_json(self.filepath, lines=True, encoding='ascii')\n\n @property\n def filepath(self) -> str:\n return f'../data/preco/{self.file_type}'\n\n\nif __name__ == '__main__':\n parser = PreCoParser(PreCoFileType.DEV, singletons=False)\n data = parser.data()\n"
] | [
[
"pandas.read_json"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
BASTAcode/BASTA | [
"6de8b8b866787d6745c4e77378bb94e0bab97090"
] | [
"basta/priors.py"
] | [
"\"\"\"\nDefinition of priors\n\nDefine any prior functions here that you want to use in BASTA!!\n\nThe prior function must be of the form:\nPRIOR = PRIORFUN(LIBITEM, INDEX)\n\nAny prior defined here can be used from an .xml input file.\n\"\"\"\nimport numpy as np\nfrom basta import utils_general as util\n\n\ndef salpeter1955(libitem, index):\n \"\"\"\n Initial mass function from Salpeter (1955)\n https://ui.adsabs.harvard.edu/abs/1955ApJ...121..161S\n \"\"\"\n return libitem[\"massini\"][index] ** (-2.35)\n\n\ndef millerscalo1979(libitem, index):\n \"\"\"\n Initial mass function from Miller & Scalo (1979)\n https://ui.adsabs.harvard.edu/abs/1979ApJS...41..513M\n The global normalisation is not needed as we normalise later.\n \"\"\"\n m = libitem[\"massini\"][index]\n ms = [0.1, 1, 10, 100]\n alphas = [-1.4, -2.5, -3.3]\n ks = util.normfactor(alphas, ms)\n if (ms[0] <= m) & (m < ms[1]):\n return ks[0] * m ** alphas[0]\n elif (ms[1] <= m) & (m < ms[2]):\n return ks[1] * m ** alphas[1]\n elif (ms[2] <= m) & (m < ms[3]):\n return ks[2] * m ** alphas[2]\n else:\n print(\"Mass outside range of IMF prior\")\n return 0\n\n\ndef kennicutt1994(libitem, index):\n \"\"\"\n Initial mass function from Kennicutt et al. 1994\n https://ui.adsabs.harvard.edu/abs/1994ApJ...435...22K\n The global normalisation is not needed as we normalise later.\n \"\"\"\n m = libitem[\"massini\"][index]\n ms = [0.1, 1, 100]\n alphas = [-1.4, -2.5]\n ks = util.normfactor(alphas, ms)\n if (ms[0] <= m) & (m < ms[1]):\n return ks[0] * m ** alphas[0]\n elif (ms[1] <= m) & (m < ms[2]):\n return ks[1] * m ** alphas[1]\n else:\n print(\"Mass outside range of IMF prior\")\n return 0\n\n\ndef scalo1998(libitem, index):\n \"\"\"\n Initial mass function from Scalo (1998)\n https://ui.adsabs.harvard.edu/abs/1998ASPC..142..201S\n The global normalisation is not needed as we normalise later.\n \"\"\"\n m = libitem[\"massini\"][index]\n ms = [0.1, 1, 10, 100]\n alphas = [-1.2, -2.7, -2.3]\n ks = util.normfactor(alphas, ms)\n if (ms[0] <= m) & (m < ms[1]):\n return ks[0] * m ** alphas[0]\n elif (ms[1] <= m) & (m < ms[2]):\n return ks[1] * m ** alphas[1]\n elif (ms[2] <= m) & (m < ms[3]):\n return ks[2] * m ** alphas[2]\n else:\n print(\"Mass outside range of IMF prior\")\n return 0\n\n\ndef kroupa2001(libitem, index):\n \"\"\"\n Initial mass function from Kroupa (2001)\n https://ui.adsabs.harvard.edu/abs/2001MNRAS.322..231K\n https://ui.adsabs.harvard.edu/abs/2002Sci...295...82K\n The global normalisation is not needed as we normalise later.\n \"\"\"\n m = libitem[\"massini\"][index]\n ms = [0.01, 0.08, 0.5, 1, 150]\n alphas = [-0.3, -1.3, -2.3]\n ks = util.normfactor(alphas, ms)\n if (ms[0] <= m) & (m < ms[1]):\n return ks[0] * m ** alphas[0]\n elif (ms[1] <= m) & (m < ms[2]):\n return ks[1] * m ** alphas[1]\n # This case and the last case are identical with these values\n elif (ms[2] <= m) & (m < ms[4]):\n return ks[2] * m ** alphas[2]\n else:\n print(\"Mass outside range of IMF prior\")\n return 0\n\n\ndef baldryglazebrook2003(libitem, index):\n \"\"\"\n Initial mass function from Baldry & Glazebrook (2003)\n https://ui.adsabs.harvard.edu/abs/2003ApJ...593..258B\n The global normalisation is not needed as we normalise later.\n \"\"\"\n m = libitem[\"massini\"][index]\n ms = [0.1, 0.5, 120]\n alphas = [-1.5, -2.2]\n ks = util.normfactor(alphas, ms)\n if (ms[0] <= m) & (m < ms[1]):\n return ks[0] * m ** alphas[0]\n elif (ms[1] <= m) & (m < ms[2]):\n return ks[1] * m ** alphas[1]\n else:\n print(\"Mass outside range of IMF prior\")\n 
return 0\n\n\ndef chabrier2003(libitem, index):\n \"\"\"\n Initial mass function from Chabrier (2003)\n https://ui.adsabs.harvard.edu/abs/2003PASP..115..763C/abstract\n Note that this is in linear mass space, hence the (1/m)\n \"\"\"\n m = libitem[\"massini\"][index]\n ks = [0.158, 0.0443]\n if m < 1:\n return (\n ks[0]\n * (1 / m)\n * np.exp(-0.5 * ((np.log10(m) - np.log10(0.079)) / 0.69) ** 2)\n )\n else:\n return ks[1] * m ** (-2.3)\n"
] | [
[
"numpy.log10"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
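
Every broken-power-law IMF above delegates its segment prefactors to `util.normfactor(alphas, ms)`. For the pieces to define one continuous density, adjacent segments must agree at each break mass, which pins each prefactor to the previous one. Below is a generic sketch of that matching; it is the standard continuity condition, not necessarily BASTA's exact implementation, and the overall scale is irrelevant since the priors are normalised later:

```python
def segment_norms(alphas, ms, k0=1.0):
    """Prefactors making k_i * m**alphas[i] continuous at each break.

    Continuity at break mass mb between segments i and i+1 requires
        k_i * mb**alphas[i] == k_{i+1} * mb**alphas[i+1].
    """
    ks = [k0]
    for i in range(len(alphas) - 1):
        mb = ms[i + 1]  # interior break mass
        ks.append(ks[-1] * mb ** (alphas[i] - alphas[i + 1]))
    return ks

# Kroupa (2001) breaks from the code above.
ks = segment_norms(alphas=[-0.3, -1.3, -2.3], ms=[0.01, 0.08, 0.5, 1, 150])
# Both sides of the m = 0.08 break give the same density:
print(ks[0] * 0.08 ** -0.3, ks[1] * 0.08 ** -1.3)
```
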
markovalexander/few_shot | [
"4b29ddf2efcb6c677d49bfc6eae27966343ab015"
] | [
"experiments/maml_ens_mgpu.py"
] | [
"from torch.utils.data import DataLoader\nfrom torch import nn\nimport torch.nn.functional as F\nimport argparse\nimport torch\nimport random\n\nimport sys\n\nsys.path.append('..')\n\nfrom few_shot.datasets import OmniglotDataset, MiniImageNet\nfrom few_shot.core import NShotTaskSampler, create_nshot_task_label, EvaluateFewShot, AccumulateSNR\nfrom few_shot.maml_ens_mgpu import meta_gradient_ens_step_mgpu_2order, \\\n meta_gradient_ens_step_mgpu_1order\nfrom few_shot.maml_mean_loss import meta_gradient_ens_step_mgpu_meanloss\nfrom few_shot.models import FewShotClassifier\nfrom few_shot.train import fit\nfrom few_shot.callbacks import *\nfrom few_shot.utils import setup_dirs\nfrom config import PATH\nfrom few_shot.functions import get_pred_fn, logmeanexp_preds\n\nsetup_dirs()\nassert torch.cuda.is_available()\n\ndevice = torch.device('cuda')\ntorch.backends.cudnn.benchmark = True\n\n##############\n# Parameters #\n##############\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset')\nparser.add_argument('--n', default=1, type=int)\nparser.add_argument('--k', default=5, type=int)\nparser.add_argument('--q', default=1, type=int) # Number of examples per class to calculate meta gradients with\nparser.add_argument('--inner-train-steps', default=1, type=int)\nparser.add_argument('--inner-val-steps', default=3, type=int)\nparser.add_argument('--inner-lr', default=0.4, type=float)\nparser.add_argument('--meta-lr', default=0.001, type=float)\nparser.add_argument('--meta-batch-size', default=32, type=int)\nparser.add_argument('--order', default=1, type=int,\n help=\"1, 2 or 0 (0 for mean losses)\")\nparser.add_argument('--epochs', default=50, type=int)\nparser.add_argument('--epoch-len', default=100, type=int)\nparser.add_argument('--eval-batches', default=20, type=int)\nparser.add_argument('--n-models', default=3, type=int)\nparser.add_argument('--train-pred-mode', default='mean', type=str)\nparser.add_argument('--test-pred-mode', default='same', type=str)\nparser.add_argument('--track-snr', action='store_true')\n\nargs = parser.parse_args()\n\nif args.dataset == 'omniglot':\n dataset_class = OmniglotDataset\n fc_layer_size = 64\n num_input_channels = 1\nelif args.dataset == 'miniImageNet':\n dataset_class = MiniImageNet\n fc_layer_size = 1600\n num_input_channels = 3\nelse:\n raise (ValueError('Unsupported dataset'))\n\nparam_str = f'{args.dataset}_order={args.order}_n={args.n}_k={args.k}_metabatch={args.meta_batch_size}_' \\\n f'train_steps={args.inner_train_steps}_val_steps={args.inner_val_steps}_n_models={args.n_models}_train_pred_mode={args.train_pred_mode}_' \\\n f'test_pred_mode={args.test_pred_mode}'\nprint(param_str)\n\n###################\n# Create datasets #\n###################\nbackground = dataset_class('background')\nbackground_taskloader = DataLoader(\n background,\n batch_sampler=NShotTaskSampler(background, args.epoch_len, n=args.n, k=args.k, q=args.q,\n num_tasks=args.meta_batch_size),\n num_workers=8\n)\nevaluation = dataset_class('evaluation')\nevaluation_taskloader = DataLoader(\n evaluation,\n batch_sampler=NShotTaskSampler(evaluation, args.eval_batches, n=args.n, k=args.k, q=args.q,\n num_tasks=args.meta_batch_size),\n num_workers=8\n)\n\n############\n# Training #\n############\nprint(f'Training MAML on {args.dataset}...')\n\nmodel_params = [num_input_channels, args.k, fc_layer_size]\nmeta_models = [FewShotClassifier(num_input_channels, args.k, fc_layer_size).to(device, dtype=torch.double)\n for _ in range(args.n_models)]\nmeta_optimisers = 
[torch.optim.Adam(meta_model.parameters(), lr=args.meta_lr)\n for meta_model in meta_models]\n\nloss_fn = F.nll_loss if args.order > 0 else F.cross_entropy\nif args.order == 2:\n fit_fn = meta_gradient_ens_step_mgpu_2order\nelif args.order == 1:\n fit_fn = meta_gradient_ens_step_mgpu_1order\nelse:\n fit_fn = meta_gradient_ens_step_mgpu_meanloss\n\ntrain_pred_fn, test_pred_fn = get_pred_fn(args)\n\n\ndef prepare_meta_batch(n, k, q, meta_batch_size):\n def prepare_meta_batch_(batch):\n x, y = batch\n # Reshape to `meta_batch_size` number of tasks. Each task contains\n # n*k support samples to train the fast model on and q*k query samples to\n # evaluate the fast model on and generate meta-gradients\n x = x.reshape(meta_batch_size, n * k + q * k, num_input_channels, x.shape[-2], x.shape[-1])\n # Move to device\n x = x.double().to(device)\n # Create label\n y = create_nshot_task_label(k, q).cuda().repeat(meta_batch_size)\n return x, y\n\n return prepare_meta_batch_\n\n\nReduceLRCallback = [ReduceLROnPlateau(patience=10, factor=0.5, monitor=f'val_loss', index=i)\n for i in range(len(meta_optimisers))]\nReduceLRCallback = CallbackList(ReduceLRCallback)\nif args.track_snr:\n snr_callbacks = CallbackList([SNRAccumulator(model_idx, idx) for idx, model_idx in enumerate(meta_models)])\nelse:\n snr_callbacks = Callback()\n\nhash = ''.join([chr(random.randint(97, 122)) for _ in range(3)])\ncallbacks = [\n EvaluateFewShot(\n eval_fn=fit_fn,\n num_tasks=args.eval_batches,\n n_shot=args.n,\n k_way=args.k,\n q_queries=args.q,\n taskloader=evaluation_taskloader,\n prepare_batch=prepare_meta_batch(args.n, args.k, args.q, args.meta_batch_size),\n loss_fn=loss_fn,\n # MAML kwargs\n inner_train_steps=args.inner_val_steps,\n inner_lr=args.inner_lr,\n device=device,\n order=args.order,\n model_params=model_params,\n pred_fn=test_pred_fn\n ),\n EvaluateFewShot(\n eval_fn=fit_fn,\n num_tasks=args.eval_batches,\n n_shot=args.n,\n k_way=args.k,\n q_queries=args.q,\n taskloader=evaluation_taskloader,\n prepare_batch=prepare_meta_batch(args.n, args.k, args.q, args.meta_batch_size),\n prefix=\"val_logprobs_\",\n loss_fn=F.nll_loss,\n # MAML kwargs\n inner_train_steps=args.inner_val_steps,\n inner_lr=args.inner_lr,\n device=device,\n order=args.order,\n model_params=model_params,\n pred_fn=logmeanexp_preds\n ),\n EnsembleCheckpoint(\n filepath=PATH + f'/models/maml_ens/mgpu_{param_str}.pth',\n monitor=f'val_{args.n}-shot_{args.k}-way_acc',\n hash=hash\n ),\n AccumulateSNR(eval_fn=fit_fn,\n num_tasks=args.eval_batches,\n n_shot=args.n,\n k_way=args.k,\n q_queries=args.q,\n taskloader=evaluation_taskloader,\n prepare_batch=prepare_meta_batch(args.n, args.k, args.q, args.meta_batch_size),\n loss_fn=loss_fn,\n # MAML kwargs\n inner_train_steps=args.inner_val_steps,\n inner_lr=args.inner_lr,\n device=device,\n order=args.order,\n model_params=model_params,\n pred_fn=test_pred_fn,\n n_batches=30\n ),\n # snr_callbacks,\n ReduceLRCallback,\n CSVLogger(PATH + f'/logs/maml_ens/mgpu_{param_str}.csv',\n hash=hash),\n]\n\nfit(\n meta_models,\n meta_optimisers,\n loss_fn,\n epochs=args.epochs,\n dataloader=background_taskloader,\n prepare_batch=prepare_meta_batch(args.n, args.k, args.q, args.meta_batch_size),\n callbacks=callbacks,\n metrics=['categorical_accuracy'],\n fit_function=fit_fn,\n n_models=args.n_models,\n fit_function_kwargs={'n_shot': args.n, 'k_way': args.k, 'q_queries': args.q,\n 'train': True, 'order': args.order, 'device': device,\n 'inner_train_steps': args.inner_train_steps,\n 'inner_lr': args.inner_lr, 
'model_params': model_params,\n 'pred_fn': train_pred_fn},\n)\n"
] | [
[
"torch.device",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
klecknerlab/muvi | [
"f7731af71d316678f9b52dd8771718d8d97bb02c"
] | [
"muvi/distortion.py"
] | [
"#!/usr/bin/python3\n#\n# Copyright 2020 Dustin Kleckner\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Known coordinate spaces:\n# raw: texture coordinates (0-1), in raw volume\n# corrected: texture coordinates (0-1), in corrected space\n# physical:\nimport numpy as np\n\nclass DistortionModel:\n '''Used to convert betwenn coordinate systems for distorted volumes.\n There are three basic coordinate systems you should be aware of:\n\n * `\"physical\"`: The real coordinates in physical space\n - x = (-Lx/2 -- +Lx/2) = (u - 1/2) * Lx\n - y = (-Ly/2 -- +Ly/2) = (v - 1/2) * Ly\n - z = (-Lz/2 -- +Lz/2) = (w - 1/2) * Lz\n\n * `\"raw\"`: A normalized coordinate in the raw imaged volume (corresponds directly\n to a pixel in a volume).\n - u' = up = (0 -- 1)\n - v' = vp = (0 -- 1)\n - w' = wp = (0 -- 1)\n\n * `\"corrected\"`: A normalized coordinate in a distortion corrected space.\n - u = (0 -- 1)\n - v = (0 -- 1)\n - w = (0 -- 1)\n\n * `\"index\"`: The indices of the volume. Note that the order of the indices\n is reversed, as in accessing the volume-array. (e.g. vol[k, j, i])\n - k = (0 -- Nz-1)\n - j = (0 -- Ny-1)\n - i = (0 -- Nx-1)\n\n * `\"index-xyz\"`: The indices, in \"natural\" order. This is what is\n returned by TrackPy, among other things.\n - i = (0 -- Nx-1)\n - j = (0 -- Ny-1)\n - k = (0 -- Nz-1)\n\n The main job of this class is to connect raw to corrected coordinates,\n which in general depends on the physical camera setup.\n\n The base model (refered to as \"simple\" in the glsl shader), assumes camera\n perspective and scanning angle distortion.\n\n Note: the GLSL shader fragment code for this model is\n \"perspective_model_simple.glsl\" in [MUVI SOURCE]/view/shaders\n '''\n\n NAME = 'simple'\n VARIABLES = {\n \"distortion_correction_factor\" : np.zeros(3, 'd'),\n \"vol_N\": np.ones(3, 'i'),\n \"vol_L\": np.ones(3, 'd'),\n }\n\n CORRECTED_SPACES = {\"physical\", \"corrected\"}\n RAW_SPACES = {\"raw\", \"index\", \"index-xyz\"}\n SPACES = CORRECTED_SPACES | RAW_SPACES\n\n def __init__(self, info):\n self.var = self.VARIABLES.copy()\n\n self.var['vol_N'] = np.array(info.get_list('Nx', 'Ny', 'Nz'), dtype='d')\n self.var['vol_L'] = np.array(info.get_list('Lx', 'Ly', 'Lz'), dtype='d')\n\n if 'Lx' in info and 'dx' in info:\n self.var[\"distortion_correction_factor\"][0] = info['Lx'] / info['dx']\n if 'Ly' in info and 'dy' in info:\n self.var[\"distortion_correction_factor\"][1] = info['Ly'] / info['dy']\n if 'Lz' in info and 'dz' in info:\n self.var[\"distortion_correction_factor\"][2] = info['Lz'] / info['dz']\n\n def convert(self, X, input=\"index\", output=\"physical\"):\n '''Convert coordinates between spaces.\n\n Paramaters\n ----------\n X : (..., 3) shaped array like\n The input coordinates.\n\n Keywords\n --------\n input : str (default: \"index\")\n The input coordinate space\n output : str (default: \"physical\")\n The output coordinate space\n\n Returns\n -------\n X' : (..., 3) shaped array\n The output coordinates, coverted to the new space.\n '''\n\n if input not 
in self.SPACES:\n            raise ValueError(f'input keyword should be one of {self.SPACES}')\n\n        if output not in self.SPACES:\n            raise ValueError(f'output keyword should be one of {self.SPACES}')\n\n        X = np.asarray(X, 'd')\n        if X.shape[-1] != 3:\n            raise ValueError('The last axis of the input coordinates must have size 3')\n\n\n        N = self.var['vol_N']\n        L = self.var[\"vol_L\"]\n\n        if input in self.CORRECTED_SPACES:\n            if input == \"physical\":\n                U = X / L + 0.5\n            else: # \"corrected\"\n                U = X\n            if output in self.RAW_SPACES:\n                Up = self.corrected_to_raw(U)\n\n        else:\n            if input.startswith(\"index\"):\n                if input != \"index-xyz\":\n                    # index order is (k, j, i): reverse along the last\n                    # (coordinate) axis, not the leading batch axes\n                    X = X[..., ::-1]\n                Up = (X + 0.5) / N\n            else: # \"raw\"\n                Up = X\n            if output in self.CORRECTED_SPACES:\n                U = self.raw_to_corrected(Up)\n\n        if output == \"index\":\n            # back to (k, j, i) ordering along the last axis\n            return (Up * N)[..., ::-1] - 0.5\n        elif output == \"index-xyz\":\n            return (Up * N) - 0.5\n        elif output == \"raw\":\n            return Up\n        elif output == \"physical\":\n            return (U - 0.5) * L\n        else: # \"corrected\"\n            return U\n\n    def update_data_frame(self, data, input=\"index-xyz\", output=\"physical\",\n            columns=('x', 'y', 'z'), output_columns=('xc', 'yc', 'zc')):\n        '''Perspective correct coordinates in a pandas DataFrame or\n        equivalent object.  Meant primarily to convert trackpy data.\n\n        Parameters\n        ----------\n        data : DataFrame or equivalent\n            The input data\n\n        Keywords\n        --------\n        input, output: str (default: `\"index-xyz\"`, `\"physical\"`)\n            The input/output coordinate format.  Note that unlike `convert`,\n            the default input format is `\"index-xyz\"`, as this is what\n            trackpy uses!\n        columns : iterable of 3 str (default: `('x', 'y', 'z')`)\n            The input columns to use for x/y/z\n        output_columns : iterable of 3 str (default: `('xc', 'yc', 'zc')`)\n            The output columns to use for the resulting data\n        '''\n        X = np.empty((len(data), 3), 'd')\n\n        for i, column in enumerate(columns):\n            X[:, i] = data[column]\n\n        Xc = self.convert(X, input, output)\n\n        for i, column in enumerate(output_columns):\n            data[column] = Xc[:, i]\n\n    def raw_to_corrected(self, Up):\n        '''Convert from raw to corrected coordinates.\n\n        Parameters\n        ----------\n        Up: (..., 3) shaped array, or array like\n            The raw coordinates.\n\n        Returns\n        -------\n        U: (..., 3) shaped array\n            The corrected coordinates.\n        '''\n\n        Up = np.asarray(Up, 'd')\n        if Up.shape[-1] != 3:\n            raise ValueError('The last axis of the input must have size 3')\n\n        U = np.empty(Up.shape, 'd')\n\n        dcf = self.var['distortion_correction_factor']\n        eps = 0.25 * (dcf * (1 - 2*Up))\n        eps_xy = eps[..., 0] + eps[..., 1]\n        eps_z = eps[..., 2]\n\n        U[..., 0] = Up[..., 0] * (1 + 2*eps_z ) - eps_z  * (1 + 2*eps_xy)\n        U[..., 1] = Up[..., 1] * (1 + 2*eps_z ) - eps_z  * (1 + 2*eps_xy)\n        U[..., 2] = Up[..., 2] * (1 + 2*eps_xy) - eps_xy * (1 + 2*eps_z )\n        U /= (1 - 4*eps_xy*eps_z)[..., np.newaxis]\n\n        return U\n\n    def corrected_to_raw(self, U):\n        '''Convert from corrected to raw coordinates.\n\n        Parameters\n        ----------\n        U: (..., 3) shaped array\n            The corrected coordinates.\n\n        Returns\n        -------\n        Up: (..., 3) shaped array, or array like\n            The raw coordinates.\n        '''\n\n        U = np.asarray(U)\n        if U.shape[-1] != 3:\n            raise ValueError('The last axis of the input must have size 3')\n\n        Up = np.empty(U.shape, 'd')\n\n        dcf = self.var['distortion_correction_factor']\n        eps = 0.25 * (dcf * (1 - 2*U))\n        eps_xy = eps[..., 0] + eps[..., 1]\n        eps_z = eps[..., 2]\n\n        Up[..., 0] = (U[..., 0] + eps_z ) / (1 + 2*eps_z )\n        Up[..., 1] = (U[..., 1] + eps_z ) / (1 + 2*eps_z )\n        Up[..., 2] = (U[..., 2] + eps_xy) / (1 + 2*eps_xy)\n\n        return 
Up\n\n\ndistortion_models = {\n \"simple\": DistortionModel,\n}\n\ndef get_distortion_model(info):\n '''Given VolumeProperties object, return an appropriate initialized\n DistortionModel object.\n\n Parameters\n ----------\n info : VolumeProperties object\n\n Returns\n -------\n model : DistortionModel or derived class\n\n **Note:** presently there is only one distortion model, so it only returns\n this. In future iterations multiple models may be supported.\n '''\n return DistortionModel(info)\n\nif __name__ == \"__main__\":\n import random\n from muvi import VolumeProperties\n\n info = VolumeProperties(\n Nx = 50,\n Ny = 75,\n Nz = 100,\n Lx = 20,\n Ly = 30,\n Lz = 40,\n dx = -15,\n dy = 10,\n dz = 10\n )\n\n dm = DistortionModel(info)\n spaces = list(DistortionModel.SPACES)\n\n # Stress test 1: convert back and forth between 2\n for n in range(10):\n seq = random.sample(spaces, k=2)\n X0 = np.random.rand(50, 3)\n\n if seq[0].startswith(\"index\"):\n X0 *= dm.var['vol_N']\n elif seq[0] == \"physical\":\n X0 *= dm.var['vol_L']\n\n X = dm.convert(X0, seq[0], seq[1])\n X = dm.convert(X, seq[1], seq[0])\n\n print(f'{seq[0]:9s} <-> {seq[1]:9s}: {(X - X0).std()}')\n\n # Stress test 2: convert back and forth along a sequence of 3\n for n in range(20):\n seq = random.sample(spaces, k=3)\n X0 = np.random.rand(50, 3)\n\n if seq[0].startswith(\"index\"):\n X0 *= dm.var['vol_N']\n elif seq[0] == \"physical\":\n X0 *= dm.var['vol_L']\n\n X = dm.convert(X0, seq[0], seq[1])\n X = dm.convert(X, seq[1], seq[2])\n X = dm.convert(X, seq[2], seq[0])\n\n print(f'{seq[0]:9s} -> {seq[1]:9s} -> {seq[2]:9s} -> {seq[0]:9s}: {(X - X0).std()}')\n"
] | [
[
"numpy.asarray",
"numpy.ones",
"numpy.random.rand",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
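
A useful property of the `simple` model above is that `raw_to_corrected` and `corrected_to_raw` are exact algebraic inverses, even though each evaluates `eps` in its own coordinates. A standalone numeric check of the per-axis algebra; the `dcf` values are made-up placeholders kept small so the denominators stay away from zero on the unit cube:

```python
import numpy as np

dcf = np.array([0.3, -0.2, 0.4])  # placeholder distortion factors


def corrected_to_raw(U):
    eps = 0.25 * dcf * (1 - 2 * U)
    exy, ez = eps[..., 0] + eps[..., 1], eps[..., 2]
    Up = np.empty_like(U)
    Up[..., 0] = (U[..., 0] + ez) / (1 + 2 * ez)
    Up[..., 1] = (U[..., 1] + ez) / (1 + 2 * ez)
    Up[..., 2] = (U[..., 2] + exy) / (1 + 2 * exy)
    return Up


def raw_to_corrected(Up):
    eps = 0.25 * dcf * (1 - 2 * Up)
    exy, ez = eps[..., 0] + eps[..., 1], eps[..., 2]
    U = np.empty_like(Up)
    U[..., 0] = Up[..., 0] * (1 + 2 * ez) - ez * (1 + 2 * exy)
    U[..., 1] = Up[..., 1] * (1 + 2 * ez) - ez * (1 + 2 * exy)
    U[..., 2] = Up[..., 2] * (1 + 2 * exy) - exy * (1 + 2 * ez)
    return U / (1 - 4 * exy * ez)[..., np.newaxis]


U0 = np.random.rand(100, 3)
err = np.abs(raw_to_corrected(corrected_to_raw(U0)) - U0).max()
print(err)  # on the order of 1e-16: exact inverses up to rounding
```
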
krishnatejakk/AUTOMATA | [
"fd0cf58058e39660f88d9d6b4101e30a497f6ce2"
] | [
"cords/test.py"
] | [
"import torch\n\nt = torch.rand(50, 100)\nchunked_t = torch.chunk(t, 5, dim=0)\nnew_t = []\nfor i in range(len(chunked_t)):\n new_t.append(torch.mean(chunked_t[i], dim=0).view(1, -1))\nnew_t = torch.cat(new_t, dim=0)\nprint()"
] | [
[
"torch.chunk",
"torch.mean",
"torch.rand",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
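
The chunk-and-average loop above computes the mean of every block of 10 rows. Since 50 splits evenly into 5 chunks, the same result comes from a single reshape; a quick equivalence sketch:

```python
import torch

t = torch.rand(50, 100)

# Loop version, as in the file above.
new_t = torch.cat([torch.mean(c, dim=0).view(1, -1)
                   for c in torch.chunk(t, 5, dim=0)], dim=0)

# Reshape version: (50, 100) -> (5, 10, 100), then average each block.
reshaped = t.view(5, 10, 100).mean(dim=1)

print(torch.allclose(new_t, reshaped))  # True
```
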
PinstripeWillie/CorePy | [
"d31a6021ce5a8cb3ad3643d630bbeabef47c07aa"
] | [
"CorePycodes/Corepy_plotting.py"
] | [
"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\nimport os\nimport corepytools as corepy\nimport matplotlib.patheffects as PathEffects\nimport json\nimport pandas as pd\n\n\nCoreOfStudy = 'TiptonJonas'\n\n\nRoot_path = os.path.dirname(os.getcwd())\nCorebeta=json.load(open(os.path.join(Root_path + '/CoreData/CoreBeta/' + CoreOfStudy + '.json')))\n\n\n## I need to fix this color selection part\n#corepy.ColorPalette(Corebeta['ColorScheme']) # I want to change color to json using chemofacies_color=json.load(open('ColorScheme.json'))\n\n\ninfile = open('chemocolor','rb')\nchemofacies_color= pickle.load(infile)\ninfile.close() \n\n#chemofacies_color2=json.load(open('ColorScheme.json'))\n\nFormation_names = '-'.join(Corebeta[\"Formation\"]+Corebeta[\"Formation_2\"]) # Would like to have Formation_names defined in Corebeta\n\ncoredata = corepy.OutputXRF(Corebeta['corename'],Formation_names) # This directs to the output file\n\n# This directs to the training dataset\ncoredata=coredata.sort_values(by=[Corebeta['Depth_model']])\n\n\ndirName=corepy.RootDir(Corebeta['corename'], Formation_names) \n\n## Plots made to evaluate chemofacies results \nfig, ((ax1, ax2,), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, sharex=False, sharey=False, figsize=(10,10))\n\nsns.scatterplot(x=Corebeta[\"Elements_plotted\"][1], y=Corebeta[\"Elements_plotted\"][2], hue=Corebeta[\"RockClassification\"],data=coredata, palette=chemofacies_color,ax=ax1, edgecolor='black')\nax1.legend([])\n\nsns.scatterplot(x=Corebeta[\"Elements_plotted\"][1], y=Corebeta[\"Elements_plotted\"][3], hue=Corebeta[\"RockClassification\"],data=coredata, palette=chemofacies_color,ax=ax2, edgecolor='black')\nax2.legend([])\n\nsns.scatterplot(x=Corebeta[\"Elements_plotted\"][0], y=Corebeta[\"Elements_plotted\"][4], hue=Corebeta[\"RockClassification\"],data=coredata, palette=chemofacies_color,ax=ax3, edgecolor='black')\nax3.legend([])\n\nsns.scatterplot(x=Corebeta[\"Elements_plotted\"][1], y=Corebeta[\"Elements_plotted\"][5], hue=Corebeta[\"RockClassification\"],data=coredata, palette=chemofacies_color,ax=ax4, edgecolor='black')\nax4.legend([])\n\nplt.savefig(os.path.join(dirName + '/' + Corebeta[\"corename\"] + '_' + Formation_names + '_CrossPlot' + '.png'),dpi = 300)\n\n\n##### Plot 2 plotted with respect to depth\n\nfig, axs = plt.subplots(nrows=1, ncols=5, figsize=(15,15),sharey=True)\n\nplt.subplot(1, 9, 1)\nfor i in range(len(coredata)):\n Q = [0, 0, coredata[Corebeta[\"RockClassification\"]][i], coredata[Corebeta[\"RockClassification\"]][i]]\n Z = [coredata[Corebeta[\"Depth_model\"]][i]+Corebeta[\"XRF_resolution\"], coredata[Corebeta[\"Depth_model\"]][i], coredata[Corebeta[\"Depth_model\"]][i], coredata[Corebeta[\"Depth_model\"]][i]+Corebeta[\"XRF_resolution\"]]\n \n plt.fill(Q, Z,c=chemofacies_color[coredata[Corebeta[\"RockClassification\"]][i]], linewidth=0.0)\n plt.ylim((max(coredata[Corebeta[\"Depth_model\"]]),min(coredata[Corebeta[\"Depth_model\"]])))\n plt.xlim((0,6))\n plt.xlabel(\"RockClass\", fontsize=18)\n plt.ylabel(Corebeta[\"Depth_model\"], fontsize=18)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14) \n \n \nplt.subplot(1,9,2)\ny_av = corepy.movingaverage(coredata[Corebeta[\"Elements_plotted\"][0]], Corebeta[\"moving_avg\"])\naxs=plt.plot(y_av,coredata[Corebeta[\"Depth_model\"]], color='blue')\n#plt.xlim([25,40])\nplt.ylim((max(coredata[Corebeta[\"Depth_model\"]]),min(coredata[Corebeta[\"Depth_model\"]])))\nplt.yticks([])\nplt.xticks(fontsize=14)\nplt.xlabel(Corebeta[\"Elements_plotted\"][0], 
fontsize=18)\nplt.xlabel(os.path.join(Corebeta[\"Elements_plotted\"][0]), fontsize=18)\n \n\nplt.subplot(1, 9, 3)\ny_av = corepy.movingaverage(coredata[Corebeta[\"Elements_plotted\"][1]], Corebeta[\"moving_avg\"])\naxs=plt.plot(y_av,coredata[Corebeta[\"Depth_model\"]], color='blue')\n#plt.xlim([0,10])\nplt.ylim((max(coredata[Corebeta[\"Depth_model\"]]),min(coredata[Corebeta[\"Depth_model\"]])))\nplt.yticks([])\nplt.xticks(fontsize=14)\nplt.xlabel(Corebeta[\"Elements_plotted\"][1], fontsize=18)\n\nplt.subplot(1,9, 4)\ny_av = corepy.movingaverage(coredata[Corebeta[\"Elements_plotted\"][2]], Corebeta[\"moving_avg\"])\naxs=plt.plot(y_av,coredata[Corebeta[\"Depth_model\"]], color='blue')\n#plt.xlim([0,10])\nplt.ylim((max(coredata[Corebeta[\"Depth_model\"]]),min(coredata[Corebeta[\"Depth_model\"]])))\nplt.yticks([])\nplt.xticks(fontsize=14)\nplt.xlabel(Corebeta[\"Elements_plotted\"][2], fontsize=18)\n\n\nplt.subplot(1, 9, 5)\ny_av = corepy.movingaverage(coredata[Corebeta[\"Elements_plotted\"][3]], Corebeta[\"moving_avg\"])\naxs=plt.plot(y_av,coredata[Corebeta[\"Depth_model\"]], color='blue')\nplt.ylim((max(coredata[Corebeta[\"Depth_model\"]]),min(coredata[Corebeta[\"Depth_model\"]])))\n#plt.xlim([0,175])\nplt.yticks([])\nplt.xticks(fontsize=14)\nplt.xlabel(Corebeta[\"Elements_plotted\"][3], fontsize=18)\n\nplt.subplot(1,9,6)\ny_av = corepy.movingaverage(coredata[Corebeta[\"Elements_plotted\"][4]], Corebeta[\"moving_avg\"])\naxs=plt.plot(y_av,coredata[Corebeta[\"Depth_model\"]], color='blue')\nplt.ylim((max(coredata[Corebeta[\"Depth_model\"]]),min(coredata[Corebeta[\"Depth_model\"]])))\n#plt.xlim([0,250])\nplt.yticks([])\nplt.xticks(fontsize=14)\nplt.xlabel(Corebeta[\"Elements_plotted\"][4], fontsize=18)\n\nplt.subplot(1, 9, 7)\ny_av = corepy.movingaverage(coredata[Corebeta[\"Elements_plotted\"][5]], Corebeta[\"moving_avg\"])\naxs=plt.plot(y_av,coredata[Corebeta[\"Depth_model\"]], color='blue')\nplt.ylim((max(coredata[Corebeta[\"Depth_model\"]]),min(coredata[Corebeta[\"Depth_model\"]])))\n#plt.xlim([0,250])\nplt.yticks([])\nplt.xticks(fontsize=14)\nplt.xlabel(Corebeta[\"Elements_plotted\"][5], fontsize=18)\n\nplt.subplot(1,9,8)\ny_av = corepy.movingaverage(coredata[Corebeta[\"Elements_plotted\"][6]], Corebeta[\"moving_avg\"])\naxs=plt.plot(y_av,coredata[Corebeta[\"Depth_model\"]], color='blue')\nplt.ylim((max(coredata[Corebeta[\"Depth_model\"]]),min(coredata[Corebeta[\"Depth_model\"]])))\n#plt.xlim([0,500])\nplt.yticks([])\nplt.xticks(fontsize=14)\nplt.xlabel(Corebeta[\"Elements_plotted\"][6], fontsize=18)\n\nplt.subplot(1,9,9)\ny_av = corepy.movingaverage(coredata[Corebeta[\"Elements_plotted\"][7]], Corebeta[\"moving_avg\"])\naxs=plt.plot(y_av,coredata[Corebeta[\"Depth_model\"]], color='blue')\nplt.ylim((max(coredata[Corebeta[\"Depth_model\"]]),min(coredata[Corebeta[\"Depth_model\"]])))\n#plt.xlim([0,500])\nplt.yticks([])\nplt.xticks(fontsize=14)\nplt.xlabel(Corebeta[\"Elements_plotted\"][7], fontsize=18)\n\n\nplt.savefig(os.path.join(dirName + '/' + Corebeta[\"corename\"] + '_' + Formation_names + '_Elementlog' + '.png'),dpi = 300)"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.fill",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gdhdang/pytorch-ssd | [
"c80b108cb212c517fc41cf78734612a997754450"
] | [
"vision/datasets/open_images.py"
] | [
"import numpy as np\nimport pathlib\nimport cv2\nimport pandas as pd\n\n\nclass OpenImagesDataset:\n\n def __init__(self, root,\n transform=None, target_transform=None,\n dataset_type=\"train\", balance_data=False):\n self.root = pathlib.Path(root)\n self.transform = transform\n self.target_transform = target_transform\n self.dataset_type = dataset_type.lower()\n\n self.data, self.class_names, self.class_dict = self._read_data()\n self.balance_data = balance_data\n self.min_image_num = -1\n if self.balance_data:\n self.data = self._balance_data()\n self.ids = [info['image_id'] for info in self.data]\n\n self.class_stat = None\n\n def _getitem(self, index):\n image_info = self.data[index]\n image = self._read_image(image_info['image_id'])\n boxes = image_info['boxes']\n boxes[:, 0] *= image.shape[1]\n boxes[:, 1] *= image.shape[0]\n boxes[:, 2] *= image.shape[1]\n boxes[:, 3] *= image.shape[0]\n labels = image_info['labels']\n if self.transform:\n image, boxes, labels = self.transform(image, boxes, labels)\n if self.target_transform:\n boxes, labels = self.target_transform(boxes, labels)\n return image_info['image_id'], image, boxes, labels\n\n def __getitem__(self, index):\n _, image, boxes, labels = self._getitem(index)\n return image, boxes, labels\n\n def get_annotation(self, index):\n \"\"\"To conform the eval_ssd implementation that is based on the VOC dataset.\"\"\"\n image_id, image, boxes, labels = self._getitem(index)\n is_difficult = np.zeros(boxes.shape[0], dtype=np.uint8)\n return image_id, (boxes, labels, is_difficult)\n\n def get_image(self, index):\n image_info = self.data[index]\n image = self._read_image(image_info['image_id'])\n if self.transform:\n image, _ = self.transform(image)\n return image\n\n def _read_data(self):\n annotation_file = f\"{self.root}/sub-{self.dataset_type}-annotations-bbox.csv\"\n annotations = pd.read_csv(annotation_file)\n class_names = ['BACKGROUND'] + sorted(list(annotations['ClassName'].unique()))\n class_dict = {class_name: i for i, class_name in enumerate(class_names)}\n data = []\n for image_id, group in annotations.groupby(\"ImageID\"):\n boxes = group.loc[:, [\"XMin\", \"YMin\", \"XMax\", \"YMax\"]].values.astype(np.float32)\n labels = np.array([class_dict[name] for name in group[\"ClassName\"]])\n data.append({\n 'image_id': image_id,\n 'boxes': boxes,\n 'labels': labels\n })\n return data, class_names, class_dict\n\n def __len__(self):\n return len(self.data)\n\n def __repr__(self):\n if self.class_stat is None:\n self.class_stat = {name: 0 for name in self.class_names[1:]}\n for example in self.data:\n for class_index in example['labels']:\n class_name = self.class_names[class_index]\n self.class_stat[class_name] += 1\n content = [\"Dataset Summary:\"\n f\"Number of Images: {len(self.data)}\",\n f\"Minimum Number of Images for a Class: {self.min_image_num}\",\n \"Label Distribution:\"]\n for class_name, num in self.class_stat.items():\n content.append(f\"\\t{class_name}: {num}\")\n return \"\\n\".join(content)\n\n def _read_image(self, image_id):\n image_file = self.root / self.dataset_type / f\"{image_id}.jpg\"\n image = cv2.imread(str(image_file))\n if image.shape[2] == 1:\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n def _balance_data(self):\n label_image_indexes = [set() for _ in range(len(self.class_names))]\n for i, image in enumerate(self.data):\n for label_id in image['labels']:\n label_image_indexes[label_id].add(i)\n label_stat = [len(s) for s in 
label_image_indexes]\n self.min_image_num = min(label_stat[1:])\n sample_image_indexes = set()\n for image_indexes in label_image_indexes[1:]:\n image_indexes = np.array(list(image_indexes))\n sub = np.random.permutation(image_indexes)[:self.min_image_num]\n sample_image_indexes.update(sub)\n sample_data = [self.data[i] for i in sample_image_indexes]\n return sample_data\n\n\n\n\n\n"
] | [
[
"numpy.random.permutation",
"numpy.array",
"pandas.read_csv",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ramonfmir/deblurring-public | [
"2fe98ef6c29913a369da83026a8e44c2e58514e9"
] | [
"cnn_denoiser/input_data.py"
] | [
"import sys\nsys.path.insert(0, \"../..\")\n\nimport cv2\nimport os\nimport glob\nimport numpy as np\nnp.set_printoptions(threshold=np.inf)\n\nimport scipy.misc as sc\nimport random\nfrom skimage import color\nfrom functools import partial\nfrom data_generator.blurring import corrupter\nfrom data_generator.blurring import contrast\n\n# loads the image from file into array\n# The unziped files of images must exits in the relative directory\n# ../datasets/4000unlabeledLP_same_dims_scaled\n\ndef load_images(train_path, image_size_x, image_size_y):\n images = []\n img_names = []\n path = os.path.join(train_path, '*g')\n\n # Get all files in the directory\n files = glob.glob(path)\n print('Now going to read files {}'.format(path))\n for fl in files:\n image = cv2.imread(fl)\n\n images.append(image)\n\n flbase = os.path.basename(fl)\n img_names.append(flbase)\n random.shuffle(images)\n\n # images: List of images in Array form;\n # img_names: The list of corresponding image file name;\n return data_set(images)\n\nclass data_set(object):\n def __init__(self, imgs):\n self.imgs = imgs\n self.train_set_pointer = 0\n\n # next_batch retunr tuple of unblurred image and corrupted image with set\n # blurring parameters\n def next_batch(self, batch_size):\n batch_start_index = self.train_set_pointer\n batch_end_index = batch_start_index + batch_size\n if (batch_end_index> len(self.imgs)):\n batch_start_index = 0\n batch_end_index = batch_start_index + batch_size\n self.train_set_pointer = 0\n else:\n self.train_set_pointer = batch_end_index\n batch = self.imgs[batch_start_index:batch_end_index]\n\n if self.train_set_pointer == 0:\n random.shuffle(self.imgs)\n\n # Apply blur to batch originals\n original, blurred = self.blur_batch(batch)\n\n # Normalise colour\n blurred = self.normalise_batch(blurred)\n original = self.normalise_batch(original)\n return original, blurred\n\n def normalise_image(self, image):\n return np.asarray(np.multiply(image.astype(np.float32), 1.0 / 255.0))\n\n def blur_batch(self, original_batch):\n goal_batch = []\n corrupted_batch = []\n for img in original_batch:\n goal, corrupted = corrupter.corrupt(img)\n goal = self.normalise_image(goal)\n corrupted = self.normalise_image(corrupted)\n goal_batch.append(goal)\n corrupted_batch.append(corrupted)\n\n return goal_batch, corrupted_batch\n"
] | [
[
"numpy.set_printoptions"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZhaZhaFon/asteroid_config | [
"258f8b5ba40ed2bafa53f8adee54c921f4397288"
] | [
"egs/librimix/ConvTasNet/train.py"
] | [
"\n# original codebase: https://github.com/asteroid-team/asteroid\n# modified and re-distributed by Zifeng Zhao @ Peking University\n# 2022.03\n\nimport os\nimport argparse\nimport json\n\nimport torch\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping\n\nfrom asteroid.models import ConvTasNet\nfrom asteroid.data import LibriMix\nfrom asteroid.engine.optimizers import make_optimizer\nfrom asteroid.engine.system import System\nfrom asteroid.losses import PITLossWrapper, pairwise_neg_sisdr\n\n# Keys which are not in the conf.yml file can be added here.\n# In the hierarchical dictionary created when parsing, the key `key` can be\n# found at dic['main_args'][key]\n\n# By default train.py will use all available GPUs. The `id` option in run.sh\n# will limit the number of available GPUs for train.py .\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--exp_dir\", default=\"exp/tmp\", help=\"Full path to save best validation model\")\n# 新增\nparser.add_argument(\"--conf\", type=str, required=True, help='Path to the config(.yaml) file')\nparser.add_argument(\"--resume\", type=str, help='Folder where the checkpoints were saved')\nparser.add_argument(\"--debug\", type=bool, help='debug mode')\n\ndef main(conf):\n \n print('>> 读取数据...')\n print(' >> 读取train_set...')\n train_set = LibriMix(\n csv_dir=conf[\"data\"][\"train_dir\"],\n task=conf[\"data\"][\"task\"],\n sample_rate=conf[\"data\"][\"sample_rate\"],\n n_src=conf[\"data\"][\"n_src\"],\n segment=conf[\"data\"][\"segment\"],\n )\n print(' >> 读取train_set...')\n val_set = LibriMix(\n csv_dir=conf[\"data\"][\"valid_dir\"],\n task=conf[\"data\"][\"task\"],\n sample_rate=conf[\"data\"][\"sample_rate\"],\n n_src=conf[\"data\"][\"n_src\"],\n segment=conf[\"data\"][\"segment\"],\n )\n\n print('>> 加载DataLoader...')\n print(' >> 加载train_loader...')\n train_loader = DataLoader(\n train_set,\n shuffle=True,\n batch_size=conf[\"training\"][\"batch_size\"],\n num_workers=conf[\"training\"][\"num_workers\"],\n drop_last=True,\n )\n print(' >> 加载val_loader...')\n val_loader = DataLoader(\n val_set,\n shuffle=False,\n batch_size=conf[\"training\"][\"batch_size\"],\n num_workers=conf[\"training\"][\"num_workers\"],\n drop_last=True,\n )\n conf[\"masknet\"].update({\"n_src\": conf[\"data\"][\"n_src\"]})\n\n print('>> 建立model')\n model = ConvTasNet(\n **conf[\"filterbank\"], **conf[\"masknet\"], sample_rate=conf[\"data\"][\"sample_rate\"]\n )\n print('>> 建立optimizer/scheduler')\n optimizer = make_optimizer(model.parameters(), **conf[\"optim\"])\n # Define scheduler\n scheduler = None\n if conf[\"training\"][\"half_lr\"]:\n scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5, patience=5)\n # Just after instantiating, save the args. 
Easy loading in the future.\n exp_dir = conf[\"main_args\"][\"exp_dir\"]\n print(f'>> 实验保存到{exp_dir}')\n os.makedirs(exp_dir, exist_ok=True)\n conf_path = os.path.join(exp_dir, conf['main_args']['conf'].split('/')[-1])\n with open(conf_path, \"w\") as outfile:\n yaml.safe_dump(conf, outfile)\n\n # Define Loss function.\n print(f'>> 建立system')\n loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from=\"pw_mtx\")\n system = System(\n model=model,\n loss_func=loss_func,\n optimizer=optimizer,\n train_loader=train_loader,\n val_loader=val_loader,\n scheduler=scheduler,\n config=conf,\n )\n\n # Define callbacks\n callbacks = []\n checkpoint_dir = os.path.join(exp_dir, \"checkpoints/\")\n checkpoint = ModelCheckpoint(\n checkpoint_dir, monitor=\"val_loss\", mode=\"min\", save_top_k=1, verbose=True\n )\n callbacks.append(checkpoint)\n if conf[\"training\"][\"early_stop\"]:\n callbacks.append(EarlyStopping(monitor=\"val_loss\", mode=\"min\", patience=30, verbose=True))\n\n # Don't ask GPU if they are not available.\n gpus = -1 if torch.cuda.is_available() else None\n distributed_backend = \"ddp\" if torch.cuda.is_available() else None\n\n resume_from_checkpoint = None\n if conf['main_args']['resume']:\n resume_from_checkpoint = os.path.join(conf['main_args']['resume'], 'last.ckpt')\n print(f'>> 加载断点 {resume_from_checkpoint}...')\n max_epoch = conf[\"training\"][\"epochs\"]\n if conf['main_args']['debug']:\n max_epoch = 5\n \n print('>> 建立trainer...')\n trainer = pl.Trainer(\n max_epochs=max_epoch,\n callbacks=callbacks,\n checkpoint_callback=checkpoint,\n default_root_dir=exp_dir,\n gpus=gpus,\n distributed_backend=distributed_backend,\n limit_train_batches=1.0, # Useful for fast experiment\n gradient_clip_val=5.0,\n resume_from_checkpoint=resume_from_checkpoint,\n )\n \n print('>> 启动trianer.fit()...')\n print('')\n print('### START TRAINING ###')\n print('')\n trainer.fit(system)\n\n best_k = {k: v.item() for k, v in checkpoint.best_k_models.items()}\n with open(os.path.join(exp_dir, \"best_k_models.json\"), \"w\") as f:\n json.dump(best_k, f, indent=0)\n\n state_dict = torch.load(checkpoint.best_model_path)\n system.load_state_dict(state_dict=state_dict[\"state_dict\"])\n system.cpu()\n\n print(f'>> trainer.fit()完成 best_model保存到{os.path.join(exp_dir, \"best_model.pth\")}')\n to_save = system.model.serialize()\n to_save.update(train_set.get_infos())\n torch.save(to_save, os.path.join(exp_dir, \"best_model.pth\"))\n \n print('')\n print('### TRAINING COMPLETED ###')\n print('')\n from IPython import embed\n embed()\n \n torch.cuda.empty_cache()\n print('')\n print('done.')\n\nif __name__ == \"__main__\":\n import yaml\n from pprint import pprint\n from asteroid.utils import prepare_parser_from_dict, parse_args_as_dict\n\n # We start with opening the config file conf.yml as a dictionary from\n # which we can create parsers. Each top level key in the dictionary defined\n # by the YAML file creates a group in the parser.\n args = parser.parse_args()\n print('')\n print(f'>> 解析超参数 {parser}')\n with open(args.conf) as f:\n def_conf = yaml.safe_load(f)\n parser = prepare_parser_from_dict(def_conf, parser=parser)\n # Arguments are then parsed into a hierarchical dictionary (instead of\n # flat, as returned by argparse) to facilitate calls to the different\n # asteroid methods (see in main).\n # plain_args is the direct output of parser.parse_args() and contains all\n # the attributes in an non-hierarchical structure. 
It can be useful to also\n # have it so we included it here but it is not used.\n arg_dic, plain_args = parse_args_as_dict(parser, return_plain_args=True)\n pprint(arg_dic)\n main(arg_dic)\n"
] | [
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load",
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thaolmk54/LOGNet-VQA | [
"360d43c65bc854606cd9d4c06fd8bf20eb7be46b"
] | [
"exp_gqa/train.py"
] | [
"import os, sys\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport numpy as np\nimport argparse\nimport time\nimport logging\nfrom termcolor import colored\n\nimport torch.backends.cudnn as cudnn\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')\nlogFormatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\nrootLogger = logging.getLogger()\n\nfrom DataLoader import GQADataLoader\nfrom utils import todevice, MyDataParallel\nfrom validate import validate\n\nfrom model.LOGNet import LOGNet\nfrom config import cfg, cfg_from_file\n\n\ndef train(cfg):\n logging.info(\"Create train_loader and val_loader.........\")\n train_loader_kwargs = {\n 'question_pt': cfg.dataset.train_question,\n 'vocab_json': cfg.dataset.vocab_json,\n 'object_feature': cfg.dataset.train_object_feature,\n 'spatial_feature': cfg.dataset.train_spatial_feature,\n 'img_info': cfg.dataset.img_info,\n 'train_num': cfg.train.train_num,\n 'batch_size': cfg.train.batch_size,\n 'num_workers': cfg.num_workers,\n 'shuffle': True\n }\n train_loader = GQADataLoader(**train_loader_kwargs)\n logging.info(\"number of train instances: {}\".format(len(train_loader.dataset)))\n val_loader_kwargs = {\n 'question_pt': cfg.dataset.val_question,\n 'vocab_json': cfg.dataset.vocab_json,\n 'object_feature': cfg.dataset.val_object_feature,\n 'spatial_feature': cfg.dataset.val_spatial_feature,\n 'img_info': cfg.dataset.img_info,\n 'val_num': cfg.val.val_num,\n 'batch_size': cfg.train.batch_size,\n 'num_workers': cfg.num_workers,\n 'shuffle': False\n }\n val_loader = GQADataLoader(**val_loader_kwargs)\n logging.info(\"number of val instances: {}\".format(len(val_loader.dataset)))\n\n logging.info(\"Create model.........\")\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n logging.info(\"device: {}\".format(device))\n\n model_kwargs = {\n 'img_size': cfg.train.vision_dim,\n 'max_step': cfg.train.net_length,\n 'vocab': train_loader.vocab,\n }\n logging.info(\"net_len: {}\".format(model_kwargs['max_step']))\n model_kwargs_tosave = {k: v for k, v in model_kwargs.items() if k != 'vocab'}\n model = LOGNet(cfg, **model_kwargs).to(device)\n pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n logging.info('num of params: {}'.format(pytorch_total_params))\n\n logging.info(model)\n if cfg.train.glove:\n logging.info('load glove vectors')\n model.input_unit.encoder_embed.weight.data.copy_(torch.from_numpy(train_loader.glove_matrix))\n\n if torch.cuda.device_count() > 1 and cfg.multi_gpus:\n model = model.cuda()\n logging.info(\"Using {} GPUs\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model, device_ids=None)\n ################################################################\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), cfg.train.lr)\n\n start_epoch = 0\n best_val = 0\n if cfg.train.restore:\n print(\"Restore checkpoint and optimizer...\")\n ckpt = os.path.join(cfg.dataset.save_dir, 'ckpt', 'model.pt')\n ckpt = torch.load(ckpt, map_location=lambda storage, loc: storage)\n start_epoch = ckpt['epoch'] + 1\n model.load_state_dict(ckpt['state_dict'])\n optimizer.load_state_dict(ckpt['optimizer'])\n criterion = nn.CrossEntropyLoss().to(device)\n logging.info(\"Start training........\")\n for epoch in range(start_epoch, cfg.train.max_epochs):\n logging.info('>>>>>> epoch {epoch} <<<<<<'.format(epoch=colored(\"{}\".format(epoch), \"green\", attrs=[\"bold\"])))\n model.train()\n # 
set learning rate warmup -> refer https://github.com/KaihuaTang/VQA2.0-Recent-Approachs-2018.pytorch/blob/master/train.py\n optimizer = lr_scheduling(cfg, epoch, optimizer)\n total_acc, count = 0, 0\n total_loss, avg_loss = 0.0, 0.0\n for i, batch in enumerate(train_loader):\n sorted_indices = np.argsort(-batch[4])\n for id_ in range(len(batch)):\n batch[id_] = batch[id_][sorted_indices]\n progress = epoch + i / len(train_loader)\n _, img_ids, answers, *batch_input = [todevice(x, device) for x in batch]\n answers = answers.cuda().squeeze()\n optimizer.zero_grad()\n logits, _ = model(*batch_input, vis=False)\n ##################### loss #####################\n loss = criterion(logits, answers)\n loss.backward()\n total_loss += loss.detach()\n avg_loss = total_loss / (i + 1)\n #################################################\n nn.utils.clip_grad_norm_(model.parameters(), max_norm=8)\n optimizer.step()\n train_acc = batch_accuracy(logits, answers)\n total_acc += train_acc.sum().item()\n count += answers.size(0)\n avg_acc = total_acc / count\n sys.stdout.write(\n \"\\rProgress = {progress} ce_loss = {ce_loss} avg_loss = {avg_loss} train_acc = {train_acc} avg_acc = {avg_acc} exp: {exp_name}\".format(\n progress=colored(\"{:.3f}\".format(progress), \"green\", attrs=['bold']),\n ce_loss=colored(\"{:.4f}\".format(loss.item()), \"blue\", attrs=['bold']),\n avg_loss=colored(\"{:.4f}\".format(avg_loss), \"red\", attrs=['bold']),\n train_acc=colored(\"{:.4f}\".format(train_acc.mean().cpu().numpy()), \"blue\", attrs=['bold']),\n avg_acc=colored(\"{:.4f}\".format(avg_acc), \"red\", attrs=['bold']), exp_name=cfg.exp_name))\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n logging.info(\"Epoch = %s avg_loss = %.3f avg_acc = %.3f\" % (epoch, avg_loss, avg_acc))\n\n if cfg.val.flag:\n output_dir = os.path.join(cfg.dataset.save_dir, 'preds')\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n else:\n assert os.path.isdir(output_dir)\n valid_acc = validate(cfg, model, val_loader, device)\n if valid_acc > best_val:\n best_val = valid_acc\n # Save best model\n ckpt_dir = os.path.join(cfg.dataset.save_dir, 'ckpt')\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n else:\n assert os.path.isdir(ckpt_dir)\n save_checkpoint(epoch, model, optimizer, model_kwargs_tosave, os.path.join(ckpt_dir, 'model.pt'))\n sys.stdout.write('\\n >>>>>> save to %s <<<<<< \\n' % (ckpt_dir))\n sys.stdout.flush()\n\n logging.info('~~~~~~ Valid Accuracy: %.4f ~~~~~~~' % valid_acc)\n sys.stdout.write('~~~~~~ Valid Accuracy: {valid_acc} ~~~~~~~\\n'.format(\n valid_acc=colored(\"{:.4f}\".format(valid_acc), \"red\", attrs=['bold'])))\n sys.stdout.flush()\n\n\ndef lr_scheduling(cfg, epoch, optimizer):\n if epoch < len(cfg.train.gradual_warmup_steps) and cfg.train.schedule_method == 'warm_up':\n all_rl = []\n for param_group in optimizer.param_groups:\n param_group['lr'] = cfg.train.gradual_warmup_steps[epoch]\n all_rl.append(param_group['lr'])\n print('Epoch {:03d}:'.format(epoch), ' Learning Rate: ', set(all_rl))\n elif (epoch in range(cfg.train.epoch_decay_start, cfg.train.max_epochs, cfg.train.lr_decay_step)) and cfg.train.schedule_method \\\n == 'warm_up':\n all_rl = []\n for param_group in optimizer.param_groups:\n param_group['lr'] *= cfg.train.lr_decay_rate\n all_rl.append(param_group['lr'])\n print('Epoch {:03d}:'.format(epoch), ' Learning Rate: ', set(all_rl))\n else:\n all_rl = []\n for param_group in optimizer.param_groups:\n all_rl.append(param_group['lr'])\n print('Epoch 
{:03d}:'.format(epoch), ' Learning Rate: ', set(all_rl))\n\n return optimizer\n\n\ndef batch_accuracy(pred, gt):\n \"\"\" Compute the accuracies for a batch of predictions and answers \"\"\"\n pred = pred.detach().argmax(1)\n correctness = (pred == gt)\n return correctness.float()\n\n\ndef save_checkpoint(epoch, model, optimizer, model_kwargs, filename):\n state = {\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'model_kwargs': model_kwargs,\n }\n time.sleep(10)\n torch.save(state, filename)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', dest='cfg_file', help='optional config file', default='configs/gqa.yml', type=str)\n args = parser.parse_args()\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n\n if torch.cuda.is_available() and not cfg.multi_gpus:\n torch.cuda.set_device(cfg.gpu_id)\n\n # make logging.info display into both shell and file\n cfg.dataset.save_dir = os.path.join(cfg.dataset.save_dir, cfg.exp_name)\n if not os.path.exists(cfg.dataset.save_dir):\n os.makedirs(cfg.dataset.save_dir)\n else:\n assert os.path.isdir(cfg.dataset.save_dir)\n log_file = os.path.join(cfg.dataset.save_dir, \"log\")\n if not cfg.train.restore and not os.path.exists(log_file):\n os.mkdir(log_file)\n else:\n assert os.path.isdir(log_file)\n\n fileHandler = logging.FileHandler(os.path.join(log_file, 'stdout.log'), 'w+')\n fileHandler.setFormatter(logFormatter)\n rootLogger.addHandler(fileHandler)\n # cfg display\n for k, v in vars(cfg).items():\n logging.info(k + ':' + str(v))\n # join absolute paths of input files\n cfg.dataset.train_question = os.path.join(cfg.dataset.data_dir, cfg.dataset.train_question)\n cfg.dataset.val_question = os.path.join(cfg.dataset.data_dir, cfg.dataset.val_question)\n cfg.dataset.vocab_json = os.path.join(cfg.dataset.data_dir, cfg.dataset.vocab_json)\n cfg.dataset.train_object_feature = os.path.join(cfg.dataset.data_dir, cfg.dataset.train_object_feature)\n cfg.dataset.val_object_feature = os.path.join(cfg.dataset.data_dir, cfg.dataset.val_object_feature)\n cfg.dataset.train_spatial_feature = os.path.join(cfg.dataset.data_dir, cfg.dataset.train_spatial_feature)\n cfg.dataset.val_spatial_feature = os.path.join(cfg.dataset.data_dir, cfg.dataset.val_spatial_feature)\n cfg.dataset.img_info = os.path.join(cfg.dataset.data_dir, cfg.dataset.img_info)\n\n # set random seed\n def seed_torch(seed=cfg.seed):\n np.random.seed(seed)\n\n os.environ['PYTHONHASHSEED'] = str(cfg.seed)\n np.random.seed(cfg.seed)\n torch.manual_seed(cfg.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(cfg.seed)\n torch.cuda.manual_seed_all(cfg.seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n seed_torch()\n\n cfg.train.flag = True\n train(cfg)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"numpy.random.seed",
"torch.load",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.cuda.device_count",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"numpy.argsort",
"torch.nn.DataParallel",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Aravindh25/srm-event | [
"c5a4b0e765ee86a7a11f0c35b66acc6eeba3006d"
] | [
"captcha.py"
] | [
"from captcha.image import ImageCaptcha\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nimport random\r\nimport os\r\nnumber=['0','1','2','3','4','5','6','7','8','9']\r\nMAX_CAPTCHA=6\r\nWIDTH=100\r\nHEIGHT=30\r\nimage=ImageCaptcha(width=WIDTH,height=HEIGHT,font_sizes=[30])\r\ncaptcha_text=[]\r\nfor i in range (MAX_CAPTCHA):\r\n c=random.choice(number)\r\n captcha_text.append(c)\r\n print(captcha_text)\r\ncaptcha_text=''.join(captcha_text)\r\nprint(captcha_text) \r\ncaptcha=image.generate(captcha_text)\r\ncaptcha_image=Image.open(captcha)\r\ncaptcha_image=np.array(captcha_image)\r\nimage.write(captcha_text,str(i)+'_'+captcha_text+'.png')\r\nplt.imshow(captcha_image)\r\nplt.show()"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Eyalcohenx/tonic | [
"afc15c6fa23fed4f696f68f0acf961964b0172dc"
] | [
"tonic/torch/agents/ddpg.py"
] | [
"import torch\n\nfrom tonic import explorations, logger, replays # noqa\nfrom tonic.torch import agents, models, normalizers, updaters\n\n\ndef default_model():\n return models.ActorCriticWithTargets(\n actor=models.Actor(\n encoder=models.ObservationEncoder(),\n torso=models.MLP((256, 256), torch.nn.ReLU),\n head=models.DeterministicPolicyHead()),\n critic=models.Critic(\n encoder=models.ObservationActionEncoder(),\n torso=models.MLP((256, 256), torch.nn.ReLU),\n head=models.ValueHead()),\n observation_normalizer=normalizers.MeanStd())\n\n\nclass DDPG(agents.Agent):\n '''Deep Deterministic Policy Gradient.\n DDPG: https://arxiv.org/pdf/1509.02971.pdf\n '''\n\n def __init__(\n self, model=None, replay=None, exploration=None, actor_updater=None,\n critic_updater=None\n ):\n self.model = model or default_model()\n self.replay = replay or replays.Buffer()\n self.exploration = exploration or explorations.NormalActionNoise()\n self.actor_updater = actor_updater or \\\n updaters.DeterministicPolicyGradient()\n self.critic_updater = critic_updater or \\\n updaters.DeterministicQLearning()\n\n def initialize(self, observation_space, action_space, seed=None):\n super().initialize(seed=seed)\n self.model.initialize(observation_space, action_space)\n self.replay.initialize(seed)\n self.exploration.initialize(self._policy, action_space, seed)\n self.actor_updater.initialize(self.model)\n self.critic_updater.initialize(self.model)\n self.steps = 0\n\n def step(self, observations):\n # Get actions from the actor and exploration method.\n actions = self.exploration(observations, self.steps)\n\n # Keep some values for the next update.\n self.last_observations = observations.copy()\n self.last_actions = actions.copy()\n self.steps += len(observations)\n\n return actions\n\n def test_step(self, observations):\n # Greedy actions for testing.\n return self._greedy_actions(observations).numpy()\n\n def update(self, observations, rewards, resets, terminations):\n # Store the last transitions in the replay.\n self.replay.store(\n observations=self.last_observations, actions=self.last_actions,\n next_observations=observations, rewards=rewards, resets=resets,\n terminations=terminations)\n\n # Prepare to update the normalizers.\n if self.model.observation_normalizer:\n self.model.observation_normalizer.record(self.last_observations)\n if self.model.return_normalizer:\n self.model.return_normalizer.record(rewards)\n\n # Update the model if the replay is ready.\n if self.replay.ready():\n self._update()\n\n self.exploration.update(resets)\n\n def _greedy_actions(self, observations):\n observations = torch.as_tensor(observations, dtype=torch.float32)\n with torch.no_grad():\n return self.model.actor(observations)\n\n def _policy(self, observations):\n return self._greedy_actions(observations).numpy()\n\n def _update(self):\n keys = ('observations', 'actions', 'next_observations', 'rewards',\n 'discounts')\n\n # Update both the actor and the critic multiple times.\n for batch in self.replay.get(*keys):\n batch = {k: torch.as_tensor(v) for k, v in batch.items()}\n infos = self._update_actor_critic(**batch)\n\n for key in infos:\n for k, v in infos[key].items():\n logger.store(key + '/' + k, v.numpy())\n\n # Update the normalizers.\n if self.model.observation_normalizer:\n self.model.observation_normalizer.update()\n if self.model.return_normalizer:\n self.model.return_normalizer.update()\n\n def _update_actor_critic(\n self, observations, actions, next_observations, rewards, discounts\n ):\n critic_infos = 
self.critic_updater(\n observations, actions, next_observations, rewards, discounts)\n actor_infos = self.actor_updater(observations)\n self.model.update_targets()\n return dict(critic=critic_infos, actor=actor_infos)\n"
] | [
[
"torch.no_grad",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
biubiubiiu/derain-toolbox | [
"1669138d1aaa72c986d70d03f9cde7dbbbb70fa1"
] | [
"mmderain/models/losses/gradient_loss.py"
] | [
"# This code is taken from https://github.com/open-mmlab/mmediting\n# Modified by Raymond Wong\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..common import conv_gauss, pyr_downsample\nfrom ..registry import LOSSES\nfrom .pixelwise_loss import charbonnier_loss, l1_loss\n\n_reduction_modes = ['none', 'mean', 'sum']\n\n\[email protected]_module()\nclass GradientLoss(nn.Module):\n \"\"\"Gradient loss.\n\n Args:\n loss_weight (float): Loss weight for L1 loss. Default: 1.0.\n reduction (str): Specifies the reduction to apply to the output.\n Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.\n \"\"\"\n\n def __init__(self, loss_weight=1.0, reduction='mean'):\n super().__init__()\n self.loss_weight = loss_weight\n self.reduction = reduction\n if self.reduction not in ['none', 'mean', 'sum']:\n raise ValueError(f'Unsupported reduction mode: {self.reduction}. '\n f'Supported ones are: {_reduction_modes}')\n\n def forward(self, pred, target, weight=None):\n \"\"\"\n Args:\n pred (Tensor): of shape (N, C, H, W). Predicted tensor.\n target (Tensor): of shape (N, C, H, W). Ground truth tensor.\n weight (Tensor, optional): of shape (N, C, H, W). Element-wise\n weights. Default: None.\n \"\"\"\n kx = torch.Tensor([[1, 0, -1], [2, 0, -2],\n [1, 0, -1]]).view(1, 1, 3, 3).to(target)\n ky = torch.Tensor([[1, 2, 1], [0, 0, 0],\n [-1, -2, -1]]).view(1, 1, 3, 3).to(target)\n\n pred_grad_x = F.conv2d(pred, kx, padding=1)\n pred_grad_y = F.conv2d(pred, ky, padding=1)\n target_grad_x = F.conv2d(target, kx, padding=1)\n target_grad_y = F.conv2d(target, ky, padding=1)\n\n loss = (\n l1_loss(\n pred_grad_x, target_grad_x, weight, reduction=self.reduction) +\n l1_loss(\n pred_grad_y, target_grad_y, weight, reduction=self.reduction))\n return loss * self.loss_weight\n\n\[email protected]_module()\nclass EdgeLoss(nn.Module):\n \"\"\"Edge loss.\n\n Paper: Multi-Stage Progressive Image Restoration.\n\n Args:\n loss_weight (float): Loss weight for L1 loss. Default: 1.0.\n eps (float): A value used to control the curvature near zero.\n Default: 1e-12.\n reduction (str): Specifies the reduction to apply to the output.\n Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.\n \"\"\"\n\n def __init__(self, loss_weight=1.0, eps=1e-12, reduction='mean'):\n super().__init__()\n\n if reduction not in ['none', 'mean', 'sum']:\n raise ValueError(f'Unsupported reduction mode: {reduction}. '\n f'Supported ones are: {_reduction_modes}')\n\n self.loss_weight = loss_weight\n self.eps = eps\n self.reduction = reduction\n\n k = torch.Tensor([[.05, .25, .4, .25, .05]])\n self.kernel = torch.matmul(k.T, k).unsqueeze(0).repeat(3, 1, 1, 1)\n\n def laplacian(self, x):\n gauss_kernel = self.kernel.to(x)\n filtered = conv_gauss(x, gauss_kernel)\n down = pyr_downsample(filtered)\n new_filter = torch.zeros_like(filtered)\n new_filter[:, :, ::2, ::2] = down*4\n filtered = conv_gauss(new_filter, gauss_kernel)\n diff = x - filtered\n return diff\n\n def forward(self, pred, target, weight=None):\n return self.loss_weight * charbonnier_loss(\n self.laplacian(pred),\n self.laplacian(target),\n weight=weight,\n eps=self.eps\n )\n"
] | [
[
"torch.matmul",
"torch.nn.functional.conv2d",
"torch.zeros_like",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kgullikson88/holoviews | [
"942c5ac7db46d1bc04b21a1fcf837285bbc9bde8",
"942c5ac7db46d1bc04b21a1fcf837285bbc9bde8"
] | [
"holoviews/tests/plotting/matplotlib/test_vectorfieldplot.py",
"holoviews/tests/core/data/test_spatialpandas.py"
] | [
"import numpy as np\n\nfrom holoviews.core.spaces import HoloMap\nfrom holoviews.element import VectorField\n\nfrom .test_plot import TestMPLPlot, mpl_renderer\nfrom ..utils import ParamLogStream\n\n\nclass TestVectorFieldPlot(TestMPLPlot):\n\n ###########################\n # Styling mapping #\n ###########################\n\n def test_vectorfield_color_op(self):\n vectorfield = VectorField([(0, 0, 0, 1, '#000000'), (0, 1, 0, 1,'#FF0000'), (0, 2, 0, 1,'#00FF00')],\n vdims=['A', 'M', 'color']).options(color='color')\n plot = mpl_renderer.get_plot(vectorfield)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_facecolors(), np.array([\n [0, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1]\n ]))\n\n def test_vectorfield_color_op_update(self):\n vectorfield = HoloMap({\n 0: VectorField([(0, 0, 0, 1, '#000000'), (0, 1, 0, 1, '#FF0000'), (0, 2, 0, 1, '#00FF00')],\n vdims=['A', 'M', 'color']),\n 1: VectorField([(0, 0, 0, 1, '#0000FF'), (0, 1, 0, 1, '#00FF00'), (0, 2, 0, 1, '#FF0000')],\n vdims=['A', 'M', 'color'])}).options(color='color')\n plot = mpl_renderer.get_plot(vectorfield)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_facecolors(), np.array([\n [0, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1]\n ]))\n plot.update((1,))\n self.assertEqual(artist.get_facecolors(), np.array([\n [0, 0, 1, 1],\n [0, 1, 0, 1],\n [1, 0, 0, 1]\n ]))\n\n def test_vectorfield_linear_color_op_update(self):\n vectorfield = HoloMap({\n 0: VectorField([(0, 0, 0, 1, 0), (0, 1, 0, 1, 1), (0, 2, 0, 1, 2)],\n vdims=['A', 'M', 'color']),\n 1: VectorField([(0, 0, 0, 1, 3.2), (0, 1, 0, 1, 2), (0, 2, 0, 1, 4)],\n vdims=['A', 'M', 'color'])}).options(color='color', framewise=True)\n plot = mpl_renderer.get_plot(vectorfield)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_array(), np.array([0, 1, 2]))\n self.assertEqual(artist.get_clim(), (0, 2))\n plot.update((1,))\n self.assertEqual(artist.get_array(), np.array([3.2, 2, 4]))\n self.assertEqual(artist.get_clim(), (2, 4))\n\n def test_vectorfield_categorical_color_op(self):\n vectorfield = VectorField([(0, 0, 0, 1, 'A'), (0, 1, 0, 1, 'B'), (0, 2, 0, 1, 'C')],\n vdims=['A', 'M', 'color']).options(color='color')\n plot = mpl_renderer.get_plot(vectorfield)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_array(), np.array([0, 1, 2]))\n self.assertEqual(artist.get_clim(), (0, 2))\n\n def test_vectorfield_alpha_op(self):\n vectorfield = VectorField([(0, 0, 0, 1, 0), (0, 1, 0, 1, 0.2), (0, 2, 0, 1, 0.7)],\n vdims=['A', 'M', 'alpha']).options(alpha='alpha')\n with self.assertRaises(Exception):\n mpl_renderer.get_plot(vectorfield)\n\n def test_vectorfield_line_width_op(self):\n vectorfield = VectorField([(0, 0, 0, 1, 1), (0, 1, 0, 1, 4), (0, 2, 0, 1, 8)],\n vdims=['A', 'M', 'line_width']).options(linewidth='line_width')\n plot = mpl_renderer.get_plot(vectorfield)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_linewidths(), [1, 4, 8])\n\n def test_vectorfield_line_width_op_update(self):\n vectorfield = HoloMap({\n 0: VectorField([(0, 0, 0, 1, 1), (0, 1, 0, 1, 4), (0, 2, 0, 1, 8)],\n vdims=['A', 'M', 'line_width']),\n 1: VectorField([(0, 0, 0, 1, 3), (0, 1, 0, 1, 2), (0, 2, 0, 1, 5)],\n vdims=['A', 'M', 'line_width'])}).options(linewidth='line_width')\n plot = mpl_renderer.get_plot(vectorfield)\n artist = plot.handles['artist']\n self.assertEqual(artist.get_linewidths(), [1, 4, 8])\n plot.update((1,))\n self.assertEqual(artist.get_linewidths(), [3, 2, 5])\n \n def test_vectorfield_color_index_color_clash(self):\n 
vectorfield = VectorField([(0, 0, 0, 1, 0), (0, 1, 0, 1, 1), (0, 2, 0, 1, 2)],\n vdims=['A', 'M', 'color']).options(color='color', color_index='A')\n with ParamLogStream() as log:\n mpl_renderer.get_plot(vectorfield)\n log_msg = log.stream.read()\n warning = (\"Cannot declare style mapping for 'color' option \"\n \"and declare a color_index; ignoring the color_index.\\n\")\n self.assertEqual(log_msg, warning)\n",
"\"\"\"\nTests for the spatialpandas interface.\n\"\"\"\nfrom unittest import SkipTest\n\nimport numpy as np\n\ntry:\n import spatialpandas\n from spatialpandas.geometry import (\n LineDtype, PointDtype, PolygonDtype,\n MultiLineDtype, MultiPointDtype, MultiPolygonDtype\n )\nexcept Exception:\n spatialpandas = None\n\ntry:\n import dask.dataframe as dd\nexcept Exception:\n dd = None\n\nfrom holoviews.core.data import (\n Dataset, SpatialPandasInterface, DaskSpatialPandasInterface\n)\nfrom holoviews.core.data.interface import DataError\nfrom holoviews.element import Path, Points, Polygons\nfrom holoviews.element.comparison import ComparisonTestCase\n\nfrom .test_multiinterface import GeomTests\n\n\nclass RoundTripTests(ComparisonTestCase):\n\n datatype = None\n\n interface = None\n\n __test__ = False\n\n def test_point_roundtrip(self):\n points = Points([{'x': 0, 'y': 1, 'z': 0},\n {'x': 1, 'y': 0, 'z': 1}], ['x', 'y'],\n 'z', datatype=[self.datatype])\n self.assertIsInstance(points.data.geometry.dtype, PointDtype)\n roundtrip = points.clone(datatype=['multitabular'])\n self.assertEqual(roundtrip.interface.datatype, 'multitabular')\n expected = Points([{'x': 0, 'y': 1, 'z': 0},\n {'x': 1, 'y': 0, 'z': 1}], ['x', 'y'],\n 'z', datatype=['multitabular'])\n self.assertEqual(roundtrip, expected)\n\n def test_multi_point_roundtrip(self):\n xs = [1, 2, 3, 2]\n ys = [2, 0, 7, 4]\n points = Points([{'x': xs, 'y': ys, 'z': 0},\n {'x': xs[::-1], 'y': ys[::-1], 'z': 1}],\n ['x', 'y'], 'z', datatype=[self.datatype])\n self.assertIsInstance(points.data.geometry.dtype, MultiPointDtype)\n roundtrip = points.clone(datatype=['multitabular'])\n self.assertEqual(roundtrip.interface.datatype, 'multitabular')\n expected = Points([{'x': xs, 'y': ys, 'z': 0},\n {'x': xs[::-1], 'y': ys[::-1], 'z': 1}],\n ['x', 'y'], 'z', datatype=['multitabular'])\n self.assertEqual(roundtrip, expected)\n\n def test_line_roundtrip(self):\n xs = [1, 2, 3]\n ys = [2, 0, 7]\n path = Path([{'x': xs, 'y': ys, 'z': 1},\n {'x': xs[::-1], 'y': ys[::-1], 'z': 2}],\n ['x', 'y'], 'z', datatype=[self.datatype])\n self.assertIsInstance(path.data.geometry.dtype, LineDtype)\n roundtrip = path.clone(datatype=['multitabular'])\n self.assertEqual(roundtrip.interface.datatype, 'multitabular')\n expected = Path([{'x': xs, 'y': ys, 'z': 1},\n {'x': xs[::-1], 'y': ys[::-1], 'z': 2}],\n ['x', 'y'], 'z', datatype=['multitabular'])\n self.assertEqual(roundtrip, expected)\n \n def test_multi_line_roundtrip(self):\n xs = [1, 2, 3, np.nan, 6, 7, 3]\n ys = [2, 0, 7, np.nan, 7, 5, 2]\n path = Path([{'x': xs, 'y': ys, 'z': 0},\n {'x': xs[::-1], 'y': ys[::-1], 'z': 1}],\n ['x', 'y'], 'z', datatype=[self.datatype])\n self.assertIsInstance(path.data.geometry.dtype, MultiLineDtype)\n roundtrip = path.clone(datatype=['multitabular'])\n self.assertEqual(roundtrip.interface.datatype, 'multitabular')\n expected = Path([{'x': xs, 'y': ys, 'z': 0},\n {'x': xs[::-1], 'y': ys[::-1], 'z': 1}],\n ['x', 'y'], 'z', datatype=['multitabular'])\n self.assertEqual(roundtrip, expected)\n \n def test_polygon_roundtrip(self):\n xs = [1, 2, 3]\n ys = [2, 0, 7]\n poly = Polygons([{'x': xs, 'y': ys, 'z': 0},\n {'x': xs[::-1], 'y': ys[::-1], 'z': 1}],\n ['x', 'y'], 'z', datatype=[self.datatype])\n self.assertIsInstance(poly.data.geometry.dtype, PolygonDtype)\n roundtrip = poly.clone(datatype=['multitabular'])\n self.assertEqual(roundtrip.interface.datatype, 'multitabular')\n expected = Polygons([{'x': xs+[1], 'y': ys+[2], 'z': 0},\n {'x': [3]+xs, 'y': [7]+ys, 'z': 1}],\n ['x', 'y'], 'z', 
datatype=['multitabular'])\n self.assertEqual(roundtrip, expected)\n \n def test_multi_polygon_roundtrip(self):\n xs = [1, 2, 3, np.nan, 6, 7, 3]\n ys = [2, 0, 7, np.nan, 7, 5, 2]\n holes = [\n [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]],\n []\n ]\n poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'z': 1},\n {'x': xs[::-1], 'y': ys[::-1], 'z': 2}],\n ['x', 'y'], 'z', datatype=[self.datatype])\n self.assertIsInstance(poly.data.geometry.dtype, MultiPolygonDtype)\n roundtrip = poly.clone(datatype=['multitabular'])\n self.assertEqual(roundtrip.interface.datatype, 'multitabular')\n expected = Polygons([{'x': [1, 2, 3, 1, np.nan, 6, 3, 7, 6],\n 'y': [2, 0, 7, 2, np.nan, 7, 2, 5, 7], 'holes': holes, 'z': 1},\n {'x': [3, 7, 6, 3, np.nan, 3, 1, 2, 3],\n 'y': [2, 5, 7, 2, np.nan, 7, 2, 0, 7], 'z': 2}],\n ['x', 'y'], 'z', datatype=['multitabular'])\n self.assertEqual(roundtrip, expected)\n\n\n\nclass SpatialPandasTest(GeomTests, RoundTripTests):\n \"\"\"\n Test of the SpatialPandasInterface.\n \"\"\"\n\n datatype = 'spatialpandas'\n\n interface = SpatialPandasInterface\n\n __test__ = True\n\n def setUp(self):\n if spatialpandas is None:\n raise SkipTest('SpatialPandasInterface requires spatialpandas, skipping tests')\n super(GeomTests, self).setUp()\n\n def test_array_points_iloc_index_rows_index_cols(self):\n arrays = [np.array([(1+i, i), (2+i, i), (3+i, i)]) for i in range(2)]\n mds = Dataset(arrays, kdims=['x', 'y'], datatype=[self.datatype])\n self.assertIs(mds.interface, self.interface)\n with self.assertRaises(DataError):\n mds.iloc[3, 0]\n\n def test_point_constructor(self):\n points = Points([{'x': 0, 'y': 1}, {'x': 1, 'y': 0}], ['x', 'y'],\n datatype=[self.datatype])\n self.assertIsInstance(points.data.geometry.dtype, PointDtype)\n self.assertEqual(points.data.iloc[0, 0].flat_values, np.array([0, 1]))\n self.assertEqual(points.data.iloc[1, 0].flat_values, np.array([1, 0]))\n\n def test_multi_point_constructor(self):\n xs = [1, 2, 3, 2]\n ys = [2, 0, 7, 4]\n points = Points([{'x': xs, 'y': ys}, {'x': xs[::-1], 'y': ys[::-1]}], ['x', 'y'],\n datatype=[self.datatype])\n self.assertIsInstance(points.data.geometry.dtype, MultiPointDtype)\n self.assertEqual(points.data.iloc[0, 0].buffer_values,\n np.array([1, 2, 2, 0, 3, 7, 2, 4]))\n self.assertEqual(points.data.iloc[1, 0].buffer_values,\n np.array([2, 4, 3, 7, 2, 0, 1, 2]))\n\n def test_line_constructor(self):\n xs = [1, 2, 3]\n ys = [2, 0, 7]\n path = Path([{'x': xs, 'y': ys}, {'x': xs[::-1], 'y': ys[::-1]}],\n ['x', 'y'], datatype=[self.datatype])\n self.assertIsInstance(path.data.geometry.dtype, LineDtype)\n self.assertEqual(path.data.iloc[0, 0].buffer_values,\n np.array([1, 2, 2, 0, 3, 7]))\n self.assertEqual(path.data.iloc[1, 0].buffer_values,\n np.array([3, 7, 2, 0, 1, 2]))\n\n def test_multi_line_constructor(self):\n xs = [1, 2, 3, np.nan, 6, 7, 3]\n ys = [2, 0, 7, np.nan, 7, 5, 2]\n path = Path([{'x': xs, 'y': ys}, {'x': xs[::-1], 'y': ys[::-1]}],\n ['x', 'y'], datatype=[self.datatype])\n self.assertIsInstance(path.data.geometry.dtype, MultiLineDtype)\n self.assertEqual(path.data.iloc[0, 0].buffer_values,\n np.array([1, 2, 2, 0, 3, 7, 6, 7, 7, 5, 3, 2]))\n self.assertEqual(path.data.iloc[1, 0].buffer_values,\n np.array([3, 2, 7, 5, 6, 7, 3, 7, 2, 0, 1, 2]))\n\n def test_polygon_constructor(self):\n xs = [1, 2, 3]\n ys = [2, 0, 7]\n holes = [\n [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]\n ]\n path = Polygons([{'x': xs, 'y': ys, 'holes': holes}, {'x': xs[::-1], 'y': ys[::-1]}],\n ['x', 
'y'], datatype=[self.datatype])\n self.assertIsInstance(path.data.geometry.dtype, PolygonDtype)\n self.assertEqual(path.data.iloc[0, 0].buffer_values,\n np.array([1., 2., 2., 0., 3., 7., 1., 2., 1.5, 2., 2., 3.,\n 1.6, 1.6, 1.5, 2., 2.1, 4.5, 2.5, 5., 2.3, 3.5, 2.1, 4.5]))\n self.assertEqual(path.data.iloc[1, 0].buffer_values,\n np.array([3, 7, 1, 2, 2, 0, 3, 7]))\n\n def test_multi_polygon_constructor(self):\n xs = [1, 2, 3, np.nan, 6, 7, 3]\n ys = [2, 0, 7, np.nan, 7, 5, 2]\n holes = [\n [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]],\n []\n ]\n path = Polygons([{'x': xs, 'y': ys, 'holes': holes},\n {'x': xs[::-1], 'y': ys[::-1]}],\n ['x', 'y'], datatype=[self.datatype])\n self.assertIsInstance(path.data.geometry.dtype, MultiPolygonDtype)\n self.assertEqual(path.data.iloc[0, 0].buffer_values,\n np.array([1., 2., 2., 0., 3., 7., 1., 2., 1.5, 2., 2., 3., 1.6, 1.6,\n 1.5, 2., 2.1, 4.5, 2.5, 5., 2.3, 3.5, 2.1, 4.5, 6., 7., 3.,\n 2., 7., 5., 6., 7. ]))\n self.assertEqual(path.data.iloc[1, 0].buffer_values,\n np.array([3, 2, 7, 5, 6, 7, 3, 2, 3, 7, 1, 2, 2, 0, 3, 7]))\n\n\nclass DaskSpatialPandasTest(GeomTests, RoundTripTests):\n \"\"\"\n Test of the DaskSpatialPandasInterface.\n \"\"\"\n\n datatype = 'dask_spatialpandas'\n\n interface = DaskSpatialPandasInterface\n\n __test__ = True\n\n def setUp(self):\n if spatialpandas is None:\n raise SkipTest('DaskSpatialPandasInterface requires spatialpandas, skipping tests')\n elif dd is None:\n raise SkipTest('DaskSpatialPandasInterface requires dask, skipping tests')\n super(GeomTests, self).setUp()\n\n def test_array_points_iloc_index_row(self):\n raise SkipTest(\"Not supported\")\n\n def test_array_points_iloc_index_rows(self):\n raise SkipTest(\"Not supported\")\n\n def test_array_points_iloc_index_rows_index_cols(self):\n raise SkipTest(\"Not supported\")\n\n def test_array_points_iloc_slice_rows(self):\n raise SkipTest(\"Not supported\")\n\n def test_array_points_iloc_slice_rows_no_start(self):\n raise SkipTest(\"Not supported\")\n \n def test_array_points_iloc_slice_rows_no_end(self):\n raise SkipTest(\"Not supported\")\n\n def test_array_points_iloc_slice_rows_no_stop(self):\n raise SkipTest(\"Not supported\")\n\n def test_multi_polygon_iloc_index_row(self):\n raise SkipTest(\"Not supported\")\n\n def test_multi_polygon_iloc_index_rows(self):\n raise SkipTest(\"Not supported\")\n\n def test_multi_polygon_iloc_slice_rows(self):\n raise SkipTest(\"Not supported\")\n\n def test_dict_dataset_add_dimension_values(self):\n raise SkipTest(\"Not supported\")\n\n def test_sort_by_value(self):\n raise SkipTest(\"Not supported\")\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
akarazeev/REDE | [
"aa76367f91c9198e0d7427aa3898fe0fecd2c306"
] | [
"utils/dataset.py"
] | [
"# File name: dataset.py\n# Original file: https://github.com/pytorch/vision/blob/master/torchvision/datasets/mnist.py\n# Edited by: Anton Karazeev <[email protected]>\n#\n# This file is part of REDE project (https://github.com/akarazeev/REDE)\n\nfrom __future__ import print_function\nimport torch.utils.data as data\nfrom sklearn.model_selection import train_test_split\nfrom PIL import Image\nimport os\nimport os.path\nimport errno\nimport numpy as np\nimport torch\nimport codecs\nimport pickle\n\n\nclass REDE(data.Dataset):\n \"\"\"`REDE` Dataset.\n Args:\n root (string): Root directory of dataset where ``processed/full.pt``\n exists.\n train (bool, optional): If True, creates dataset for training,\n otherwise from testing.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n test_size (float, optional): A portion of the whole dataset that will be used\n for testing.\n test_indices (list, optional): List of indices that correspond to samples\n from test dataset.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n urls = [\n 'https://github.com/akarazeev/REDE/raw/master/data/rede/raw/1848-5-parameters.pkl',\n 'https://github.com/akarazeev/REDE/raw/master/data/rede/raw/1848-62-111-images.pkl',\n 'https://github.com/akarazeev/REDE/raw/master/data/rede/raw/1848-frequencies_modes.pkl'\n ]\n raw_folder = 'raw'\n processed_folder = 'processed'\n training_file = 'training.pt'\n test_file = 'test.pt'\n full_file = 'full.pt'\n\n def __init__(self, root, train=True, transform=None, test_size=0.2, test_indices=None, download=False):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.train = train\n self.test_size = test_size\n\n if download:\n self.download()\n\n if not self._check_exists():\n raise RuntimeError('Dataset not found.' +\n ' You can use download=True to download it')\n\n self.full_images, self.full_parameters = torch.load(\n os.path.join(self.root, self.processed_folder, self.full_file))\n\n self.train_indices, self.test_indices = train_test_split(np.arange(len(self.full_images)), test_size=self.test_size)\n\n if not train:\n # Test dataset.\n error_message = 'Pass indices for test from train_dataset with correct length ({})'.format(len(self.test_indices))\n if test_indices is not None:\n # `test_indices` are passed.\n if len(self.test_indices) == len(test_indices):\n self.test_indices = test_indices\n else:\n raise RuntimeError(error_message)\n else:\n raise RuntimeError(error_message)\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, parameters) where parameter is a tuple of (gap,\n width1, height, radius1, width2). E.g. 
(2.500000e-07, 0.000001,\n 7.000000e-07, 0.000018, 8.000000e-07)\n\n \"\"\"\n if self.train:\n img, parameters = self.full_images[self.train_indices[index]], self.full_parameters[self.train_indices[index]]\n else:\n img, parameters = self.full_images[self.test_indices[index]], self.full_parameters[self.test_indices[index]]\n\n # Doing this so that it is consistent with all other datasets\n # to return a PIL Image.\n img = Image.fromarray(img.numpy(), mode='L')\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, parameters.type_as(torch.FloatTensor())\n\n def __len__(self):\n if self.train:\n return len(self.train_indices)\n else:\n return len(self.test_indices)\n\n def _check_exists(self):\n return os.path.exists(os.path.join(self.root, self.processed_folder, self.full_file))\n\n def download(self):\n \"\"\"Download the REDE data if it doesn't exist in `processed_folder` already.\"\"\"\n from six.moves import urllib\n\n if self._check_exists():\n return\n\n # Make directories.\n try:\n os.makedirs(os.path.join(self.root, self.raw_folder))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n try:\n os.makedirs(os.path.join(self.root, self.processed_folder))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n # Download dataset.\n for url in self.urls:\n print('Downloading ' + url)\n data = urllib.request.urlopen(url)\n filename = url.rpartition('/')[2]\n file_path = os.path.join(self.root, self.raw_folder, filename)\n with open(file_path, 'wb') as f:\n f.write(data.read())\n\n # Process and save as torch files.\n print('Processing...')\n\n full_set = (\n read_file(os.path.join(self.root, self.raw_folder, '1848-62-111-images.pkl')),\n read_file(os.path.join(self.root, self.raw_folder, '1848-5-parameters.pkl'))\n # read_file(os.path.join(self.root, self.raw_folder, '1848-frequencies_modes.pkl'))\n )\n with open(os.path.join(self.root, self.processed_folder, self.full_file), 'wb') as f:\n torch.save(full_set, f)\n\n print('Done!')\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n tmp = 'train' if self.train is True else 'test'\n fmt_str += ' Split: {} (test_size: {})\\n'.format(tmp, self.test_size)\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n\ndef read_file(path):\n with open(path, 'rb') as f:\n parsed = pickle.load(f)\n return torch.from_numpy(parsed)\n"
] | [
[
"torch.FloatTensor",
"torch.from_numpy",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cuichenx/controllable-text-attribute-transfer | [
"237ed9575be387340fa71f696f80f0d41967b98f"
] | [
"method/mymodel-yelp/main.py"
] | [
"# coding: utf-8\n# requirements: pytorch: 0.4\n# Author: Ke Wang\n# Contact: wangke17[AT]pku.edu.cn\nimport time\nimport argparse\nimport math\nimport os\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport numpy\nimport matplotlib\nmatplotlib.rcParams.update({'font.size': 22, 'lines.markersize': 10})\n\nfrom matplotlib import pyplot as plt\n\n# Import your model files.\nfrom model import make_model, Classifier, NoamOpt, LabelSmoothing, fgim_attack\nfrom data import prepare_data, non_pair_data_loader, get_cuda, pad_batch_seuqences, id2text_sentence,\\\n to_var, calc_bleu, load_human_answer\nfrom sklearn.manifold import TSNE\nimport pylab\n\nos.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n######################################################################################\n# Environmental parameters\n######################################################################################\nparser = argparse.ArgumentParser(description=\"Here is your model discription.\")\nparser.add_argument('--id_pad', type=int, default=0, help='')\nparser.add_argument('--id_unk', type=int, default=1, help='')\nparser.add_argument('--id_bos', type=int, default=2, help='')\nparser.add_argument('--id_eos', type=int, default=3, help='')\n\n######################################################################################\n# File parameters\n######################################################################################\nparser.add_argument('--task', type=str, default='yelp', help='Specify datasets.')\nparser.add_argument('--word_to_id_file', type=str, default='', help='')\nparser.add_argument('--data_path', type=str, default='', help='')\n\n######################################################################################\n# Model parameters\n######################################################################################\nparser.add_argument('--word_dict_max_num', type=int, default=5, help='')\nparser.add_argument('--batch_size', type=int, default=128, help='')\nparser.add_argument('--max_sequence_length', type=int, default=60)\nparser.add_argument('--num_layers_AE', type=int, default=2)\nparser.add_argument('--transformer_model_size', type=int, default=256)\nparser.add_argument('--transformer_ff_size', type=int, default=1024)\n\nparser.add_argument('--latent_size', type=int, default=256)\nparser.add_argument('--word_dropout', type=float, default=1.0)\nparser.add_argument('--embedding_dropout', type=float, default=0.5)\nparser.add_argument('--learning_rate', type=float, default=0.001)\nparser.add_argument('--label_size', type=int, default=1)\n\n\nargs = parser.parse_args()\n\nargs.if_load_from_checkpoint = True\nargs.plot_tsne = True\n# args.if_load_from_checkpoint = True\nargs.checkpoint_name = \"1616881927\"\n\n\n######################################################################################\n# End of hyper parameters\n######################################################################################\n\n\ndef add_log(ss):\n now_time = time.strftime(\"[%Y-%m-%d %H:%M:%S]: \", time.localtime())\n print(now_time + ss)\n with open(args.log_file, 'a') as f:\n f.write(now_time + str(ss) + '\\n')\n return\n\n\ndef add_output(ss):\n with open(args.output_file, 'a') as f:\n f.write(str(ss) + '\\n')\n return\n\n\ndef preparation():\n # set model save path\n if args.if_load_from_checkpoint:\n timestamp = args.checkpoint_name\n else:\n timestamp = str(int(time.time()))\n print(\"create new model save path: %s\" % timestamp)\n 
args.current_save_path = 'save/%s/' % timestamp\n args.log_file = args.current_save_path + time.strftime(\"log_%Y_%m_%d_%H_%M_%S.txt\", time.localtime())\n args.output_file = args.current_save_path + time.strftime(\"output_%Y_%m_%d_%H_%M_%S.txt\", time.localtime())\n print(\"create log file at path: %s\" % args.log_file)\n\n if os.path.exists(args.current_save_path):\n add_log(\"Load checkpoint model from Path: %s\" % args.current_save_path)\n else:\n os.makedirs(args.current_save_path)\n add_log(\"Path: %s is created\" % args.current_save_path)\n\n # set task type\n if args.task == 'yelp':\n args.data_path = '../../data/yelp/processed_files/'\n elif args.task == 'amazon':\n args.data_path = '../../data/amazon/processed_files/'\n elif args.task == 'imagecaption':\n pass\n else:\n raise TypeError('Wrong task type!')\n\n # prepare data\n args.id_to_word, args.vocab_size, \\\n args.train_file_list, args.train_label_list = prepare_data(\n data_path=args.data_path, max_num=args.word_dict_max_num, task_type=args.task\n )\n return\n\n\ndef train_iters(ae_model, dis_model):\n train_data_loader = non_pair_data_loader(\n batch_size=args.batch_size, id_bos=args.id_bos,\n id_eos=args.id_eos, id_unk=args.id_unk,\n max_sequence_length=args.max_sequence_length, vocab_size=args.vocab_size\n )\n train_data_loader.create_batches(args.train_file_list, args.train_label_list, if_shuffle=True)\n add_log(\"Start train process.\")\n ae_model.train()\n dis_model.train()\n\n ae_optimizer = NoamOpt(ae_model.src_embed[0].d_model, 1, 2000,\n torch.optim.Adam(ae_model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n dis_optimizer = torch.optim.Adam(dis_model.parameters(), lr=0.0001)\n\n ae_criterion = get_cuda(LabelSmoothing(size=args.vocab_size, padding_idx=args.id_pad, smoothing=0.1))\n dis_criterion = nn.BCELoss(size_average=True)\n\n for epoch in range(200):\n print('-' * 94)\n epoch_start_time = time.time()\n for it in range(train_data_loader.num_batch):\n batch_sentences, tensor_labels, \\\n tensor_src, tensor_src_mask, tensor_tgt, tensor_tgt_y, \\\n tensor_tgt_mask, tensor_ntokens = train_data_loader.next_batch()\n\n # Forward pass\n latent, out = ae_model.forward(tensor_src, tensor_tgt, tensor_src_mask, tensor_tgt_mask)\n\n # Loss calculation\n loss_rec = ae_criterion(out.contiguous().view(-1, out.size(-1)),\n tensor_tgt_y.contiguous().view(-1)) / tensor_ntokens.data\n\n ae_optimizer.optimizer.zero_grad()\n\n loss_rec.backward()\n ae_optimizer.step()\n\n # Classifier\n dis_lop = dis_model.forward(to_var(latent.clone()))\n\n loss_dis = dis_criterion(dis_lop, tensor_labels)\n\n dis_optimizer.zero_grad()\n loss_dis.backward()\n dis_optimizer.step()\n\n if it % 200 == 0:\n add_log(\n '| epoch {:3d} | {:5d}/{:5d} batches | rec loss {:5.4f} | dis loss {:5.4f} |'.format(\n epoch, it, train_data_loader.num_batch, loss_rec, loss_dis))\n\n print(id2text_sentence(tensor_tgt_y[0], args.id_to_word))\n generator_text = ae_model.greedy_decode(latent,\n max_len=args.max_sequence_length,\n start_id=args.id_bos)\n print(id2text_sentence(generator_text[0], args.id_to_word))\n\n add_log(\n '| end of epoch {:3d} | time: {:5.2f}s |'.format(\n epoch, (time.time() - epoch_start_time)))\n # Save model\n torch.save(ae_model.state_dict(), args.current_save_path + 'ae_model_params.pkl')\n torch.save(dis_model.state_dict(), args.current_save_path + 'dis_model_params.pkl')\n return\n\n\ndef eval_iters(ae_model, dis_model):\n eval_data_loader = non_pair_data_loader(\n batch_size=1, id_bos=args.id_bos,\n id_eos=args.id_eos, 
id_unk=args.id_unk,\n max_sequence_length=args.max_sequence_length, vocab_size=args.vocab_size\n )\n eval_file_list = [\n args.data_path + 'sentiment.test.0',\n args.data_path + 'sentiment.test.1',\n ]\n eval_label_list = [\n [0],\n [1],\n ]\n eval_data_loader.create_batches(eval_file_list, eval_label_list, if_shuffle=False)\n gold_ans = load_human_answer(args.data_path)\n assert len(gold_ans) == eval_data_loader.num_batch\n\n\n add_log(\"Start eval process.\")\n ae_model.eval()\n dis_model.eval()\n for it in range(eval_data_loader.num_batch):\n batch_sentences, tensor_labels, \\\n tensor_src, tensor_src_mask, tensor_tgt, tensor_tgt_y, \\\n tensor_tgt_mask, tensor_ntokens = eval_data_loader.next_batch()\n\n print(\"------------%d------------\" % it)\n print(id2text_sentence(tensor_tgt_y[0], args.id_to_word))\n print(\"origin_labels\", tensor_labels)\n\n latent, out = ae_model.forward(tensor_src, tensor_tgt, tensor_src_mask, tensor_tgt_mask)\n generator_text = ae_model.greedy_decode(latent,\n max_len=args.max_sequence_length,\n start_id=args.id_bos)\n print(id2text_sentence(generator_text[0], args.id_to_word))\n\n # Define target label\n target = get_cuda(torch.tensor([[1.0]], dtype=torch.float))\n if tensor_labels[0].item() > 0.5:\n target = get_cuda(torch.tensor([[0.0]], dtype=torch.float))\n print(\"target_labels\", target)\n\n modify_text = fgim_attack(dis_model, latent, target, ae_model, args.max_sequence_length, args.id_bos,\n id2text_sentence, args.id_to_word, gold_ans[it])\n add_output(modify_text)\n return\n\ndef fgim_step(model, origin_data, target, ae_model, max_sequence_length, id_bos,\n id2text_sentence, id_to_word, gold_ans, epsilon, step):\n \"\"\"Fast Gradient Iterative Methods\"\"\"\n\n dis_criterion = nn.BCELoss(size_average=False)\n\n it = 0\n data = origin_data\n while it < step:\n\n data = to_var(data.clone()) # (batch_size, seq_length, latent_size)\n # Set requires_grad attribute of tensor. 
Important for Attack\n data.requires_grad = True\n output = model.forward(data)\n loss = dis_criterion(output, target)\n model.zero_grad()\n loss.backward()\n data_grad = data.grad.data\n data = data - epsilon * data_grad\n it += 1\n epsilon = epsilon * 0.9\n\n generator_id = ae_model.greedy_decode(data,\n max_len=max_sequence_length,\n start_id=id_bos)\n generator_text = id2text_sentence(generator_id[0], id_to_word)\n print(\"| It {:2d} | dis model pred {:5.4f} |\".format(it, output[0].item()))\n print(generator_text)\n\n return data, generator_id\n\ndef tsne_plot_representation(latents, labels, name):\n \"\"\"Plot a 2-D visualization of the learned representations using t-SNE.\"\"\"\n mapped_X = TSNE(n_components=2).fit_transform(latents)\n pylab.figure(figsize=(12,12))\n \n neg_X = mapped_X[numpy.where(labels == 0)]\n pos_X = mapped_X[numpy.where(labels == 1)]\n pylab.scatter(neg_X[:, 0], neg_X[:, 1], c='blue', marker='.', label='negative')\n pylab.scatter(pos_X[:, 0], pos_X[:, 1], c='red', marker='.', label='positive')\n pylab.legend()\n\n pylab.xlim(mapped_X[:, 0].min(), mapped_X[:, 0].max())\n pylab.ylim(mapped_X[:, 1].min(), mapped_X[:, 1].max())\n pylab.savefig(f\"{name}.png\")\n\ndef plot_tsne(ae_model, dis_model, epsilon=2, step=0):\n eval_data_loader = non_pair_data_loader(\n batch_size=500, id_bos=args.id_bos,\n id_eos=args.id_eos, id_unk=args.id_unk,\n max_sequence_length=args.max_sequence_length, vocab_size=args.vocab_size\n )\n eval_file_list = [\n args.data_path + 'sentiment.test.0',\n args.data_path + 'sentiment.test.1',\n ]\n eval_label_list = [\n [0],\n [1],\n ]\n eval_data_loader.create_batches(eval_file_list, eval_label_list, if_shuffle=False)\n gold_ans = load_human_answer(args.data_path)\n\n ae_model.eval()\n dis_model.eval()\n latents, labels = [], []\n it = 0\n for _ in range(eval_data_loader.num_batch):\n batch_sentences, tensor_labels, \\\n tensor_src, tensor_src_mask, tensor_tgt, tensor_tgt_y, \\\n tensor_tgt_mask, tensor_ntokens = eval_data_loader.next_batch()\n print(\"------------%d------------\" % it) \n print(id2text_sentence(tensor_tgt_y[0], args.id_to_word))\n print(\"origin_labels\", tensor_labels[0].item())\n\n latent, out = ae_model.forward(tensor_src, tensor_tgt, tensor_src_mask, tensor_tgt_mask)\n\n # Define target label\n target = get_cuda(torch.ones((tensor_labels.size(0), 1), dtype=torch.float))\n target = target - tensor_labels\n\n if step > 0:\n latent, modified_text = fgim_step(dis_model, latent, target, ae_model, args.max_sequence_length, args.id_bos,\n id2text_sentence, args.id_to_word, gold_ans[it], epsilon, step)\n \n latents.append(latent)\n labels.append(tensor_labels)\n\n it += tensor_labels.size(0)\n\n latents = torch.cat(latents, dim=0).detach().cpu().numpy()\n labels = torch.cat(labels, dim=0).squeeze().detach().cpu().numpy()\n\n tsne_plot_representation(latents, labels, f\"tsne_step{step}_eps{epsilon}\")\n\nif __name__ == '__main__':\n preparation()\n\n ae_model = get_cuda(make_model(d_vocab=args.vocab_size,\n N=args.num_layers_AE,\n d_model=args.transformer_model_size,\n latent_size=args.latent_size,\n d_ff=args.transformer_ff_size,\n ))\n dis_model = get_cuda(Classifier(latent_size=args.latent_size, output_size=args.label_size))\n\n if args.if_load_from_checkpoint:\n # Load models' params from checkpoint\n ae_model.load_state_dict(torch.load(args.current_save_path + 'ae_model_params.pkl'))\n dis_model.load_state_dict(torch.load(args.current_save_path + 'dis_model_params.pkl'))\n else:\n train_iters(ae_model, dis_model)\n\n if 
args.plot_tsne:\n plot_tsne(ae_model, dis_model)\n for step in range(1, 5):\n for epsilon in range(2, 7):\n plot_tsne(ae_model, dis_model, epsilon=epsilon, step = step)\n\n else:\n eval_iters(ae_model, dis_model)\n\n print(\"Done!\")\n\n"
] | [
[
"torch.load",
"torch.cat",
"torch.tensor",
"torch.nn.BCELoss",
"sklearn.manifold.TSNE",
"matplotlib.rcParams.update",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gitAnto/multisensor_filters | [
"69f738c88193bd964b551b92bdca8817990d6e14"
] | [
"src/filter_node_kf.py"
] | [
"#!/usr/bin/env python\n#*********************************************************************\n#* MIT License\n#*\n#* Copyright (c) 2015 Antonio Petitti\n#*\n#* Permission is hereby granted, free of charge, to any person obtaining a copy\n#* of this software and associated documentation files (the \"Software\"), to deal\n#* in the Software without restriction, including without limitation the rights\n#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#* copies of the Software, and to permit persons to whom the Software is\n#* furnished to do so, subject to the following conditions:\n#* \n#* The above copyright notice and this permission notice shall be included in all\n#* copies or substantial portions of the Software.\n#* \n#* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#* SOFTWARE.\n#*********************************************************************\n\nPKG = 'multisensor_filters'\nNAME = 'filter_node_kf'\n\nimport roslib; roslib.load_manifest(PKG)\nimport rospy\n\nimport numpy as np\n\nfrom geometry_msgs.msg import PointStamped\n\nfrom common.filter_node import FilterNode\nfrom filters.filter_kf import FilterKF\n\nfrom math import pow\n\n\nclass FilterNodeKF(FilterNode):\n\t\t\n\tdef __init__(self, A, H, P, Q, filterRate):\n\t\tFilterNode.__init__(self)\n\t\tself.kf = FilterKF(A, H, P, Q)\n\n\t\tself.filterRate = filterRate\n\n\tdef runFilter(self):\n\n\t\tr = rospy.Rate(self.filterRate) \n\t\twhile not rospy.is_shutdown():\n\t\t\tif self.flag_reset:\n\t\t\t\tself.kf.reset(self.getReset())\n\t\t\t\tself.flag_reset = 0\n\n\t\t\tself.kf.iteration(self.getMeasures())\n\n\t\t\tself.pose_pub_.publish(self.kf.getState())\n\t\t\t\n\t\t\tperson_point = PointStamped()\n\t\t\tperson_point.header = self.kf.getState().header\n\t\t\tperson_point.header.stamp = rospy.Time.now()\n\t\t\tperson_point.point = self.kf.getState().pose.position\n\t\t\tself.point_pub_.publish(person_point)\n\n\t\t\tself.tf_person.sendTransform((self.kf.getState().pose.position.x,self.kf.getState().pose.position.y,0),\n \t\t(self.kf.getState().pose.orientation.x,self.kf.getState().pose.orientation.y,self.kf.getState().pose.orientation.z,self.kf.getState().pose.orientation.w),\n \t\trospy.Time.now(),\n \t\t\"person_link\",\n \t\tself.kf.getState().header.frame_id)\n\n\t\t\tr.sleep()\n\ndef filter_main(argv=None):\n\trospy.init_node(NAME, anonymous=False)\n\n\tfilterRate = rospy.get_param('filterRate',5)\n\tdt = 1.0/filterRate # 1/frequency\n\tsigma_0 = rospy.get_param('sigma_0',5.0)\n\tsigma_w = pow(dt*sigma_0,2)\n\n\tA_str = rospy.get_param('A')\n\tA = np.matrix(A_str)\n\n\tH_str = rospy.get_param('H')\n\tH = np.matrix(H_str)\n\t\n\tP_str = rospy.get_param('P')\n\tP = np.matrix(P_str)\n\t\n\tQ_str = rospy.get_param('Q')\n\tQ = np.matrix(Q_str)\n\n\t#TO-DO: add safety control on matrix dimensions\n\n\tfilter_ = FilterNodeKF(A, H, P, Q, filterRate)\n\tfilter_.runFilter()\n\nif __name__ == '__main__':\n\tfilter_main()\n"
] | [
[
"numpy.matrix"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jenshnielsen/nanotune | [
"0f2a252d1986f9a5ff155fad626658f85aec3f3e"
] | [
"nanotune/data/plotting.py"
] | [
"import copy\nimport os\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom cycler import cycler\nfrom matplotlib.colors import LinearSegmentedColormap, ListedColormap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport nanotune as nt\nfrom nanotune.data.dataset import Dataset, default_coord_names\n\nAxesTuple = Tuple[matplotlib.axes.Axes, List[matplotlib.colorbar.Colorbar]]\nplot_params_type = Dict[str, Union[str, float, int, bool, List[float]]]\n\ndefault_plot_params: plot_params_type = {\n \"backend\": \"ps\",\n # 'text.latex.preamble': [r'\\usepackage{gensymb}'],\n \"image.origin\": \"lower\",\n \"axes.labelsize\": 10,\n \"axes.linewidth\": 0.8,\n \"axes.labelweight\": 10,\n \"axes.edgecolor\": \"grey\",\n \"axes.labelpad\": 0.4,\n \"xtick.direction\": \"out\",\n \"ytick.direction\": \"out\",\n \"xtick.labelsize\": 10,\n \"ytick.labelsize\": 10,\n \"legend.numpoints\": 1,\n \"legend.markerscale\": 1,\n # 'legend.fontsize': 'x-small',\n # 'text.fontsize': 8,\n \"font.size\": 10,\n \"lines.linewidth\": 0.6,\n \"lines.markersize\": 5,\n \"savefig.dpi\": 300,\n \"axes.grid\": False,\n \"image.interpolation\": \"nearest\",\n \"text.usetex\": False,\n \"legend.fontsize\": 10,\n \"legend.labelspacing\": 0.5,\n \"legend.framealpha\": 0.8,\n \"figure.figsize\": [7.0, 5.0],\n \"font.family\": \"serif\",\n # 'pcolor.shading': 'auto,\n}\n\nlightblue = \"#6699CC\" # (255, 153, 204)\nblue = \"#336699\" # (51, 102, 153)\ndarkblue = \"#264D73\" # (38, 77, 115)\n\ncyan = \"#33BBEE\" # (51, 187, 238)\n\nlightteal = \"#00E6E6\" # (0, 230, 230)\nteal = \"#009988\" # (0, 153, 136)\ndarkteal = \"#006666\" # (0, 102, 102)\n\norange = \"#EE7733\" # (238, 119, 51)\n\nlightred = \"#FF531A\" # (255, 83, 26)\nred = \"#CC3311\" # (204, 51, 17)\ndarkred = \"#802000\" # (128, 32, 0)\n\nmagenta = \"#EE3377\" # (238, 51, 119)\ngrey = \"#BBBBBB\" # (187, 187, 187)\n\n\ncustom_cm = LinearSegmentedColormap.from_list(\n \"vivid_colorblind\", [darkblue, cyan, teal, red, orange]\n)\nplt.register_cmap(cmap=custom_cm)\nmatplotlib.rcParams[\"image.cmap\"] = \"vivid_colorblind\"\n\ncolors = [\n blue,\n red,\n cyan,\n orange,\n teal,\n lightblue,\n lightteal,\n lightred,\n darkblue,\n darkteal,\n darkred,\n grey,\n]\nmatplotlib.rcParams[\"axes.prop_cycle\"] = cycler(color=colors)\n# ['#003399', '#FF6633', '#996699', '#99CCFF', '#EE442F', '#F4D4D4',\n# '#63ACBE', '#9C9EB5', '#FDF0F2', '#ABC3C9'])\n\ncolors_dict = {\n \"lightblue\": lightblue,\n \"blue\": blue,\n \"darkblue\": darkblue,\n \"cyan\": cyan,\n \"lightteal\": lightteal,\n \"teal\": teal,\n \"darkteal\": darkteal,\n \"orange\": orange,\n \"lightred\": lightred,\n \"red\": red,\n \"darkred\": darkred,\n \"magenta\": magenta,\n \"grey\": grey,\n}\n\n\ndef plot_dataset(\n qc_run_id: int,\n db_name: str,\n save_figures: bool = True,\n db_folder: Optional[str] = None,\n plot_filtered_data: bool = False,\n plot_params: Optional[plot_params_type] = None,\n ax: Optional[matplotlib.axes.Axes] = None,\n colorbar: Optional[matplotlib.colorbar.Colorbar] = None,\n filename: Optional[str] = None,\n file_location: Optional[str] = None,\n) -> AxesTuple:\n \"\"\"\n If to be saved and no file location specified, the figure will be saved at\n os.path.join(nt.config['db_folder'], 'tuning_results', dataset.device_name)\n in both eps and png\n \"\"\"\n\n if plot_params is None:\n plot_params = default_plot_params\n matplotlib.rcParams.update(plot_params)\n if db_folder is None:\n _, db_folder = 
\n    dataset = Dataset(qc_run_id, db_name, db_folder=db_folder)\n\n    if plot_filtered_data:\n        data = dataset.filtered_data\n    else:\n        data = dataset.data\n\n    if ax is None:\n        fig_size = copy.deepcopy(plot_params[\"figure.figsize\"])\n        fig_size[1] *= len(dataset.data) * 0.8  # type: ignore\n        fig, ax = plt.subplots(\n            len(dataset.data),\n            1,\n            squeeze=False,\n            figsize=fig_size,\n        )\n\n    colorbars: List[matplotlib.colorbar.Colorbar] = []\n\n    fig_title = dataset.guid\n\n    for r_i, read_meth in enumerate(dataset.readout_methods):\n        c_name = default_coord_names[\"voltage\"][0]\n        voltage_x = data[read_meth][c_name].values\n        signal = data[read_meth].values.T\n\n        if dataset.dimensions[read_meth] == 1:\n            colorbar = None\n            ax[r_i, 0].plot(\n                voltage_x,\n                signal,\n                zorder=6,\n            )\n            ax[r_i, 0].set_xlabel(dataset.get_plot_label(read_meth, 0))\n            ax[r_i, 0].set_ylabel(dataset.get_plot_label(read_meth, 1))\n            ax[r_i, 0].set_title(str(fig_title))\n\n            divider = make_axes_locatable(ax[r_i, 0])\n            cbar_ax = divider.append_axes(\"right\", size=\"5%\", pad=0.06)\n            cbar_ax.set_facecolor(\"none\")\n            for caxis in [\"top\", \"bottom\", \"left\", \"right\"]:\n                cbar_ax.spines[caxis].set_linewidth(0)\n            cbar_ax.set_xticks([])\n            cbar_ax.set_yticks([])\n            colorbars.append(colorbar)  # append the (None) placeholder, not the list itself\n\n            ax[r_i, 0].figure.tight_layout()\n\n        elif dataset.dimensions[read_meth] == 2:\n            c_name = default_coord_names[\"voltage\"][1]\n            voltage_y = data[read_meth][c_name].values\n            colormesh = ax[r_i, 0].pcolormesh(\n                voltage_x,\n                voltage_y,\n                signal,\n                shading=\"auto\",\n            )\n\n            if colorbar is not None:\n                colorbars.append(\n                    ax[r_i, 0].figure.colorbar(\n                        colormesh, ax=ax[r_i, 0], cax=colorbar.ax\n                    )\n                )\n            else:\n                divider = make_axes_locatable(ax[r_i, 0])\n                cbar_ax = divider.append_axes(\"right\", size=\"5%\", pad=0.06)\n                # use the axes' own figure: 'fig' only exists when ax was created above\n                colorbars.append(\n                    ax[r_i, 0].figure.colorbar(\n                        colormesh,\n                        ax=ax[r_i, 0],\n                        cax=cbar_ax,\n                    )\n                )\n            colorbars[-1].set_label(\n                dataset.get_plot_label(read_meth, 2),\n                rotation=-270,\n            )\n\n            ax[r_i, 0].set_xlabel(dataset.get_plot_label(read_meth, 0))\n            ax[r_i, 0].set_ylabel(dataset.get_plot_label(read_meth, 1))\n            ax[r_i, 0].set_title(str(fig_title))\n\n            ax[r_i, 0].figure.tight_layout()\n\n        else:\n            raise NotImplementedError\n\n    if save_figures:\n        if file_location is None:\n            file_location = os.path.join(\n                nt.config[\"db_folder\"], \"tuning_results\", dataset.device_name\n            )\n        if not os.path.exists(file_location):\n            os.makedirs(file_location)\n\n        if filename is None:\n            filename = \"dataset_\" + str(dataset.guid)\n        else:\n            filename = os.path.splitext(filename)[0]\n\n        path = os.path.join(file_location, filename + \".png\")\n        plt.savefig(path, format=\"png\", dpi=600, bbox_inches=\"tight\")\n    return ax, colorbars\n"
] | [
[
"matplotlib.pyplot.register_cmap",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.rcParams.update",
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NickYi1990/tabluar_buddy | [
"d60c25c72256ae6741fb4c3cfbdf3163b7cc0ca0"
] | [
"tabular_buddy/exploration/data_exploration.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom scipy.stats import skew\nfrom IPython.display import display\n\n\n####################################################################################\n# Display function\n####################################################################################\n\n\ndef show_missing_info(df):\n \"\"\"Show missing information\n\n Parameters\n ----------\n df: pandas dataframe\n Dataframe to be computed.\n Return\n ------\n df_info: pandas dataframe\n Dataframe contains missing information.\n \"\"\"\n df_missing = df.isnull().sum().sort_values(ascending=False)\n df_info = pd.concat(\n [\n pd.Series(df_missing.index.tolist()),\n pd.Series(df_missing.values),\n pd.Series(df[df_missing.index].dtypes.apply(lambda x: str(x)).values),\n pd.Series((df_missing / df.shape[0]).values),\n ],\n axis=1,\n ignore_index=True,\n )\n df_info.columns = [\"col_name\", \"missing_count\", \"col_type\", \"missing_rate\"]\n\n return df_info\n\n\ndef show_skewnewss_info(df):\n \"\"\"Show skewness information.\n\n Parameters\n ----------\n df: pandas dataframe\n Dataframe to be computed.\n Return\n ------\n df_info: pandas dataframe\n Dataframe contains missing information.\n \"\"\"\n numeric_cols = df.columns[df.dtypes != \"object\"].tolist()\n skew_value = []\n\n for i in numeric_cols:\n skew_value += [skew(df[i])]\n df_info = pd.concat(\n [\n pd.Series(numeric_cols),\n pd.Series(df.dtypes[df.dtypes != \"object\"].apply(lambda x: str(x)).values),\n pd.Series(skew_value),\n ],\n axis=1,\n )\n df_info.columns = [\"var_name\", \"col_type\", \"skew_value\"]\n df_info.sort_values(\"skew_value\", inplace=True, ascending=False)\n return df_info\n\n\n####################################################################################\n# UNIVERSAL BLOCK\n####################################################################################\n\n\ndef ka_get_NC_col_names(data):\n \"\"\"Get column names of category and numeric\n\n Parameters\n ----------\n data: dataframe\n\n Return:\n ----------\n numerics_cols: numeric column names\n category_cols: category column names\n\n \"\"\"\n numerics_cols = data.select_dtypes(exclude=[\"O\"]).columns.tolist()\n category_cols = data.select_dtypes(include=[\"O\"]).columns.tolist()\n return numerics_cols, category_cols\n\n\ndef ka_remove_duplicate_cols(df, **kwargs):\n \"\"\"Remove duplicate columns\n\n Parameters\n ----------\n df: pandas dataframe\n Features matrix\n\n **kwargs: all parameters in drop_duplicates function\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns\n keep : {'first', 'last', False}, default 'first'\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n take_last : deprecated\n inplace : boolean, default False\n Whether to drop duplicates in place or to return a copy\n Return\n ------\n new pandas dataframe with \"unique columns\" and \"removed column names\"\n\n Example\n -------\n data_1_unique, removed_cols = ka_remove_duplicate_cols(data_1[numeric_cols])\n \"\"\"\n df_unique_columns = df.T.drop_duplicates(**kwargs).T\n return df_unique_columns, set(df.columns.tolist()) - set(df_unique_columns.columns.tolist())\n\n\n####################################################################################\n# CATEGORICAL BLOCK\n####################################################################################\n\n\ndef 
def k_cat_explore(x: pd.Series):\n    unique_cnt = x.nunique()\n    value_cnts = x.value_counts(dropna=False)\n\n    print(\"num of unique counts: {}\".format(unique_cnt))\n    plt_value_cnts(value_cnts.iloc[:20], x.name)\n    display(value_cnts.iloc[:20])\n\n    return unique_cnt, value_cnts\n\n\ndef plt_value_cnts(value_cnts, name):\n    ax = value_cnts.plot(kind=\"barh\", figsize=(10, 7), color=\"coral\", fontsize=13)\n    ax.set_title(name)\n\n    # create a list to collect the plt.patches data\n    totals = []\n\n    # find the values and append to list\n    for i in ax.patches:\n        totals.append(i.get_width())\n\n    # sum the widths so each bar can be labelled with its percentage\n    total = sum(totals)\n\n    # set individual bar labels using above list\n    for i in ax.patches:\n        # get_width pulls left or right; get_y pushes up or down\n        ax.text(\n            i.get_width(),\n            i.get_y() + 0.3,\n            str(round((i.get_width() / total) * 100, 2)) + \"%\",\n            fontsize=15,\n            color=\"black\",\n        )\n\n    # invert for largest on top\n    ax.invert_yaxis()\n    ax.plot()\n\n\ndef ka_C_Binary_ratio(y, positive=1):\n    \"\"\"Find the positive ratio of dependent variable\n\n    Parameters\n    ----------\n    y: pandas series\n        binary dependent variable\n    positive: 1 or 0\n        identify which value is positive\n\n    Return\n    ------\n    float value display positive rate\n    \"\"\"\n    return y.value_counts()[positive] / (y.value_counts().sum())\n"
] | [
[
"scipy.stats.skew",
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
BalajiAI/PyTorch-GAN | [
"7764c06e87d7fc36bf0a33cffe7eadeba9fa0f10"
] | [
"CycleGAN/train_class.py"
] | [
"import torch\nimport torch.nn as nn\nimport itertools\nfrom gan_model import Generator,Discriminator\n\n\nclass CycleGAN(nn.Module):\n \n def __init__(self,nb_features,lr,beta1,beta2,lambda_cycle,device):\n super().__init__()\n self.G_AB = Generator(nb_features).to(device)\n self.G_BA = Generator(nb_features).to(device)\n self.D_A = Discriminator(nb_features).to(device)\n self.D_B = Discriminator(nb_features).to(device)\n self.adversarial_loss = nn.MSELoss()\n self.cycle_loss = nn.L1Loss()\n self.opt_G = torch.optim.Adam(itertools.chain(self.G_AB.parameters(),self.G_BA.parameters()),lr=lr,betas=(beta1,beta2))\n self.opt_D_A = torch.optim.Adam(self.D_A.parameters(),lr=lr,betas=(beta1,beta2))\n self.opt_D_B = torch.optim.Adam(self.D_B.parameters(),lr=lr,betas=(beta1,beta2))\n self.lambda_cycle = lambda_cycle\n \n def setup_input(self,real_A,real_B):\n self.real_A = real_A.to(device)\n self.real_B = real_B.to(device)\n self.fake_A = self.G_BA(self.real_B)\n self.fake_B = self.G_AB(self.real_A)\n \n def optimize_D(self):\n self.D_A.train()\n self.D_B.train()\n real_preds = self.D_A(self.real_A)\n fake_preds = self.D_A(self.fake_A.detach())\n real_loss = self.adversarial_loss(real_preds,torch.ones_like(real_preds,device=device))\n fake_loss = self.adversarial_loss(fake_preds,torch.zeros_like(fake_preds,device=device))\n loss_D_A = (real_loss+fake_loss)/2\n \n real_preds = self.D_B(self.real_B)\n fake_preds = self.D_B(self.fake_B.detach())\n real_loss = self.adversarial_loss(real_preds,torch.ones_like(real_preds,device=device))\n fake_loss = self.adversarial_loss(fake_preds,torch.zeros_like(fake_preds,device=device))\n loss_D_B = (real_loss+fake_loss)/2\n \n self.opt_D_A.zero_grad()\n loss_D_A.backward()\n self.opt_D_A.step()\n \n self.opt_D_B.zero_grad()\n loss_D_B.backward()\n self.opt_D_B.step()\n \n return (loss_D_A+loss_D_B)/2\n \n def optimize_G(self):\n self.G_AB.train()\n self.G_BA.train()\n fake_preds_A = self.D_A(self.fake_A)\n fake_preds_B = self.D_B(self.fake_B)\n adversarial_loss_G_AB = self.adversarial_loss(fake_preds_B,torch.ones_like(fake_preds_B,device=device))\n adversarial_loss_G_BA = self.adversarial_loss(fake_preds_A,torch.ones_like(fake_preds_A,device=device))\n adversarial_loss_G = (adversarial_loss_G_AB + adversarial_loss_G_BA)/2\n \n cycle_loss_G_AB = self.cycle_loss(self.real_A,self.G_BA(self.fake_B))\n cycle_loss_G_BA = self.cycle_loss(self.real_B,self.G_AB(self.fake_A))\n cycle_loss_G = (cycle_loss_G_AB + cycle_loss_G_BA)/2\n loss_G = adversarial_loss_G + (self.lambda_cycle*cycle_loss_G)\n \n self.opt_G.zero_grad()\n loss_G.backward()\n self.opt_G.step()\n \n return loss_G\n"
] | [
[
"torch.ones_like",
"torch.zeros_like",
"torch.nn.MSELoss",
"torch.nn.L1Loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MohamedSebaie/DeepFake-Detection-Using-Pytorch | [
"b7c5a5965d058f72a660cc33248008d0d70193a5"
] | [
"DeepFake_Detection_Pip_Package/deepfake_detection/DeepFake_Utils.py"
] | [
"import subprocess\nimport sys\nimport os\nimport gdown\n\nBlazeFaceURL = 'https://drive.google.com/uc?id=1FhJrGLBnnRw_nq0p58NUzlN2rZI9BAQa'\nAnchorsURL = 'https://drive.google.com/uc?id=1oqK5yz5ZaWjLP9O_eXhvGL9RD6hOyS6d'\npytorchCVURL = 'https://drive.google.com/uc?id=18cBhKpkRflQmyXCVjy1DtVLIUnLPo2bO'\nResNextModelURL = 'https://drive.google.com/uc?id=1siAcM9uTEoLEeqKFxq5h-hjs4NoHS0SU'\nXcePtionModelURL = 'https://drive.google.com/uc?id=1--68J6Ipny937AFjJ_AKjhFXFnvnGecV'\n\npytorchcv = 'pytorchcv-0.0.55-py2.py3-none-any.whl'\nResNextModel = 'resnext.pth'\nXcePtionModel = 'xception.pth'\nBlazeFaceModel = 'blazeface.pth'\nAnchorsNpy = 'anchors.npy'\n\ngdown.download(pytorchCVURL, pytorchcv, quiet=False)\ngdown.download(ResNextModelURL, ResNextModel, quiet=False)\ngdown.download(XcePtionModelURL, XcePtionModel, quiet=False)\ngdown.download(BlazeFaceURL, BlazeFaceModel, quiet=False)\ngdown.download(AnchorsURL, AnchorsNpy, quiet=False)\n\n\noutput = '.\\\\pytorchcv-0.0.55-py2.py3-none-any.whl'\noutput1 = '.\\\\resnext.pth'\noutput2 = '.\\\\xception.pth'\noutput3 = '.\\\\blazeface.pth'\noutput4 = '.\\\\anchors.npy'\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(output)))\n__location1__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(output1)))\n__location2__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(output2)))\n__location3__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(output3)))\n__location4__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(output4)))\n\npytorchCVPackage = os.path.join(__location__, 'pytorchcv-0.0.55-py2.py3-none-any.whl')\nResNextModelWeights = os.path.join(__location1__, 'resnext.pth')\nXcePtionModelWeights = os.path.join(__location2__, 'xception.pth')\nblazefaceM = os.path.join(__location3__, 'blazeface.pth')\nanchorsN = os.path.join(__location4__, 'anchors.npy')\n\ndef install(package):\n try:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\n except subprocess.CalledProcessError as e:\n raise RuntimeError(\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output))\n\ninstall(pytorchCVPackage)\ninstall(\"pytube\")\ninstall(\"moviepy\")\ninstall(\"youtube-dl\")\ninstall(\"pafy\")\n\n\nimport cv2\nimport torch\nimport warnings\nimport os,time,io\nimport numpy as np\nimport pandas as pd\nimport torch.nn as nn\nfrom pytube import YouTube\nfrom moviepy.editor import *\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom torchvision.transforms import Normalize\nfrom pytorchcv.model_provider import get_model\nwarnings.filterwarnings(\"ignore\")\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nframes_per_video = 64 \ninput_size_resnext = 224\ninput_size_xception = 150\nspeed_test = True # you have to enable this manually\n\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\nnormalize_transform = Normalize(mean, std)\n############################################################################\nclass FaceExtractor:\n \"\"\"Wrapper for face extraction workflow.\"\"\"\n \n def __init__(self, video_read_fn, facedet):\n \"\"\"Creates a new FaceExtractor.\n\n Arguments:\n video_read_fn: a function that takes in a path to a video file\n and returns a tuple consisting of a NumPy array with shape\n (num_frames, H, W, 3) and a list of frame indices, or None\n in case of an error\n facedet: the face detector object\n \"\"\"\n self.video_read_fn = video_read_fn\n self.facedet = 
facedet\n \n def process_videos(self, input_dir, filenames, video_idxs):\n \"\"\"For the specified selection of videos, grabs one or more frames \n from each video, runs the face detector, and tries to find the faces \n in each frame.\n\n The frames are split into tiles, and the tiles from the different videos \n are concatenated into a single batch. This means the face detector gets\n a batch of size len(video_idxs) * num_frames * num_tiles (usually 3).\n\n Arguments:\n input_dir: base folder where the video files are stored\n filenames: list of all video files in the input_dir\n video_idxs: one or more indices from the filenames list; these\n are the videos we'll actually process\n\n Returns a list of dictionaries, one for each frame read from each video.\n\n This dictionary contains:\n - video_idx: the video this frame was taken from\n - frame_idx: the index of the frame in the video\n - frame_w, frame_h: original dimensions of the frame\n - faces: a list containing zero or more NumPy arrays with a face crop\n - scores: a list array with the confidence score for each face crop\n\n If reading a video failed for some reason, it will not appear in the \n output array. Note that there's no guarantee a given video will actually\n have num_frames results (as soon as a reading problem is encountered for \n a video, we continue with the next video).\n \"\"\"\n target_size = self.facedet.input_size\n\n videos_read = []\n frames_read = []\n frames = []\n tiles = []\n resize_info = []\n\n for video_idx in video_idxs:\n # Read the full-size frames from this video.\n filename = filenames[video_idx]\n video_path = os.path.join(input_dir, filename)\n result = self.video_read_fn(video_path)\n\n # Error? Then skip this video.\n if result is None: continue\n\n videos_read.append(video_idx)\n\n # Keep track of the original frames (need them later).\n my_frames, my_idxs = result\n frames.append(my_frames)\n frames_read.append(my_idxs)\n\n # Split the frames into several tiles. Resize the tiles to 128x128.\n my_tiles, my_resize_info = self._tile_frames(my_frames, target_size)\n tiles.append(my_tiles)\n resize_info.append(my_resize_info)\n\n # Put all the tiles for all the frames from all the videos into\n # a single batch.\n batch = np.concatenate(tiles)\n\n # Run the face detector. The result is a list of PyTorch tensors, \n # one for each image in the batch.\n all_detections = self.facedet.predict_on_batch(batch, apply_nms=False)\n\n result = []\n offs = 0\n for v in range(len(tiles)):\n # Not all videos may have the same number of tiles, so find which \n # detections go with which video.\n num_tiles = tiles[v].shape[0]\n detections = all_detections[offs:offs + num_tiles]\n offs += num_tiles\n\n # Convert the detections from 128x128 back to the original frame size.\n detections = self._resize_detections(detections, target_size, resize_info[v])\n\n # Because we have several tiles for each frame, combine the predictions\n # from these tiles. The result is a list of PyTorch tensors, but now one\n # for each frame (rather than each tile).\n num_frames = frames[v].shape[0]\n frame_size = (frames[v].shape[2], frames[v].shape[1])\n detections = self._untile_detections(num_frames, frame_size, detections)\n\n # The same face may have been detected in multiple tiles, so filter out\n # overlapping detections. 
This is done separately for each frame.\n detections = self.facedet.nms(detections)\n\n for i in range(len(detections)):\n # Crop the faces out of the original frame.\n faces = self._add_margin_to_detections(detections[i], frame_size, 0.2)\n faces = self._crop_faces(frames[v][i], faces)\n\n # Add additional information about the frame and detections.\n scores = list(detections[i][:, 16].cpu().numpy())\n frame_dict = { \"video_idx\": videos_read[v],\n \"frame_idx\": frames_read[v][i],\n \"frame_w\": frame_size[0],\n \"frame_h\": frame_size[1],\n \"faces\": faces, \n \"scores\": scores }\n result.append(frame_dict)\n\n # TODO: could also add:\n # - face rectangle in original frame coordinates\n # - the keypoints (in crop coordinates)\n\n return result\n\n def process_video(self, video_path):\n \"\"\"Convenience method for doing face extraction on a single video.\"\"\"\n input_dir = os.path.dirname(video_path)\n filenames = [ os.path.basename(video_path) ]\n return self.process_videos(input_dir, filenames, [0])\n\n def _tile_frames(self, frames, target_size):\n \"\"\"Splits each frame into several smaller, partially overlapping tiles\n and resizes each tile to target_size.\n\n After a bunch of experimentation, I found that for a 1920x1080 video,\n BlazeFace works better on three 1080x1080 windows. These overlap by 420\n pixels. (Two windows also work but it's best to have a clean center crop\n in there as well.)\n\n I also tried 6 windows of size 720x720 (horizontally: 720|360, 360|720;\n vertically: 720|1200, 480|720|480, 1200|720) but that gives many false\n positives when a window has no face in it.\n\n For a video in portrait orientation (1080x1920), we only take a single\n crop of the top-most 1080 pixels. If we split up the video vertically,\n then we might get false positives again.\n\n (NOTE: Not all videos are necessarily 1080p but the code can handle this.)\n\n Arguments:\n frames: NumPy array of shape (num_frames, height, width, 3)\n target_size: (width, height)\n\n Returns:\n - a new (num_frames * N, target_size[1], target_size[0], 3) array\n where N is the number of tiles used.\n - a list [scale_w, scale_h, offset_x, offset_y] that describes how\n to map the resized and cropped tiles back to the original image \n coordinates. This is needed for scaling up the face detections \n from the smaller image to the original image, so we can take the \n face crops in the original coordinate space. 
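\n\n        A worked example (from the code below): for a 1920x1080 landscape\n        frame, split_size = 1080 and x_step = (1920 - 1080) // 2 = 420, so\n        num_h = 3 tiles are taken at x = 0, 420 and 840.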
\n \"\"\"\n num_frames, H, W, _ = frames.shape\n\n # Settings for 6 overlapping windows:\n # split_size = 720\n # x_step = 480\n # y_step = 360\n # num_v = 2\n # num_h = 3\n\n # Settings for 2 overlapping windows:\n # split_size = min(H, W)\n # x_step = W - split_size\n # y_step = H - split_size\n # num_v = 1\n # num_h = 2 if W > H else 1\n\n split_size = min(H, W)\n x_step = (W - split_size) // 2\n y_step = (H - split_size) // 2\n num_v = 1\n num_h = 3 if W > H else 1\n\n splits = np.zeros((num_frames * num_v * num_h, target_size[1], target_size[0], 3), dtype=np.uint8)\n\n i = 0\n for f in range(num_frames):\n y = 0\n for v in range(num_v):\n x = 0\n for h in range(num_h):\n crop = frames[f, y:y+split_size, x:x+split_size, :]\n splits[i] = cv2.resize(crop, target_size, interpolation=cv2.INTER_AREA)\n x += x_step\n i += 1\n y += y_step\n\n resize_info = [split_size / target_size[0], split_size / target_size[1], 0, 0]\n return splits, resize_info\n\n def _resize_detections(self, detections, target_size, resize_info):\n \"\"\"Converts a list of face detections back to the original \n coordinate system.\n\n Arguments:\n detections: a list containing PyTorch tensors of shape (num_faces, 17) \n target_size: (width, height)\n resize_info: [scale_w, scale_h, offset_x, offset_y]\n \"\"\"\n projected = []\n target_w, target_h = target_size\n scale_w, scale_h, offset_x, offset_y = resize_info\n\n for i in range(len(detections)):\n detection = detections[i].clone()\n\n # ymin, xmin, ymax, xmax\n for k in range(2):\n detection[:, k*2 ] = (detection[:, k*2 ] * target_h - offset_y) * scale_h\n detection[:, k*2 + 1] = (detection[:, k*2 + 1] * target_w - offset_x) * scale_w\n\n # keypoints are x,y\n for k in range(2, 8):\n detection[:, k*2 ] = (detection[:, k*2 ] * target_w - offset_x) * scale_w\n detection[:, k*2 + 1] = (detection[:, k*2 + 1] * target_h - offset_y) * scale_h\n\n projected.append(detection)\n\n return projected \n \n def _untile_detections(self, num_frames, frame_size, detections):\n \"\"\"With N tiles per frame, there also are N times as many detections.\n This function groups together the detections for a given frame; it is\n the complement to tile_frames().\n \"\"\"\n combined_detections = []\n\n W, H = frame_size\n split_size = min(H, W)\n x_step = (W - split_size) // 2\n y_step = (H - split_size) // 2\n num_v = 1\n num_h = 3 if W > H else 1\n\n i = 0\n for f in range(num_frames):\n detections_for_frame = []\n y = 0\n for v in range(num_v):\n x = 0\n for h in range(num_h):\n # Adjust the coordinates based on the split positions.\n detection = detections[i].clone()\n if detection.shape[0] > 0:\n for k in range(2):\n detection[:, k*2 ] += y\n detection[:, k*2 + 1] += x\n for k in range(2, 8):\n detection[:, k*2 ] += x\n detection[:, k*2 + 1] += y\n\n detections_for_frame.append(detection)\n x += x_step\n i += 1\n y += y_step\n\n combined_detections.append(torch.cat(detections_for_frame))\n\n return combined_detections\n \n def _add_margin_to_detections(self, detections, frame_size, margin=0.2):\n \"\"\"Expands the face bounding box.\n\n NOTE: The face detections often do not include the forehead, which\n is why we use twice the margin for ymin.\n\n Arguments:\n detections: a PyTorch tensor of shape (num_detections, 17)\n frame_size: maximum (width, height)\n margin: a percentage of the bounding box's height\n\n Returns a PyTorch tensor of shape (num_detections, 17).\n \"\"\"\n offset = torch.round(margin * (detections[:, 2] - detections[:, 0]))\n detections = detections.clone()\n 
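# Shift each edge outward by the margin (2x for ymin, per the note\n        # above) and clamp so the expanded boxes stay inside the frame.\n        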
detections[:, 0] = torch.clamp(detections[:, 0] - offset*2, min=0)            # ymin\n        detections[:, 1] = torch.clamp(detections[:, 1] - offset, min=0)              # xmin\n        detections[:, 2] = torch.clamp(detections[:, 2] + offset, max=frame_size[1])  # ymax\n        detections[:, 3] = torch.clamp(detections[:, 3] + offset, max=frame_size[0])  # xmax\n        return detections\n    \n    def _crop_faces(self, frame, detections):\n        \"\"\"Copies the face region(s) from the given frame into a set\n        of new NumPy arrays.\n\n        Arguments:\n            frame: a NumPy array of shape (H, W, 3)\n            detections: a PyTorch tensor of shape (num_detections, 17)\n\n        Returns a list of NumPy arrays, one for each face crop. If there\n        are no faces detected for this frame, returns an empty list.\n        \"\"\"\n        faces = []\n        for i in range(len(detections)):\n            ymin, xmin, ymax, xmax = detections[i, :4].cpu().numpy().astype(int)\n            face = frame[ymin:ymax, xmin:xmax, :]\n            faces.append(face)\n        return faces\n\n    def remove_large_crops(self, crops, pct=0.1):\n        \"\"\"Removes faces from the results if they take up more than X% \n        of the video. Such a face is likely a false positive.\n        \n        This is an optional postprocessing step. Modifies the original\n        data structure.\n        \n        Arguments:\n            crops: a list of dictionaries with face crop data\n            pct: maximum portion of the frame a crop may take up\n        \"\"\"\n        for i in range(len(crops)):\n            frame_data = crops[i]\n            video_area = frame_data[\"frame_w\"] * frame_data[\"frame_h\"]\n            faces = frame_data[\"faces\"]\n            scores = frame_data[\"scores\"]\n            new_faces = []\n            new_scores = []\n            for j in range(len(faces)):\n                face = faces[j]\n                face_H, face_W, _ = face.shape\n                face_area = face_H * face_W\n                if face_area / video_area < pct:  # compare against the argument, not a hard-coded 0.1\n                    new_faces.append(face)\n                    new_scores.append(scores[j])\n            frame_data[\"faces\"] = new_faces\n            frame_data[\"scores\"] = new_scores\n\n    def keep_only_best_face(self, crops):\n        \"\"\"For each frame, only keeps the face with the highest confidence. \n        \n        This gets rid of false positives, but obviously is problematic for \n        videos with two people!\n\n        This is an optional postprocessing step. Modifies the original\n        data structure.\n        \"\"\"\n        for i in range(len(crops)):\n            frame_data = crops[i]\n            if len(frame_data[\"faces\"]) > 0:\n                frame_data[\"faces\"] = frame_data[\"faces\"][:1]\n                frame_data[\"scores\"] = frame_data[\"scores\"][:1]\n\n    # TODO: def filter_likely_false_positives(self, crops):\n    # if only some frames have more than 1 face, it's likely a false positive\n    # if most frames have more than 1 face, it's probably two people\n    # so find the % of frames with > 1 face; if > 0.X, keep the two best faces\n\n    # TODO: def filter_by_score(self, crops, min_score) to remove any\n    # crops with a confidence score lower than min_score\n\n    # TODO: def sort_by_histogram(self, crops) for videos with 2 people.\n    \n\n\nclass VideoReader:\n    \"\"\"Helper class for reading one or more frames from a video file.\"\"\"\n\n    def __init__(self, verbose=True, insets=(0, 0)):\n        \"\"\"Creates a new VideoReader.\n\n        Arguments:\n            verbose: whether to print warnings and error messages\n            insets: amount to inset the image by, as a percentage of \n                (width, height). This lets you \"zoom in\" to an image \n                to remove unimportant content around the borders. \n                Useful for face detection, which may not work if the \n                faces are too small.\n        \"\"\"\n        self.verbose = verbose\n        self.insets = insets\n\n    def read_frames(self, path, num_frames, jitter=0, seed=None):\n        \"\"\"Reads frames that are always evenly spaced throughout the video.\n\n        Arguments:\n            path: the video file\n            num_frames: how many frames to read (must be > 0)\n            jitter: if not 0, adds small random offsets to the frame indices;\n                this is useful so we don't always land on even or odd frames\n            seed: random seed for jittering; if you set this to a fixed value,\n                you probably want to set it only on the first video \n        \"\"\"\n        assert num_frames > 0\n\n        capture = cv2.VideoCapture(path)\n        frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n        if frame_count <= 0: return None\n\n        
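# Evenly spaced sampling across the whole clip; with jitter > 0 each\n        # index gets a small random offset, clipped back into range below.\n        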
frame_idxs = np.linspace(0, frame_count - 1, num_frames, endpoint=True, dtype=int)\n        if jitter > 0:\n            np.random.seed(seed)\n            jitter_offsets = np.random.randint(-jitter, jitter, len(frame_idxs))\n            frame_idxs = np.clip(frame_idxs + jitter_offsets, 0, frame_count - 1)\n\n        result = self._read_frames_at_indices(path, capture, frame_idxs)\n        capture.release()\n        return result\n\n    def read_random_frames(self, path, num_frames, seed=None):\n        \"\"\"Picks the frame indices at random.\n        \n        Arguments:\n            path: the video file\n            num_frames: how many frames to read (must be > 0)\n        \"\"\"\n        assert num_frames > 0\n        np.random.seed(seed)\n\n        capture = cv2.VideoCapture(path)\n        frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n        if frame_count <= 0: return None\n\n        frame_idxs = sorted(np.random.choice(np.arange(0, frame_count), num_frames))\n        result = self._read_frames_at_indices(path, capture, frame_idxs)\n\n        capture.release()\n        return result\n\n    def read_frames_at_indices(self, path, frame_idxs):\n        \"\"\"Reads frames from a video and puts them into a NumPy array.\n\n        Arguments:\n            path: the video file\n            frame_idxs: a list of frame indices. Important: should be\n                sorted from low-to-high! 
If an index appears multiple\n times, the frame is still read only once.\n\n Returns:\n - a NumPy array of shape (num_frames, height, width, 3)\n - a list of the frame indices that were read\n\n Reading stops if loading a frame fails, in which case the first\n dimension returned may actually be less than num_frames.\n\n Returns None if an exception is thrown for any reason, or if no\n frames were read.\n \"\"\"\n assert len(frame_idxs) > 0\n capture = cv2.VideoCapture(path)\n result = self._read_frames_at_indices(path, capture, frame_idxs)\n capture.release()\n return result\n\n def _read_frames_at_indices(self, path, capture, frame_idxs):\n try:\n frames = []\n idxs_read = []\n for frame_idx in range(frame_idxs[0], frame_idxs[-1] + 1):\n # Get the next frame, but don't decode if we're not using it.\n ret = capture.grab()\n if not ret:\n if self.verbose:\n print(\"Error grabbing frame %d from movie %s\" % (frame_idx, path))\n break\n\n # Need to look at this frame?\n current = len(idxs_read)\n if frame_idx == frame_idxs[current]:\n ret, frame = capture.retrieve()\n if not ret or frame is None:\n if self.verbose:\n print(\"Error retrieving frame %d from movie %s\" % (frame_idx, path))\n break\n\n frame = self._postprocess_frame(frame)\n frames.append(frame)\n idxs_read.append(frame_idx)\n\n if len(frames) > 0:\n return np.stack(frames), idxs_read\n if self.verbose:\n print(\"No frames read from movie %s\" % path)\n return None\n except:\n if self.verbose:\n print(\"Exception while reading movie %s\" % path)\n return None \n\n def read_middle_frame(self, path):\n \"\"\"Reads the frame from the middle of the video.\"\"\"\n capture = cv2.VideoCapture(path)\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n result = self._read_frame_at_index(path, capture, frame_count // 2)\n capture.release()\n return result\n\n def read_frame_at_index(self, path, frame_idx):\n \"\"\"Reads a single frame from a video.\n \n If you just want to read a single frame from the video, this is more\n efficient than scanning through the video to find the frame. 
However,\n        for reading multiple frames it's not efficient.\n        \n        My guess is that a \"streaming\" approach is more efficient than a \n        \"random access\" approach because, unless you happen to grab a keyframe, \n        the decoder still needs to read all the previous frames in order to \n        reconstruct the one you're asking for.\n\n        Returns a NumPy array of shape (1, H, W, 3) and the index of the frame,\n        or None if reading failed.\n        \"\"\"\n        capture = cv2.VideoCapture(path)\n        result = self._read_frame_at_index(path, capture, frame_idx)\n        capture.release()\n        return result\n\n    def _read_frame_at_index(self, path, capture, frame_idx):\n        capture.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)\n        ret, frame = capture.read()    \n        if not ret or frame is None:\n            if self.verbose:\n                print(\"Error retrieving frame %d from movie %s\" % (frame_idx, path))\n            return None\n        else:\n            frame = self._postprocess_frame(frame)\n            return np.expand_dims(frame, axis=0), [frame_idx]\n    \n    def _postprocess_frame(self, frame):\n        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n        if self.insets[0] > 0:\n            W = frame.shape[1]\n            p = int(W * self.insets[0])\n            frame = frame[:, p:-p, :]\n\n        if self.insets[1] > 0:\n            H = frame.shape[0]  # height is the first axis (shape[1] here was a bug)\n            q = int(H * self.insets[1])\n            frame = frame[q:-q, :, :]\n\n        return frame\n\nclass BlazeBlock(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):\n        super(BlazeBlock, self).__init__()\n\n        self.stride = stride\n        self.channel_pad = out_channels - in_channels\n\n        # TFLite uses slightly different padding than PyTorch \n        # on the depthwise conv layer when the stride is 2.\n        if stride == 2:\n            self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)\n            padding = 0\n        else:\n            padding = (kernel_size - 1) // 2\n\n        self.convs = nn.Sequential(\n            nn.Conv2d(in_channels=in_channels, out_channels=in_channels, \n                      kernel_size=kernel_size, stride=stride, padding=padding, \n                      groups=in_channels, bias=True),\n            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, \n                      kernel_size=1, stride=1, padding=0, bias=True),\n        )\n\n        self.act = nn.ReLU(inplace=True)\n\n    def forward(self, x):\n        if self.stride == 2:\n            h = F.pad(x, (0, 2, 0, 2), \"constant\", 0)\n            x = self.max_pool(x)\n        else:\n            h = x\n\n        if self.channel_pad > 0:\n            x = F.pad(x, (0, 0, 0, 0, 0, self.channel_pad), \"constant\", 0)\n\n        return self.act(self.convs(h) + x)\n\n\nclass BlazeFace(nn.Module):\n    \"\"\"The BlazeFace face detection model from MediaPipe.\n    \n    The version from MediaPipe is simpler than the one in the paper; \n    it does not use the \"double\" BlazeBlocks.\n\n    Because we won't be training this model, it doesn't need to have\n    batchnorm layers. 
These have already been \"folded\" into the conv \n weights by TFLite.\n\n The conversion to PyTorch is fairly straightforward, but there are \n some small differences between TFLite and PyTorch in how they handle\n padding on conv layers with stride 2.\n\n This version works on batches, while the MediaPipe version can only\n handle a single image at a time.\n\n Based on code from https://github.com/tkat0/PyTorch_BlazeFace/ and\n https://github.com/google/mediapipe/\n \"\"\"\n input_size = (128, 128)\n \n def __init__(self):\n super(BlazeFace, self).__init__()\n\n # These are the settings from the MediaPipe example graph\n # mediapipe/graphs/face_detection/face_detection_mobile_gpu.pbtxt\n self.num_classes = 1\n self.num_anchors = 896\n self.num_coords = 16\n self.score_clipping_thresh = 100.0\n self.x_scale = 128.0\n self.y_scale = 128.0\n self.h_scale = 128.0\n self.w_scale = 128.0\n self.min_score_thresh = 0.75\n self.min_suppression_threshold = 0.3\n\n self._define_layers()\n\n def _define_layers(self):\n self.backbone1 = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=24, kernel_size=5, stride=2, padding=0, bias=True),\n nn.ReLU(inplace=True),\n\n BlazeBlock(24, 24),\n BlazeBlock(24, 28),\n BlazeBlock(28, 32, stride=2),\n BlazeBlock(32, 36),\n BlazeBlock(36, 42),\n BlazeBlock(42, 48, stride=2),\n BlazeBlock(48, 56),\n BlazeBlock(56, 64),\n BlazeBlock(64, 72),\n BlazeBlock(72, 80),\n BlazeBlock(80, 88),\n )\n \n self.backbone2 = nn.Sequential(\n BlazeBlock(88, 96, stride=2),\n BlazeBlock(96, 96),\n BlazeBlock(96, 96),\n BlazeBlock(96, 96),\n BlazeBlock(96, 96),\n )\n\n self.classifier_8 = nn.Conv2d(88, 2, 1, bias=True)\n self.classifier_16 = nn.Conv2d(96, 6, 1, bias=True)\n\n self.regressor_8 = nn.Conv2d(88, 32, 1, bias=True)\n self.regressor_16 = nn.Conv2d(96, 96, 1, bias=True)\n \n def forward(self, x):\n # TFLite uses slightly different padding on the first conv layer\n # than PyTorch, so do it manually.\n x = F.pad(x, (1, 2, 1, 2), \"constant\", 0)\n \n b = x.shape[0] # batch size, needed for reshaping later\n\n x = self.backbone1(x) # (b, 88, 16, 16)\n h = self.backbone2(x) # (b, 96, 8, 8)\n \n # Note: Because PyTorch is NCHW but TFLite is NHWC, we need to\n # permute the output from the conv layers before reshaping it.\n \n c1 = self.classifier_8(x) # (b, 2, 16, 16)\n c1 = c1.permute(0, 2, 3, 1) # (b, 16, 16, 2)\n c1 = c1.reshape(b, -1, 1) # (b, 512, 1)\n\n c2 = self.classifier_16(h) # (b, 6, 8, 8)\n c2 = c2.permute(0, 2, 3, 1) # (b, 8, 8, 6)\n c2 = c2.reshape(b, -1, 1) # (b, 384, 1)\n\n c = torch.cat((c1, c2), dim=1) # (b, 896, 1)\n\n r1 = self.regressor_8(x) # (b, 32, 16, 16)\n r1 = r1.permute(0, 2, 3, 1) # (b, 16, 16, 32)\n r1 = r1.reshape(b, -1, 16) # (b, 512, 16)\n\n r2 = self.regressor_16(h) # (b, 96, 8, 8)\n r2 = r2.permute(0, 2, 3, 1) # (b, 8, 8, 96)\n r2 = r2.reshape(b, -1, 16) # (b, 384, 16)\n\n r = torch.cat((r1, r2), dim=1) # (b, 896, 16)\n return [r, c]\n\n def _device(self):\n \"\"\"Which device (CPU or GPU) is being used by this model?\"\"\"\n return self.classifier_8.weight.device\n \n def load_weights(self, path):\n self.load_state_dict(torch.load(path))\n self.eval() \n \n def load_anchors(self, path):\n self.anchors = torch.tensor(np.load(path), dtype=torch.float32, device=self._device())\n assert(self.anchors.ndimension() == 2)\n assert(self.anchors.shape[0] == self.num_anchors)\n assert(self.anchors.shape[1] == 4)\n\n def _preprocess(self, x):\n \"\"\"Converts the image pixels to the range [-1, 1].\"\"\"\n return x.float() / 127.5 - 1.0\n\n def 
predict_on_image(self, img):\n        \"\"\"Makes a prediction on a single image.\n\n        Arguments:\n            img: a NumPy array of shape (H, W, 3) or a PyTorch tensor of\n                 shape (3, H, W). The image's height and width should be \n                 128 pixels.\n\n        Returns:\n            A tensor with face detections.\n        \"\"\"\n        if isinstance(img, np.ndarray):\n            img = torch.from_numpy(img).permute((2, 0, 1))\n\n        return self.predict_on_batch(img.unsqueeze(0))[0]\n\n    def predict_on_batch(self, x, apply_nms=True):\n        \"\"\"Makes a prediction on a batch of images.\n\n        Arguments:\n            x: a NumPy array of shape (b, H, W, 3) or a PyTorch tensor of\n               shape (b, 3, H, W). The height and width should be 128 pixels.\n            apply_nms: pass False to not apply non-max suppression\n\n        Returns:\n            A list containing a tensor of face detections for each image in \n            the batch. If no faces are found for an image, returns a tensor\n            of shape (0, 17).\n\n        Each face detection is a PyTorch tensor consisting of 17 numbers:\n            - ymin, xmin, ymax, xmax\n            - x,y-coordinates for the 6 keypoints\n            - confidence score\n        \"\"\"\n        if isinstance(x, np.ndarray):\n            x = torch.from_numpy(x).permute((0, 3, 1, 2))\n\n        assert x.shape[1] == 3\n        assert x.shape[2] == 128\n        assert x.shape[3] == 128\n\n        # 1. Preprocess the images into tensors:\n        x = x.to(self._device())\n        x = self._preprocess(x)\n\n        # 2. Run the neural network:\n        with torch.no_grad():\n            out = self.__call__(x)\n\n        # 3. Postprocess the raw predictions:\n        detections = self._tensors_to_detections(out[0], out[1], self.anchors)\n\n        # 4. Non-maximum suppression to remove overlapping detections:\n        return self.nms(detections) if apply_nms else detections\n\n    def nms(self, detections):\n        \"\"\"Filters out overlapping detections.\"\"\"\n        filtered_detections = []\n        for i in range(len(detections)):\n            faces = self._weighted_non_max_suppression(detections[i])\n            faces = torch.stack(faces) if len(faces) > 0 else torch.zeros((0, 17), device=self._device())\n            filtered_detections.append(faces)\n\n        return filtered_detections\n    \n    def _tensors_to_detections(self, raw_box_tensor, raw_score_tensor, anchors):\n        \"\"\"The output of the neural network is a tensor of shape (b, 896, 16)\n        containing the bounding box regressor predictions, as well as a tensor \n        of shape (b, 896, 1) with the classification confidences.\n\n        This function converts these two \"raw\" tensors into proper detections.\n        Returns a list of (num_detections, 17) tensors, one for each image in\n        the batch.\n\n        This is based on the source code from:\n        mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc\n        mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.proto\n        \"\"\"\n        assert raw_box_tensor.ndimension() == 3\n        assert raw_box_tensor.shape[1] == self.num_anchors\n        assert raw_box_tensor.shape[2] == self.num_coords\n\n        assert raw_score_tensor.ndimension() == 3\n        assert raw_score_tensor.shape[1] == self.num_anchors\n        assert raw_score_tensor.shape[2] == self.num_classes\n\n        assert raw_box_tensor.shape[0] == raw_score_tensor.shape[0]\n        \n        detection_boxes = self._decode_boxes(raw_box_tensor, anchors)\n        \n        thresh = self.score_clipping_thresh\n        raw_score_tensor = raw_score_tensor.clamp(-thresh, thresh)\n        detection_scores = raw_score_tensor.sigmoid().squeeze(dim=-1)\n        \n        # Note: we stripped off the last dimension from the scores tensor\n        # because there is only one class. 
Now we can simply use a mask\n # to filter out the boxes with too low confidence.\n mask = detection_scores >= self.min_score_thresh\n\n # Because each image from the batch can have a different number of\n # detections, process them one at a time using a loop.\n output_detections = []\n for i in range(raw_box_tensor.shape[0]):\n boxes = detection_boxes[i, mask[i]]\n scores = detection_scores[i, mask[i]].unsqueeze(dim=-1)\n output_detections.append(torch.cat((boxes, scores), dim=-1))\n\n return output_detections\n\n def _decode_boxes(self, raw_boxes, anchors):\n \"\"\"Converts the predictions into actual coordinates using\n the anchor boxes. Processes the entire batch at once.\n \"\"\"\n boxes = torch.zeros_like(raw_boxes)\n\n x_center = raw_boxes[..., 0] / self.x_scale * anchors[:, 2] + anchors[:, 0]\n y_center = raw_boxes[..., 1] / self.y_scale * anchors[:, 3] + anchors[:, 1]\n\n w = raw_boxes[..., 2] / self.w_scale * anchors[:, 2]\n h = raw_boxes[..., 3] / self.h_scale * anchors[:, 3]\n\n boxes[..., 0] = y_center - h / 2. # ymin\n boxes[..., 1] = x_center - w / 2. # xmin\n boxes[..., 2] = y_center + h / 2. # ymax\n boxes[..., 3] = x_center + w / 2. # xmax\n\n for k in range(6):\n offset = 4 + k*2\n keypoint_x = raw_boxes[..., offset ] / self.x_scale * anchors[:, 2] + anchors[:, 0]\n keypoint_y = raw_boxes[..., offset + 1] / self.y_scale * anchors[:, 3] + anchors[:, 1]\n boxes[..., offset ] = keypoint_x\n boxes[..., offset + 1] = keypoint_y\n\n return boxes\n\n def _weighted_non_max_suppression(self, detections):\n \"\"\"The alternative NMS method as mentioned in the BlazeFace paper:\n\n \"We replace the suppression algorithm with a blending strategy that\n estimates the regression parameters of a bounding box as a weighted\n mean between the overlapping predictions.\"\n\n The original MediaPipe code assigns the score of the most confident\n detection to the weighted detection, but we take the average score\n of the overlapping detections.\n\n The input detections should be a Tensor of shape (count, 17).\n\n Returns a list of PyTorch tensors, one for each detected face.\n \n This is based on the source code from:\n mediapipe/calculators/util/non_max_suppression_calculator.cc\n mediapipe/calculators/util/non_max_suppression_calculator.proto\n \"\"\"\n if len(detections) == 0: return []\n\n output_detections = []\n\n # Sort the detections from highest to lowest score.\n remaining = torch.argsort(detections[:, 16], descending=True)\n\n while len(remaining) > 0:\n detection = detections[remaining[0]]\n\n # Compute the overlap between the first box and the other \n # remaining boxes. 
(Note that the other_boxes also include\n # the first_box.)\n first_box = detection[:4]\n other_boxes = detections[remaining, :4]\n ious = overlap_similarity(first_box, other_boxes)\n\n # If two detections don't overlap enough, they are considered\n # to be from different faces.\n mask = ious > self.min_suppression_threshold\n overlapping = remaining[mask]\n remaining = remaining[~mask]\n\n # Take an average of the coordinates from the overlapping\n # detections, weighted by their confidence scores.\n weighted_detection = detection.clone()\n if len(overlapping) > 1:\n coordinates = detections[overlapping, :16]\n scores = detections[overlapping, 16:17]\n total_score = scores.sum()\n weighted = (coordinates * scores).sum(dim=0) / total_score\n weighted_detection[:16] = weighted\n weighted_detection[16] = total_score / len(overlapping)\n\n output_detections.append(weighted_detection)\n\n return output_detections \n\n\n# IOU code from https://github.com/amdegroot/ssd.pytorch/blob/master/layers/box_utils.py\n\ndef intersect(box_a, box_b):\n \"\"\" We resize both tensors to [A,B,2] without new malloc:\n [A,2] -> [A,1,2] -> [A,B,2]\n [B,2] -> [1,B,2] -> [A,B,2]\n Then we compute the area of intersect between box_a and box_b.\n Args:\n box_a: (tensor) bounding boxes, Shape: [A,4].\n box_b: (tensor) bounding boxes, Shape: [B,4].\n Return:\n (tensor) intersection area, Shape: [A,B].\n \"\"\"\n A = box_a.size(0)\n B = box_b.size(0)\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),\n box_b[:, 2:].unsqueeze(0).expand(A, B, 2))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),\n box_b[:, :2].unsqueeze(0).expand(A, B, 2))\n inter = torch.clamp((max_xy - min_xy), min=0)\n return inter[:, :, 0] * inter[:, :, 1]\n\n\ndef jaccard(box_a, box_b):\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n is simply the intersection over union of two boxes. 
Here we operate on\n ground truth boxes and default boxes.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n Return:\n jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n \"\"\"\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, 2]-box_a[:, 0]) *\n (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n area_b = ((box_b[:, 2]-box_b[:, 0]) *\n (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]\n union = area_a + area_b - inter\n return inter / union # [A,B]\n\n\ndef overlap_similarity(box, other_boxes):\n \"\"\"Computes the IOU between a bounding box and set of other boxes.\"\"\"\n return jaccard(box.unsqueeze(0), other_boxes).squeeze(0)\n\n\n#######################################################################\nfacedet = BlazeFace().to(device)\nfacedet.load_weights(blazefaceM)\nfacedet.load_anchors(anchorsN)\n_ = facedet.train(False)\n\n\nvideo_reader = VideoReader()\nvideo_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video)\nface_extractor = FaceExtractor(video_read_fn, facedet)\n#######################################################################\ndef loadResNextModel():\n model_ft=models.resnet.ResNet(block=models.resnet.Bottleneck,layers=[3, 4, 6, 3], groups=32, width_per_group=4)\n num_ftrs = model_ft.fc.in_features \n model_ft.fc = torch.nn.Linear(2048, 1)\n model_ft.load_state_dict(torch.load(ResNextModelWeights, map_location=device))\n model_ft.to(device)\n model_ft.eval()\n return model_ft\n\nmodelResNeXt= loadResNextModel()\n \ndef loadXceptionModel():\n\n\n modelXception = get_model(\"xception\", pretrained=False)\n modelXception = nn.Sequential(*list(modelXception.children())[:-1]) # Remove original output layer (Except the last layer)\n modelXception[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d((1,1)))\n\n class Head(torch.nn.Module):\n def __init__(self, in_f, out_f):\n super(Head, self).__init__()\n \n self.f = nn.Flatten()\n self.l = nn.Linear(in_f, 512)\n self.d = nn.Dropout(0.5)\n self.o = nn.Linear(512, out_f)\n self.b1 = nn.BatchNorm1d(in_f)\n self.b2 = nn.BatchNorm1d(512)\n self.r = nn.ReLU()\n\n def forward(self, x):\n x = self.f(x)\n x = self.b1(x)\n x = self.d(x)\n\n x = self.l(x)\n x = self.r(x)\n x = self.b2(x)\n x = self.d(x)\n\n out = self.o(x)\n return out\n\n class FCN(torch.nn.Module):\n def __init__(self, base, in_f):\n super(FCN, self).__init__()\n self.base = base\n self.h1 = Head(in_f, 1)\n def forward(self, x):\n x = self.base(x)\n return self.h1(x)\n \n modelXception = FCN(modelXception, 2048)\n modelXception = modelXception.to(device)\n modelXception.load_state_dict(torch.load(XcePtionModelWeights,map_location=device))\n modelXception.eval()\n return modelXception\n\nmodelXception=loadXceptionModel()\n################################################################################\ndef jsonMetaData(filePath):\n df=pd.read_json(filePath).T\n df=df.reset_index()\n df=df.rename(columns={\"index\":\"test_videos\"})\n df.drop(['split', 'original'], axis = 1, inplace = True) \n return df\n\n \ndef isotropically_resize_image(img, size, resample=cv2.INTER_AREA):\n h, w = img.shape[:2]\n if w > h:\n h = h * size // w\n w = size\n else:\n w = w * size // h\n h = size\n\n resized = cv2.resize(img, (w, h), interpolation=resample)\n return resized\n\n\ndef make_square_image(img):\n h, w = img.shape[:2]\n size = max(h, 
w)\n t = 0\n b = size - h\n l = 0\n r = size - w\n return cv2.copyMakeBorder(img, t, b, l, r, cv2.BORDER_CONSTANT, value=0)\n\n\n\ndef predict_on_video(video_path, input_size, model):\n try:\n # Find the faces for N frames in the video.\n faces = face_extractor.process_video(video_path)\n\n # Only look at one face per frame.\n face_extractor.keep_only_best_face(faces)\n \n if len(faces) > 0:\n # NOTE: When running on the CPU, the batch size must be fixed\n # or else memory usage will blow up. (Bug in PyTorch?)\n x = np.zeros((frames_per_video, input_size, input_size, 3), dtype=np.uint8)\n\n # If we found any faces, prepare them for the model.\n n = 0\n for frame_data in faces:\n for face in frame_data[\"faces\"]:\n # Resize to the model's required input size.\n # We keep the aspect ratio intact and add zero\n # padding if necessary. \n resized_face = isotropically_resize_image(face, input_size)\n resized_face = make_square_image(resized_face)\n\n if n < frames_per_video:\n x[n] = resized_face\n n += 1\n else:\n print(\"WARNING: have %d faces but batch size is %d\" % (n, batch_size))\n \n \n\n if n > 0:\n x = torch.tensor(x, device=device).float()\n\n # Preprocess the images.\n x = x.permute((0, 3, 1, 2))\n\n for i in range(len(x)):\n x[i] = normalize_transform(x[i] / 255.)\n\n # Make a prediction, then take the average.\n with torch.no_grad():\n y_pred = model(x)\n y_pred = torch.sigmoid(y_pred.squeeze())\n return y_pred[:n].mean().item()\n\n except Exception as e:\n print(\"Prediction error on video %s: %s\" % (video_path, str(e)))\n\n return 0.5\n\n\n\n\ndef predict_on_video_set(test_dir,videos, input_size, model):\n pred_list=[]\n for i in range(len(videos)):\n filename = videos[i]\n y_pred = predict_on_video(os.path.join(test_dir, filename), input_size, model)\n pred_list.append(y_pred)\n\n return pred_list\n\ndef DeepFake_Inference(video):\n start_time = time.time()\n r1=0.224\n r2=0.6124\n threshold=0.3\n total = r1 + r2\n r11 = r1/total\n r22 = r2/total\n\n pred_xception = predict_on_video(video, input_size_xception, modelXception)\n pred_resnext = predict_on_video(video, input_size_resnext, modelResNeXt)\n pred_ensembel = r22*pred_resnext + r11*pred_xception\n # print(f\"pred_resnext: {pred_resnext}\\npred_xception: {pred_xception}\\npredEnsembel: {[pred_ensembel]}\")\n elapsed = time.time() - start_time\n print(\"elapsedTime:\", round(elapsed,2), \" Second\")\n\n if pred_ensembel > threshold:\n return \"The Video is FAKE\"\n else:\n return \"The Video is REAL\"\n\n\n\ndef Inference_on_video(output_file_path, video_file_path):\n '''\n This function will perform action recognition on a video using the LRCN model.\n Args:\n video_file_path: The path of the video stored in the disk on which the action recognition is to be performed.\n output_file_path: The path where the ouput video with the predicted action being performed overlayed will be stored.\n SEQUENCE_LENGTH: The fixed number of frames of a video that can be passed to the model as one sequence.\n '''\n\n # Initialize the VideoCapture object to read from the video file.\n video_reader = cv2.VideoCapture(video_file_path)\n\n # Get the width and height of the video.\n original_video_width = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n original_video_height = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Initialize the VideoWriter Object to store the output video in the disk.\n video_writer = cv2.VideoWriter(output_file_path, cv2.VideoWriter_fourcc('M', 'P', '4', 'V'), \n video_reader.get(cv2.CAP_PROP_FPS), 
(original_video_width, original_video_height))\n\n class_prediction = DeepFake_Inference(video_file_path)\n print(class_prediction)\n while video_reader.isOpened():\n\n # Read the frame.\n ok, frame = video_reader.read() \n \n # Check if frame is not read properly then break the loop.\n if not ok:\n break\n\n image = frame.copy()\n framee = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n \n textsize = cv2.getTextSize(class_prediction, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]\n textX = int((frame.shape[1] - textsize[0]) / 2)\n textY = int((frame.shape[0] + textsize[1]) / 2)\n\n cv2.putText(frame, class_prediction, (textX, textY), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)\n\n # Write The frame into the disk using the VideoWriter Object.\n\n video_writer.write(frame)\n # time.sleep(2)\n # Release the VideoCapture and VideoWriter objects.\n video_reader.release()\n video_writer.release()"
] | [
[
"numpy.expand_dims",
"numpy.linspace",
"torch.cat",
"torch.load",
"numpy.concatenate",
"torch.no_grad",
"torch.cuda.is_available",
"torch.clamp",
"torch.nn.Dropout",
"numpy.clip",
"numpy.arange",
"torch.round",
"torch.from_numpy",
"numpy.stack",
"torch.tensor",
"numpy.load",
"torch.argsort",
"numpy.zeros",
"torch.nn.functional.pad",
"torch.nn.BatchNorm1d",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.Linear",
"pandas.read_json",
"torch.stack",
"numpy.random.seed",
"torch.nn.Flatten",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
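
The deepfake-detection row above uses BlazeFace's "blending" non-max suppression, where overlapping boxes are merged as a score-weighted average instead of being discarded. As a standalone illustration of that idea only, here is a minimal NumPy sketch (not the row's own PyTorch implementation; the [ymin, xmin, ymax, xmax, score] row layout and the 0.3 IoU threshold are assumptions):

import numpy as np

def iou_one_to_many(box, boxes):
    # IoU of one [ymin, xmin, ymax, xmax] box against an (N, 4) array.
    ymin = np.maximum(box[0], boxes[:, 0])
    xmin = np.maximum(box[1], boxes[:, 1])
    ymax = np.minimum(box[2], boxes[:, 2])
    xmax = np.minimum(box[3], boxes[:, 3])
    inter = np.clip(ymax - ymin, 0, None) * np.clip(xmax - xmin, 0, None)
    area = lambda b: (b[..., 2] - b[..., 0]) * (b[..., 3] - b[..., 1])
    return inter / (area(box) + area(boxes) - inter)

def blend_nms(dets, iou_thresh=0.3):
    # dets: (N, 5) array of [ymin, xmin, ymax, xmax, score] rows.
    if len(dets) == 0:
        return dets
    order = np.argsort(-dets[:, 4])  # highest score first
    out = []
    while order.size > 0:
        first = dets[order[0]]
        ious = iou_one_to_many(first[:4], dets[order, :4])
        group = order[ious > iou_thresh]   # overlapping boxes, incl. the first
        order = order[ious <= iou_thresh]  # everything else stays in play
        w = dets[group, 4:5]
        blended = (dets[group, :4] * w).sum(axis=0) / w.sum()
        out.append(np.append(blended, w.mean()))  # average score, as above
    return np.stack(out)
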
Strikewolf/openpilot5 | [
"3ece1cd3e07d6a27ac32ce205f55092538108389"
] | [
"common/transformations/coordinates.py"
] | [
"import numpy as np\n\"\"\"\nCoordinate transformation module. All methods accept arrays as input\nwith each row as a position.\n\"\"\"\n\n\n\na = 6378137\nb = 6356752.3142\nesq = 6.69437999014 * 0.001\ne1sq = 6.73949674228 * 0.001\n\n\ndef geodetic2ecef(geodetic):\n geodetic = np.array(geodetic)\n input_shape = geodetic.shape\n geodetic = np.atleast_2d(geodetic)\n lat = (np.pi/180)*geodetic[:,0]\n lon = (np.pi/180)*geodetic[:,1]\n alt = geodetic[:,2]\n\n xi = np.sqrt(1 - esq * np.sin(lat)**2)\n x = (a / xi + alt) * np.cos(lat) * np.cos(lon)\n y = (a / xi + alt) * np.cos(lat) * np.sin(lon)\n z = (a / xi * (1 - esq) + alt) * np.sin(lat)\n ecef = np.array([x, y, z]).T\n return ecef.reshape(input_shape)\n\n\ndef ecef2geodetic(ecef):\n \"\"\"\n Convert ECEF coordinates to geodetic using ferrari's method\n \"\"\"\n def ferrari(x, y, z):\n # ferrari's method\n r = np.sqrt(x * x + y * y)\n Esq = a * a - b * b\n F = 54 * b * b * z * z\n G = r * r + (1 - esq) * z * z - esq * Esq\n C = (esq * esq * F * r * r) / (pow(G, 3))\n S = np.cbrt(1 + C + np.sqrt(C * C + 2 * C))\n P = F / (3 * pow((S + 1 / S + 1), 2) * G * G)\n Q = np.sqrt(1 + 2 * esq * esq * P)\n r_0 = -(P * esq * r) / (1 + Q) + np.sqrt(0.5 * a * a*(1 + 1.0 / Q) - \\\n P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)\n U = np.sqrt(pow((r - esq * r_0), 2) + z * z)\n V = np.sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z)\n Z_0 = b * b * z / (a * V)\n h = U * (1 - b * b / (a * V))\n lat = (180/np.pi)*np.arctan((z + e1sq * Z_0) / r)\n lon = (180/np.pi)*np.arctan2(y, x)\n return lat, lon, h\n\n geodetic = []\n ecef = np.array(ecef)\n input_shape = ecef.shape\n ecef = np.atleast_2d(ecef)\n for p in ecef:\n geodetic.append(ferrari(*p))\n geodetic = np.array(geodetic)\n return geodetic.reshape(input_shape)\n\n\n\nclass LocalCoord(object):\n \"\"\"\n Allows conversions to local frames. In this case NED.\n That is: North East Down from the start position in\n meters.\n \"\"\"\n def __init__(self, init_geodetic, init_ecef):\n self.init_ecef = init_ecef\n lat, lon, _ = (np.pi/180)*np.array(init_geodetic)\n self.ned2ecef_matrix = np.array([[-np.sin(lat)*np.cos(lon), -np.sin(lon), -np.cos(lat)*np.cos(lon)],\n [-np.sin(lat)*np.sin(lon), np.cos(lon), -np.cos(lat)*np.sin(lon)],\n [np.cos(lat), 0, -np.sin(lat)]])\n self.ecef2ned_matrix = self.ned2ecef_matrix.T\n\n @classmethod\n def from_geodetic(cls, init_geodetic):\n init_ecef = geodetic2ecef(init_geodetic)\n return LocalCoord(init_geodetic, init_ecef)\n\n @classmethod\n def from_ecef(cls, init_ecef):\n init_geodetic = ecef2geodetic(init_ecef)\n return LocalCoord(init_geodetic, init_ecef)\n\n\n def ecef2ned(self, ecef):\n ecef = np.array(ecef)\n return np.dot(self.ecef2ned_matrix, (ecef - self.init_ecef).T).T\n\n def ned2ecef(self, ned):\n ned = np.array(ned)\n # Transpose so that init_ecef will broadcast correctly for 1d or 2d ned.\n return (np.dot(self.ned2ecef_matrix, ned.T).T + self.init_ecef)\n\n def geodetic2ned(self, geodetic):\n ecef = geodetic2ecef(geodetic)\n return self.ecef2ned(ecef)\n\n def ned2geodetic(self, ned):\n ecef = self.ned2ecef(ned)\n return ecef2geodetic(ecef)\n"
] | [
[
"numpy.dot",
"numpy.sqrt",
"numpy.arctan",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"numpy.atleast_2d",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
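
A short usage sketch for the coordinate module above. The call pattern follows the code as shown; the Eiffel Tower coordinates and the approximate ECEF values in the comments are illustrative assumptions, not output from the repo:

import numpy as np

geodetic = np.array([48.8584, 2.2945, 35.0])  # lat (deg), lon (deg), alt (m)
ecef = geodetic2ecef(geodetic)                # roughly [4.20e6, 1.68e5, 4.78e6] metres
frame = LocalCoord.from_geodetic(geodetic)    # NED frame anchored at this point
ned = frame.geodetic2ned(geodetic)            # the origin itself -> approx. [0, 0, 0]
roundtrip = frame.ned2geodetic(ned)           # recovers lat/lon/alt up to numerics
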
alexandrwang/6882project | [
"3723bda72baacded3f0b436e1cce171d084f43ab"
] | [
"strens/module.py"
] | [
"import numpy as np\nfrom random import choice\n\nfrom pybrain.rl.learners.valuebased.interface import ActionValueInterface\n\n\n'''\nUpdates to Q are given by\nQ(s, a) = E[R(s, a)] + gamma * sum_s'[T(s, a, s') * max_a' Q(s', a')]\n\nwhere we infer the transition probabilities\nT(s, a, s') = p(s' | s, a)\n\nand use the ML estimate for E[R(s, a)] = average of rewards received\nfrom (s, a)\n'''\n\nclass ActionModule(ActionValueInterface):\n '''The module that keeps track of the Q(s, a) estimates\n as well as the posterior distribution on T(s, a, s') and\n the ML estimates of E[R(s, a)]'''\n\n def __init__(self, numStates, numActions, alphas=None, gamma=0.5):\n self.numRows = numStates\n self.numColumns = numActions\n\n # Q table\n # HACK: setting these all to 1's to allow for exploration\n self.actionTable = np.ones((numStates, numActions))\n\n # I have no idea how to set this\n self.gamma = gamma\n\n # TODO: set actual alphas\n if alphas is None:\n alphas = np.ones(numStates)\n self.alphas = alphas\n\n self.transitionProbs = np.zeros((numStates, numActions, numStates))\n self.initTransProbs()\n\n # The quantities we need to maintain in accordance with the Strens paper\n self.visitCount = np.zeros((numStates, numActions))\n self.sumRewards = np.zeros((numStates, numActions))\n\n self.rewardGammaAlpha = 10.\n self.rewardGammaBeta = 1.\n\n self.muNot = 0.\n self.nNot = 1.\n\n\n self.sumSqRewards = np.zeros((numStates, numActions))\n self.successorStates = [[set() for _ in xrange(numActions)] for _ in xrange(numStates)]\n self.transitionCount = np.zeros((numStates, numActions, numStates))\n\n # ones for our prior lol\n # HACK: try different priors\n self.transitionDirichletParams = np.ones((numStates, numActions, numStates))\n\n # ML estimates, updated using prioritized sweeping\n self.expectedReward = np.zeros((numStates, numActions))\n self.discountedReturn = np.zeros((numStates, numActions))\n\n def update(self, state, action, newstate, reward):\n self.visitCount[state][action] += 1\n self.sumRewards[state][action] += reward\n self.sumSqRewards[state][action] += reward * reward\n self.successorStates[state][action].add(newstate)\n self.transitionCount[state][action][newstate] += 1\n\n # update transition probability params\n self.transitionDirichletParams[state][action][newstate] += 1\n\n # update reward expectation parameters\n \n # update Qs according to formula\n self.updateAllQValues()\n\n # print \"TRANSITION PROBABILITIES FROM:\", state\n # print self.transitionDirichletParams[state, :, :]\n\n\n # print \"Q TABLE from:\", state\n # print self.actionTable[state]\n \n\n def updateAllQValues(self):\n newQvalues = np.zeros((self.numStates, self.numActions))\n\n for s in xrange(self.numStates):\n for a in xrange(self.numActions):\n newQvalues[s][a] = self.getUpdatedQValue(s, a)\n\n self.actionTable[:] = newQvalues\n\n def getUpdatedQValue(self, state, action):\n transitionProb = np.random.dirichlet(self.transitionDirichletParams[state, action, :])\n normalizer = sum(float(transitionProb[s]) for s in self.successorStates[state][action])\n def sumArg(otherState):\n return transitionProb[otherState] / normalizer * self.actionTable[otherState][self.getMaxAction(otherState)]\n\n\n # calculate appropriate alpha and beta for the Gamma\n\n n = float(self.visitCount[state, action])\n alpha = self.rewardGammaAlpha + 0.5 * n\n\n meanX = float(self.sumRewards[state, action]) / n if n > 0 else 0.\n nNot = self.nNot\n muNot = self.muNot\n\n part1 = 0.5 * n * meanX * meanX - meanX * self.sumRewards[state, 
action] + 0.5 * self.sumSqRewards[state, action]\n part2 = 0.5 * n * nNot * (meanX - muNot) * (meanX - muNot) / (n + nNot)\n\n # print part1, part2\n\n beta = self.rewardGammaBeta + part1 + part2\n\n # print \"ALPHA:\", alpha, \"BETA:\", beta\n\n tau = np.random.gamma(alpha, 1. / beta)\n\n muNot = n / (n + self.nNot) * meanX + self.nNot / (n + self.nNot) * self.muNot\n sigmasqNot = n * tau + self.nNot * tau\n\n mu = np.random.normal(muNot, 1./np.sqrt(sigmasqNot))\n\n expectedStateAction = np.random.normal(mu, 1./np.sqrt(tau))\n # print \"XMEAN:\", meanX\n # print \"MU:\", mu, \"SIGMA:\", 1/np.sqrt(tau)\n # expectedStateAction = meanX\n\n return expectedStateAction + self.gamma * sum(sumArg(s) for s in self.successorStates[state][action])\n # print \"STATE:\", state, \"ACTION:\", action, \"UPDATED EXP REWARD:\", self.actionTable[state][action]\n # print \"TRANSITION PROBABILITIES FROM:\", state\n # for a in xrange(self.numActions):\n # for s in xrange(self.numStates):\n # print \"newstate\", s, \"action\", a, \"probability:\", float(self.transitionDirichletParams[state, a, s]) / normalizer\n # print self.transitionDirichletParams[state, :, :]\n\n @property\n def numActions(self):\n return self.numColumns\n\n @property\n def numStates(self):\n return self.numRows\n\n def initTransProbs(self):\n for s in xrange(self.numStates):\n for a in xrange(self.numActions):\n self.transitionProbs[s,a,:] = np.random.dirichlet(self.alphas)\n\n def getMaxAction(self, state):\n possible = self.actionTable[state]\n best = np.where(possible == max(possible))[0]\n return choice(best)\n"
] | [
[
"numpy.sqrt",
"numpy.ones",
"numpy.random.gamma",
"numpy.zeros",
"numpy.random.dirichlet"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
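
The Strens-style module above keeps Dirichlet counts over successor states and samples T(s, a, .) from the posterior. A self-contained sketch of that single update step follows (the toy state count and observed counts are assumptions, and this is not the module's API):

import numpy as np

num_states = 4
prior = np.ones(num_states)                 # the module's all-ones prior
counts = np.array([5., 0., 2., 1.])         # observed s' counts for one (s, a)
posterior = prior + counts                  # conjugate Dirichlet update
sampled_T = np.random.dirichlet(posterior)  # one plausible draw of T(s, a, .)
assert abs(sampled_T.sum() - 1.0) < 1e-9    # each sampled row is a distribution
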
TingFree/NLPer-Arsenal | [
"17f34ec68c83babf8c3e5959fed14b9f251f869f"
] | [
"codes/nlper/models/text_clf.py"
] | [
"r\"\"\"\n各种文本分类模型的实现\n\"\"\"\n\nimport os\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.optim import AdamW\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoModel\nfrom transformers.models.bert.modeling_bert import BertModel\nfrom transformers import DataCollatorWithPadding, get_linear_schedule_with_warmup\nfrom codes.nlper.modules import MLP\nfrom codes.nlper.utils import DatasetCLF, Dict2Obj\nfrom codes.nlper.utils import load_nlp_data, save_data\nfrom codes.nlper.modules.modeling_outputs import TextCLFOutput, LightningOutput\nfrom codes.nlper import mini_pytorch_lightning as mpl\n\n\nclass LightningCLF(mpl.StandardModel):\n def __init__(self, model, tokenizer, configs: Dict2Obj, metrics, convert_fn):\n super(LightningCLF, self).__init__(configs, metrics)\n self.configs = configs\n self.aux_configs = Dict2Obj()\n self.metrics = metrics\n self.model = model\n self.tokenizer = tokenizer\n self.convert_fn = convert_fn\n\n def training_step(self, batch, batch_idx):\n labels = batch['labels']\n outputs = self.model(**batch)\n logits = outputs.logits\n loss = F.cross_entropy(logits.view(-1, self.configs.num_class),\n labels.view(-1))\n return LightningOutput(loss=loss)\n\n def validation_step(self, batch, batch_idx):\n labels = batch['labels']\n outputs = self.model(**batch)\n logits = outputs.logits\n loss = F.cross_entropy(logits.view(-1, self.configs.num_class),\n labels.view(-1))\n batch_preds = logits.argmax(1).cpu().tolist()\n batch_golds = labels.cpu().tolist()\n return LightningOutput(\n loss=loss,\n preds=batch_preds,\n golds=batch_golds\n )\n\n def validation_epoch_end(self, outputs):\n epoch_preds, epoch_golds = [], []\n for batch_outputs in outputs:\n epoch_preds += batch_outputs.preds\n epoch_golds += batch_outputs.golds\n self.metrics.scores(epoch_golds, epoch_preds)\n self.metrics.print_values()\n return self.metrics.return_target_score()\n\n def test_step(self, batch, batch_idx):\n outputs = self.model(**batch)\n logits = outputs.logits\n # prob, pred\n return LightningOutput(\n probs = F.softmax(logits, dim=-1).cpu().tolist(),\n preds = logits.argmax(1).cpu().tolist()\n )\n\n def test_epoch_end(self, outputs):\n probs, preds = [], []\n for batch_outputs in outputs:\n probs += [' '.join([str(p) for p in prob]) for prob in batch_outputs.probs]\n preds += batch_outputs.preds\n save_data(probs, os.path.join(self.configs.out_dir, 'test_pred.probs.txt'))\n save_data(preds, os.path.join(self.configs.out_dir, 'test_pred.txt'))\n\n def configure_optimizers(self):\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': self.configs.weight_decay},\n {'params': [p for n, p in self.named_parameters()if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters,\n lr=self.configs.lr)\n scheduler = get_linear_schedule_with_warmup(optimizer,\n self.configs.warmup_steps,\n self.configs.trainer_args.max_epochs * self.aux_configs.num_train_batch)\n return optimizer, scheduler\n\n def prepare_data(self) -> None:\n \"\"\" check & load data, the format of each line is 'text label', separated by tab and 'label'\n must be int, such as 0~num_labels-1\n \"\"\"\n train_file = self.configs.train_file\n val_file = self.configs.val_file\n test_file = self.configs.test_file\n self.collate_fn = DataCollatorWithPadding(tokenizer=self.tokenizer)\n if 
self.convert_fn:\n self._train_data = self.convert_fn(train_file, load_label=True)\n self._val_data = self.convert_fn(val_file, load_label=True)\n self._test_data = self.convert_fn(test_file, load_label=self.configs.is_eval_test)\n else:\n self._train_data = load_nlp_data(train_file, task_name=self.configs.task_name)\n self._val_data = load_nlp_data(val_file, task_name=self.configs.task_name)\n self._test_data = load_nlp_data(test_file, task_name=self.configs.task_name)\n\n def train_dataloader(self):\n self.train_data = DatasetCLF(self._train_data,\n self.tokenizer,\n self.configs.max_len,\n load_label=True)\n return DataLoader(self.train_data,\n batch_size=self.configs.train_batch_size,\n collate_fn=self.collate_fn,\n shuffle=True,\n num_workers=16)\n\n def val_dataloader(self):\n self.val_data = DatasetCLF(self._val_data,\n self.tokenizer,\n self.configs.max_len,\n load_label=True)\n return DataLoader(self.val_data,\n batch_size=self.configs.val_batch_size,\n collate_fn=self.collate_fn,\n num_workers=16)\n\n def test_dataloader(self):\n self.test_data = DatasetCLF(self._test_data,\n self.tokenizer,\n self.configs.max_len,\n load_label=self.configs.is_eval_test)\n return DataLoader(self.test_data,\n batch_size=self.configs.val_batch_size,\n collate_fn=self.collate_fn,\n num_workers=16)\n\n\nclass BertCLF(nn.Module):\n def __init__(self, args):\n super(BertCLF, self).__init__()\n self.bert = AutoModel.from_pretrained(args.pretrained_model)\n self.dropout = nn.Dropout(self.bert.config.hidden_dropout_prob)\n self.clf = MLP([self.bert.config.hidden_size, args.num_class],\n 'tanh',\n dropout=args.dropout)\n\n def forward(self, input_ids, attention_mask, token_type_ids, **kwargs):\n outputs = self.bert(input_ids, attention_mask, token_type_ids)\n logits = self.clf(outputs[1])\n\n return TextCLFOutput(\n logits = logits,\n sequenceEmb = outputs[1]\n )\n"
] | [
[
"torch.nn.Dropout",
"torch.optim.AdamW",
"torch.utils.data.DataLoader",
"torch.nn.functional.softmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
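
A hedged usage sketch for the BertCLF model above, assuming the repo's modules are importable; the checkpoint name, class count, and the SimpleNamespace stand-in for the repo's config object are assumptions:

import torch
from types import SimpleNamespace
from transformers import AutoTokenizer

args = SimpleNamespace(pretrained_model='bert-base-chinese', num_class=2, dropout=0.1)
tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model)
model = BertCLF(args).eval()
batch = tokenizer(['今天天气不错', '这部电影很差'], padding=True, return_tensors='pt')
with torch.no_grad():
    out = model(**batch)       # TextCLFOutput with .logits and .sequenceEmb
print(out.logits.shape)        # torch.Size([2, num_class])
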
kakaba2009/MachineLearning | [
"26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33"
] | [
"python/src/market/LoadStockData.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport urllib\nimport matplotlib.dates as mdates\n\ndef graph_data(stock):\n\n stock_price_url = 'http://chartapi.finance.yahoo.com/instrument/1.0/'+stock+'/chartdata;type=quote;range=99y/csv'\n\n source_code = urllib.request.urlopen(stock_price_url).read().decode()\n\n stock_data = []\n split_source = source_code.split('\\n')\n\n for line in split_source:\n split_line = line.split(',')\n if len(split_line) == 6:\n if 'values' not in line:\n stock_data.append(line)\n \n date, closep, highp, lowp, openp, volume = np.loadtxt(stock_data,\n delimiter=',',\n unpack=True,\n # %Y = full year. 2015\n # %y = partial year 15\n # %m = number month\n # %d = number day\n # %H = hours\n # %M = minutes\n # %S = seconds\n # 12-06-2014\n # %m-%d-%Y\n converters={})\n \n return date, closep\n \ndate, close = graph_data('AAPL')\n\nprint(date)\n"
] | [
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
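
The Yahoo chartapi endpoint queried above is no longer available, so as a self-contained stand-in, here is the same np.loadtxt parsing step run over in-memory CSV lines (the sample rows are made up, and the column order follows the code above):

import numpy as np

stock_data = [
    '20230103,125.07,130.90,124.17,130.28,112117500',
    '20230104,126.36,128.66,125.08,126.89,89113600',
]
# np.loadtxt accepts a list of strings, treating each entry as one CSV row.
date, closep, highp, lowp, openp, volume = np.loadtxt(
    stock_data, delimiter=',', unpack=True)
print(date, closep)  # [20230103. 20230104.] [125.07 126.36]
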
ludydoo/ta | [
"20caf3e255ac39d1307dc099db24c6f36ba5ca05"
] | [
"test/integration/wrapper.py"
] | [
"import unittest\n\nimport pandas as pd\n\nimport ta\n\n\nclass TestWrapper(unittest.TestCase):\n\n _filename = \"test/data/datas.csv\"\n\n @classmethod\n def setUpClass(cls):\n cls._df = pd.read_csv(cls._filename, sep=\",\")\n\n @classmethod\n def tearDownClass(cls):\n del cls._df\n\n def test_general(self):\n # Clean nan values\n df = ta.utils.dropna(self._df)\n\n # Add all ta features filling nans values\n ta.add_all_ta_features(\n df=df,\n open=\"Open\",\n high=\"High\",\n low=\"Low\",\n close=\"Close\",\n volume=\"Volume_BTC\",\n fillna=True,\n )\n\n # Add all ta features not filling nans values\n ta.add_all_ta_features(\n df=df,\n open=\"Open\",\n high=\"High\",\n low=\"Low\",\n close=\"Close\",\n volume=\"Volume_BTC\",\n fillna=False,\n )\n\n # Check added ta features are all numerical values after filling nans\n input_cols = self._df.columns\n df_with_ta = ta.add_all_ta_features(\n df=df,\n open=\"Open\",\n high=\"High\",\n low=\"Low\",\n close=\"Close\",\n volume=\"Volume_BTC\",\n fillna=True,\n )\n ta_cols = [c for c in df_with_ta.columns if c not in input_cols]\n self.assertTrue(\n df_with_ta[ta_cols]\n .apply(lambda series: pd.to_numeric(series, errors=\"coerce\"))\n .notnull()\n .all()\n .all()\n )\n\n self.assertTrue(df_with_ta.shape[1] == 94)\n\n def test_only_vectorized(self):\n # Clean nan values\n df = ta.utils.dropna(self._df)\n\n # Add all ta features filling nans values\n df_vectorized = ta.add_all_ta_features(\n df=df,\n open=\"Open\",\n high=\"High\",\n low=\"Low\",\n close=\"Close\",\n volume=\"Volume_BTC\",\n fillna=True,\n vectorized=True\n )\n\n self.assertTrue(df_vectorized.shape[1] == 76)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"pandas.read_csv",
"pandas.to_numeric"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
rbpisupati/data-repo | [
"a86cbb0a3d66e139973787eb4fbea4692adfa7f2"
] | [
"dash-app/modules/data_viewer.py"
] | [
"import dash\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport dash_table\nimport plotly.graph_objects as go\nfrom dash.dependencies import Input, Output, State\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport json\nfrom datetime import datetime as dt\nfrom glob import glob\n\nfrom app import app\n\n# def read_data(data_dir):\n# all_submissions = glob(data_dir)\n# # for ef in \n\n# return(all_submissions)\n\n# layout = html.P(\"This is the content of the home page!\")\n\ndf = pd.DataFrame({\n 'a': [1, 2, 3],\n 'b': [4, 1, 4],\n 'c': ['x', 'y', 'z'],\n})\n\nlayout = html.Div([\n dcc.Dropdown(\n id='sim-dropdown',\n options=[{'label': i, 'value': i} for i in df['c'].unique()],\n value='a'\n ),\n html.Hr(),\n html.Div(id='sim-output'),\n])\n\[email protected](Output('sim-output', 'children'),\n [Input('sim-dropdown', 'value')])\ndef update_output_1(value):\n # Safely reassign the filter to a new variable\n filtered_df = df[df['c'] == str(value)]\n return len(filtered_df)"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Remnan13/pandas | [
"aaa69d1bb6ed2539f2dbdba842d83a2f90860aa0"
] | [
"pandas/tests/sparse/series/test_series.py"
] | [
"# pylint: disable-msg=E1101,W0612\n\nimport operator\nfrom datetime import datetime\n\nimport pytest\n\nfrom numpy import nan\nimport numpy as np\nimport pandas as pd\n\n\nfrom pandas import Series, DataFrame, bdate_range, isna, compat\nfrom pandas.errors import PerformanceWarning\nfrom pandas.tseries.offsets import BDay\nimport pandas.util.testing as tm\nimport pandas.util._test_decorators as td\nfrom pandas.compat import range, PY36\nfrom pandas.core.reshape.util import cartesian_product\n\nfrom pandas.core.sparse.api import SparseDtype\nimport pandas.core.sparse.frame as spf\n\nfrom pandas._libs.sparse import BlockIndex, IntIndex\nfrom pandas.core.sparse.api import SparseSeries\nfrom pandas.tests.series.test_api import SharedWithSparse\n\n\ndef _test_data1():\n # nan-based\n arr = np.arange(20, dtype=float)\n index = np.arange(20)\n arr[:2] = nan\n arr[5:10] = nan\n arr[-3:] = nan\n\n return arr, index\n\n\ndef _test_data2():\n # nan-based\n arr = np.arange(15, dtype=float)\n index = np.arange(15)\n arr[7:12] = nan\n arr[-1:] = nan\n return arr, index\n\n\ndef _test_data1_zero():\n # zero-based\n arr, index = _test_data1()\n arr[np.isnan(arr)] = 0\n return arr, index\n\n\ndef _test_data2_zero():\n # zero-based\n arr, index = _test_data2()\n arr[np.isnan(arr)] = 0\n return arr, index\n\n\nclass TestSparseSeries(SharedWithSparse):\n\n series_klass = SparseSeries\n # SharedWithSparse tests use generic, series_klass-agnostic assertion\n _assert_series_equal = staticmethod(tm.assert_sp_series_equal)\n\n def setup_method(self, method):\n arr, index = _test_data1()\n\n date_index = bdate_range('1/1/2011', periods=len(index))\n\n self.bseries = SparseSeries(arr, index=index, kind='block',\n name='bseries')\n self.ts = self.bseries\n\n self.btseries = SparseSeries(arr, index=date_index, kind='block')\n\n self.iseries = SparseSeries(arr, index=index, kind='integer',\n name='iseries')\n\n arr, index = _test_data2()\n self.bseries2 = SparseSeries(arr, index=index, kind='block')\n self.iseries2 = SparseSeries(arr, index=index, kind='integer')\n\n arr, index = _test_data1_zero()\n self.zbseries = SparseSeries(arr, index=index, kind='block',\n fill_value=0, name='zbseries')\n self.ziseries = SparseSeries(arr, index=index, kind='integer',\n fill_value=0)\n\n arr, index = _test_data2_zero()\n self.zbseries2 = SparseSeries(arr, index=index, kind='block',\n fill_value=0)\n self.ziseries2 = SparseSeries(arr, index=index, kind='integer',\n fill_value=0)\n\n def test_constructor_dict_input(self):\n # gh-16905\n constructor_dict = {1: 1.}\n index = [0, 1, 2]\n\n # Series with index passed in\n series = pd.Series(constructor_dict)\n expected = SparseSeries(series, index=index)\n\n result = SparseSeries(constructor_dict, index=index)\n tm.assert_sp_series_equal(result, expected)\n\n # Series with index and dictionary with no index\n expected = SparseSeries(series)\n\n result = SparseSeries(constructor_dict)\n tm.assert_sp_series_equal(result, expected)\n\n def test_constructor_dict_order(self):\n # GH19018\n # initialization ordering: by insertion order if python>= 3.6, else\n # order by value\n d = {'b': 1, 'a': 0, 'c': 2}\n result = SparseSeries(d)\n if PY36:\n expected = SparseSeries([1, 0, 2], index=list('bac'))\n else:\n expected = SparseSeries([0, 1, 2], index=list('abc'))\n tm.assert_sp_series_equal(result, expected)\n\n def test_constructor_dtype(self):\n arr = SparseSeries([np.nan, 1, 2, np.nan])\n assert arr.dtype == SparseDtype(np.float64)\n assert np.isnan(arr.fill_value)\n\n arr = 
SparseSeries([np.nan, 1, 2, np.nan], fill_value=0)\n assert arr.dtype == SparseDtype(np.float64, 0)\n assert arr.fill_value == 0\n\n arr = SparseSeries([0, 1, 2, 4], dtype=np.int64, fill_value=np.nan)\n assert arr.dtype == SparseDtype(np.int64, np.nan)\n assert np.isnan(arr.fill_value)\n\n arr = SparseSeries([0, 1, 2, 4], dtype=np.int64)\n assert arr.dtype == SparseDtype(np.int64, 0)\n assert arr.fill_value == 0\n\n arr = SparseSeries([0, 1, 2, 4], fill_value=0, dtype=np.int64)\n assert arr.dtype == SparseDtype(np.int64, 0)\n assert arr.fill_value == 0\n\n def test_iteration_and_str(self):\n [x for x in self.bseries]\n str(self.bseries)\n\n def test_construct_DataFrame_with_sp_series(self):\n # it works!\n df = DataFrame({'col': self.bseries})\n\n # printing & access\n df.iloc[:1]\n df['col']\n df.dtypes\n str(df)\n\n tm.assert_sp_series_equal(df['col'], self.bseries, check_names=False)\n\n result = df.iloc[:, 0]\n tm.assert_sp_series_equal(result, self.bseries, check_names=False)\n\n # blocking\n expected = Series({'col': 'float64:sparse'})\n result = df.ftypes\n tm.assert_series_equal(expected, result)\n\n def test_constructor_preserve_attr(self):\n arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n s = pd.SparseSeries(arr, name='x')\n assert s.dtype == SparseDtype(np.int64)\n assert s.fill_value == 0\n\n def test_series_density(self):\n # GH2803\n ts = Series(np.random.randn(10))\n ts[2:-2] = nan\n sts = ts.to_sparse()\n density = sts.density # don't die\n assert density == 4 / 10.0\n\n def test_sparse_to_dense(self):\n arr, index = _test_data1()\n series = self.bseries.to_dense()\n tm.assert_series_equal(series, Series(arr, name='bseries'))\n\n # see gh-14647\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n series = self.bseries.to_dense(sparse_only=True)\n\n indexer = np.isfinite(arr)\n exp = Series(arr[indexer], index=index[indexer], name='bseries')\n tm.assert_series_equal(series, exp)\n\n series = self.iseries.to_dense()\n tm.assert_series_equal(series, Series(arr, name='iseries'))\n\n arr, index = _test_data1_zero()\n series = self.zbseries.to_dense()\n tm.assert_series_equal(series, Series(arr, name='zbseries'))\n\n series = self.ziseries.to_dense()\n tm.assert_series_equal(series, Series(arr))\n\n def test_to_dense_fill_value(self):\n s = pd.Series([1, np.nan, np.nan, 3, np.nan])\n res = SparseSeries(s).to_dense()\n tm.assert_series_equal(res, s)\n\n res = SparseSeries(s, fill_value=0).to_dense()\n tm.assert_series_equal(res, s)\n\n s = pd.Series([1, np.nan, 0, 3, 0])\n res = SparseSeries(s, fill_value=0).to_dense()\n tm.assert_series_equal(res, s)\n\n res = SparseSeries(s, fill_value=0).to_dense()\n tm.assert_series_equal(res, s)\n\n s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])\n res = SparseSeries(s).to_dense()\n tm.assert_series_equal(res, s)\n\n s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])\n res = SparseSeries(s, fill_value=0).to_dense()\n tm.assert_series_equal(res, s)\n\n def test_dense_to_sparse(self):\n series = self.bseries.to_dense()\n bseries = series.to_sparse(kind='block')\n iseries = series.to_sparse(kind='integer')\n tm.assert_sp_series_equal(bseries, self.bseries)\n tm.assert_sp_series_equal(iseries, self.iseries, check_names=False)\n assert iseries.name == self.bseries.name\n\n assert len(series) == len(bseries)\n assert len(series) == len(iseries)\n assert series.shape == bseries.shape\n assert series.shape == 
iseries.shape\n\n # non-NaN fill value\n series = self.zbseries.to_dense()\n zbseries = series.to_sparse(kind='block', fill_value=0)\n ziseries = series.to_sparse(kind='integer', fill_value=0)\n tm.assert_sp_series_equal(zbseries, self.zbseries)\n tm.assert_sp_series_equal(ziseries, self.ziseries, check_names=False)\n assert ziseries.name == self.zbseries.name\n\n assert len(series) == len(zbseries)\n assert len(series) == len(ziseries)\n assert series.shape == zbseries.shape\n assert series.shape == ziseries.shape\n\n def test_to_dense_preserve_name(self):\n assert (self.bseries.name is not None)\n result = self.bseries.to_dense()\n assert result.name == self.bseries.name\n\n def test_constructor(self):\n # test setup guys\n assert np.isnan(self.bseries.fill_value)\n assert isinstance(self.bseries.sp_index, BlockIndex)\n assert np.isnan(self.iseries.fill_value)\n assert isinstance(self.iseries.sp_index, IntIndex)\n\n assert self.zbseries.fill_value == 0\n tm.assert_numpy_array_equal(self.zbseries.values.values,\n self.bseries.to_dense().fillna(0).values)\n\n # pass SparseSeries\n def _check_const(sparse, name):\n # use passed series name\n result = SparseSeries(sparse)\n tm.assert_sp_series_equal(result, sparse)\n assert sparse.name == name\n assert result.name == name\n\n # use passed name\n result = SparseSeries(sparse, name='x')\n tm.assert_sp_series_equal(result, sparse, check_names=False)\n assert result.name == 'x'\n\n _check_const(self.bseries, 'bseries')\n _check_const(self.iseries, 'iseries')\n _check_const(self.zbseries, 'zbseries')\n\n # Sparse time series works\n date_index = bdate_range('1/1/2000', periods=len(self.bseries))\n s5 = SparseSeries(self.bseries, index=date_index)\n assert isinstance(s5, SparseSeries)\n\n # pass Series\n bseries2 = SparseSeries(self.bseries.to_dense())\n tm.assert_numpy_array_equal(self.bseries.sp_values, bseries2.sp_values)\n\n # pass dict?\n\n # don't copy the data by default\n values = np.ones(self.bseries.npoints)\n sp = SparseSeries(values, sparse_index=self.bseries.sp_index)\n sp.sp_values[:5] = 97\n assert values[0] == 97\n\n assert len(sp) == 20\n assert sp.shape == (20, )\n\n # but can make it copy!\n sp = SparseSeries(values, sparse_index=self.bseries.sp_index,\n copy=True)\n sp.sp_values[:5] = 100\n assert values[0] == 97\n\n assert len(sp) == 20\n assert sp.shape == (20, )\n\n def test_constructor_scalar(self):\n data = 5\n sp = SparseSeries(data, np.arange(100))\n sp = sp.reindex(np.arange(200))\n assert (sp.loc[:99] == data).all()\n assert isna(sp.loc[100:]).all()\n\n data = np.nan\n sp = SparseSeries(data, np.arange(100))\n assert len(sp) == 100\n assert sp.shape == (100, )\n\n def test_constructor_ndarray(self):\n pass\n\n def test_constructor_nonnan(self):\n arr = [0, 0, 0, nan, nan]\n sp_series = SparseSeries(arr, fill_value=0)\n tm.assert_numpy_array_equal(sp_series.values.values, np.array(arr))\n assert len(sp_series) == 5\n assert sp_series.shape == (5, )\n\n def test_constructor_empty(self):\n # see gh-9272\n sp = SparseSeries()\n assert len(sp.index) == 0\n assert sp.shape == (0, )\n\n def test_copy_astype(self):\n cop = self.bseries.astype(np.float64)\n assert cop is not self.bseries\n assert cop.sp_index is self.bseries.sp_index\n assert cop.dtype == SparseDtype(np.float64)\n\n cop2 = self.iseries.copy()\n\n tm.assert_sp_series_equal(cop, self.bseries)\n tm.assert_sp_series_equal(cop2, self.iseries)\n\n # test that data is copied\n cop[:5] = 97\n assert cop.sp_values[0] == 97\n assert self.bseries.sp_values[0] != 97\n\n # 
correct fill value\n zbcop = self.zbseries.copy()\n zicop = self.ziseries.copy()\n\n tm.assert_sp_series_equal(zbcop, self.zbseries)\n tm.assert_sp_series_equal(zicop, self.ziseries)\n\n # no deep copy\n view = self.bseries.copy(deep=False)\n view.sp_values[:5] = 5\n assert (self.bseries.sp_values[:5] == 5).all()\n\n def test_shape(self):\n # see gh-10452\n assert self.bseries.shape == (20, )\n assert self.btseries.shape == (20, )\n assert self.iseries.shape == (20, )\n\n assert self.bseries2.shape == (15, )\n assert self.iseries2.shape == (15, )\n\n assert self.zbseries2.shape == (15, )\n assert self.ziseries2.shape == (15, )\n\n def test_astype(self):\n result = self.bseries.astype(SparseDtype(np.int64, 0))\n expected = (self.bseries.to_dense()\n .fillna(0)\n .astype(np.int64)\n .to_sparse(fill_value=0))\n tm.assert_sp_series_equal(result, expected)\n\n def test_astype_all(self):\n orig = pd.Series(np.array([1, 2, 3]))\n s = SparseSeries(orig)\n\n types = [np.float64, np.float32, np.int64,\n np.int32, np.int16, np.int8]\n for typ in types:\n dtype = SparseDtype(typ)\n res = s.astype(dtype)\n assert res.dtype == dtype\n tm.assert_series_equal(res.to_dense(), orig.astype(typ))\n\n def test_kind(self):\n assert self.bseries.kind == 'block'\n assert self.iseries.kind == 'integer'\n\n def test_to_frame(self):\n # GH 9850\n s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x')\n exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]})\n tm.assert_sp_frame_equal(s.to_frame(), exp)\n\n exp = pd.SparseDataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})\n tm.assert_sp_frame_equal(s.to_frame(name='y'), exp)\n\n s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x', fill_value=0)\n exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]},\n default_fill_value=0)\n\n tm.assert_sp_frame_equal(s.to_frame(), exp)\n exp = pd.DataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})\n tm.assert_frame_equal(s.to_frame(name='y').to_dense(), exp)\n\n def test_pickle(self):\n def _test_roundtrip(series):\n unpickled = tm.round_trip_pickle(series)\n tm.assert_sp_series_equal(series, unpickled)\n tm.assert_series_equal(series.to_dense(), unpickled.to_dense())\n\n self._check_all(_test_roundtrip)\n\n def _check_all(self, check_func):\n check_func(self.bseries)\n check_func(self.iseries)\n check_func(self.zbseries)\n check_func(self.ziseries)\n\n def test_getitem(self):\n def _check_getitem(sp, dense):\n for idx, val in compat.iteritems(dense):\n tm.assert_almost_equal(val, sp[idx])\n\n for i in range(len(dense)):\n tm.assert_almost_equal(sp[i], dense[i])\n # j = np.float64(i)\n # assert_almost_equal(sp[j], dense[j])\n\n # API change 1/6/2012\n # negative getitem works\n # for i in xrange(len(dense)):\n # assert_almost_equal(sp[-i], dense[-i])\n\n _check_getitem(self.bseries, self.bseries.to_dense())\n _check_getitem(self.btseries, self.btseries.to_dense())\n\n _check_getitem(self.zbseries, self.zbseries.to_dense())\n _check_getitem(self.iseries, self.iseries.to_dense())\n _check_getitem(self.ziseries, self.ziseries.to_dense())\n\n # exception handling\n pytest.raises(Exception, self.bseries.__getitem__,\n len(self.bseries) + 1)\n\n # index not contained\n pytest.raises(Exception, self.btseries.__getitem__,\n self.btseries.index[-1] + BDay())\n\n def test_get_get_value(self):\n tm.assert_almost_equal(self.bseries.get(10), self.bseries[10])\n assert self.bseries.get(len(self.bseries) + 1) is None\n\n dt = self.btseries.index[10]\n result = self.btseries.get(dt)\n expected = self.btseries.to_dense()[dt]\n 
tm.assert_almost_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n tm.assert_almost_equal(\n self.bseries.get_value(10), self.bseries[10])\n\n def test_set_value(self):\n\n idx = self.btseries.index[7]\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n self.btseries.set_value(idx, 0)\n assert self.btseries[idx] == 0\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n self.iseries.set_value('foobar', 0)\n assert self.iseries.index[-1] == 'foobar'\n assert self.iseries['foobar'] == 0\n\n def test_getitem_slice(self):\n idx = self.bseries.index\n res = self.bseries[::2]\n assert isinstance(res, SparseSeries)\n\n expected = self.bseries.reindex(idx[::2])\n tm.assert_sp_series_equal(res, expected)\n\n res = self.bseries[:5]\n assert isinstance(res, SparseSeries)\n tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))\n\n res = self.bseries[5:]\n tm.assert_sp_series_equal(res, self.bseries.reindex(idx[5:]))\n\n # negative indices\n res = self.bseries[:-3]\n tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:-3]))\n\n def test_take(self):\n def _compare_with_dense(sp):\n dense = sp.to_dense()\n\n def _compare(idx):\n dense_result = dense.take(idx).values\n sparse_result = sp.take(idx)\n assert isinstance(sparse_result, SparseSeries)\n tm.assert_almost_equal(dense_result,\n sparse_result.values.values)\n\n _compare([1., 2., 3., 4., 5., 0.])\n _compare([7, 2, 9, 0, 4])\n _compare([3, 6, 3, 4, 7])\n\n self._check_all(_compare_with_dense)\n\n pytest.raises(Exception, self.bseries.take,\n [0, len(self.bseries) + 1])\n\n # Corner case\n # XXX: changed test. Why wsa this considered a corner case?\n sp = SparseSeries(np.ones(10) * nan)\n exp = pd.Series(np.repeat(nan, 5))\n tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp.to_sparse())\n\n with tm.assert_produces_warning(FutureWarning):\n sp.take([1, 5], convert=True)\n\n with tm.assert_produces_warning(FutureWarning):\n sp.take([1, 5], convert=False)\n\n def test_numpy_take(self):\n sp = SparseSeries([1.0, 2.0, 3.0])\n indices = [1, 2]\n\n tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(),\n np.take(sp.to_dense(), indices, axis=0))\n\n msg = \"the 'out' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.take,\n sp, indices, out=np.empty(sp.shape))\n\n msg = \"the 'mode' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.take,\n sp, indices, out=None, mode='clip')\n\n def test_setitem(self):\n self.bseries[5] = 7.\n assert self.bseries[5] == 7.\n\n def test_setslice(self):\n self.bseries[5:10] = 7.\n tm.assert_series_equal(self.bseries[5:10].to_dense(),\n Series(7., index=range(5, 10),\n name=self.bseries.name))\n\n def test_operators(self):\n\n def _check_op(a, b, op):\n sp_result = op(a, b)\n adense = a.to_dense() if isinstance(a, SparseSeries) else a\n bdense = b.to_dense() if isinstance(b, SparseSeries) else b\n dense_result = op(adense, bdense)\n tm.assert_almost_equal(sp_result.to_dense(), dense_result)\n\n def check(a, b):\n _check_op(a, b, operator.add)\n _check_op(a, b, operator.sub)\n _check_op(a, b, operator.truediv)\n _check_op(a, b, operator.floordiv)\n _check_op(a, b, operator.mul)\n\n _check_op(a, b, lambda x, y: operator.add(y, x))\n _check_op(a, b, lambda x, y: operator.sub(y, x))\n _check_op(a, b, lambda x, y: operator.truediv(y, x))\n _check_op(a, b, lambda x, y: operator.floordiv(y, x))\n _check_op(a, b, lambda x, y: operator.mul(y, x))\n\n # NaN ** 0 = 
1 in C?\n # _check_op(a, b, operator.pow)\n # _check_op(a, b, lambda x, y: operator.pow(y, x))\n\n check(self.bseries, self.bseries)\n check(self.iseries, self.iseries)\n check(self.bseries, self.iseries)\n\n check(self.bseries, self.bseries2)\n check(self.bseries, self.iseries2)\n check(self.iseries, self.iseries2)\n\n # scalar value\n check(self.bseries, 5)\n\n # zero-based\n check(self.zbseries, self.zbseries * 2)\n check(self.zbseries, self.zbseries2)\n check(self.ziseries, self.ziseries2)\n\n # with dense\n result = self.bseries + self.bseries.to_dense()\n tm.assert_sp_series_equal(result, self.bseries + self.bseries)\n\n def test_binary_operators(self):\n\n # skipping for now #####\n import pytest\n pytest.skip(\"skipping sparse binary operators test\")\n\n def _check_inplace_op(iop, op):\n tmp = self.bseries.copy()\n\n expected = op(tmp, self.bseries)\n iop(tmp, self.bseries)\n tm.assert_sp_series_equal(tmp, expected)\n\n inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow']\n for op in inplace_ops:\n _check_inplace_op(getattr(operator, \"i%s\" % op),\n getattr(operator, op))\n\n @pytest.mark.parametrize(\"values, op, fill_value\", [\n ([True, False, False, True], operator.invert, True),\n ([True, False, False, True], operator.invert, False),\n ([0, 1, 2, 3], operator.pos, 0),\n ([0, 1, 2, 3], operator.neg, 0),\n ([0, np.nan, 2, 3], operator.pos, np.nan),\n ([0, np.nan, 2, 3], operator.neg, np.nan),\n ])\n def test_unary_operators(self, values, op, fill_value):\n # https://github.com/pandas-dev/pandas/issues/22835\n values = np.asarray(values)\n if op is operator.invert:\n new_fill_value = not fill_value\n else:\n new_fill_value = op(fill_value)\n s = SparseSeries(values,\n fill_value=fill_value,\n index=['a', 'b', 'c', 'd'],\n name='name')\n result = op(s)\n expected = SparseSeries(op(values),\n fill_value=new_fill_value,\n index=['a', 'b', 'c', 'd'],\n name='name')\n tm.assert_sp_series_equal(result, expected)\n\n def test_abs(self):\n s = SparseSeries([1, 2, -3], name='x')\n expected = SparseSeries([1, 2, 3], name='x')\n result = s.abs()\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n result = abs(s)\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n result = np.abs(s)\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n s = SparseSeries([1, -2, 2, -3], fill_value=-2, name='x')\n expected = SparseSeries([1, 2, 3], sparse_index=s.sp_index,\n fill_value=2, name='x')\n result = s.abs()\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n result = abs(s)\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n result = np.abs(s)\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n def test_reindex(self):\n def _compare_with_series(sps, new_index):\n spsre = sps.reindex(new_index)\n\n series = sps.to_dense()\n seriesre = series.reindex(new_index)\n seriesre = seriesre.to_sparse(fill_value=sps.fill_value)\n\n tm.assert_sp_series_equal(spsre, seriesre)\n tm.assert_series_equal(spsre.to_dense(), seriesre.to_dense())\n\n _compare_with_series(self.bseries, self.bseries.index[::2])\n _compare_with_series(self.bseries, list(self.bseries.index[::2]))\n _compare_with_series(self.bseries, self.bseries.index[:10])\n _compare_with_series(self.bseries, self.bseries.index[5:])\n\n _compare_with_series(self.zbseries, self.zbseries.index[::2])\n _compare_with_series(self.zbseries, self.zbseries.index[:10])\n 
_compare_with_series(self.zbseries, self.zbseries.index[5:])\n\n # special cases\n same_index = self.bseries.reindex(self.bseries.index)\n tm.assert_sp_series_equal(self.bseries, same_index)\n assert same_index is not self.bseries\n\n # corner cases\n sp = SparseSeries([], index=[])\n # TODO: sp_zero is not used anywhere...remove?\n sp_zero = SparseSeries([], index=[], fill_value=0) # noqa\n _compare_with_series(sp, np.arange(10))\n\n # with copy=False\n reindexed = self.bseries.reindex(self.bseries.index, copy=True)\n reindexed.sp_values[:] = 1.\n assert (self.bseries.sp_values != 1.).all()\n\n reindexed = self.bseries.reindex(self.bseries.index, copy=False)\n reindexed.sp_values[:] = 1.\n tm.assert_numpy_array_equal(self.bseries.sp_values, np.repeat(1., 10))\n\n def test_sparse_reindex(self):\n length = 10\n\n def _check(values, index1, index2, fill_value):\n first_series = SparseSeries(values, sparse_index=index1,\n fill_value=fill_value)\n reindexed = first_series.sparse_reindex(index2)\n assert reindexed.sp_index is index2\n\n int_indices1 = index1.to_int_index().indices\n int_indices2 = index2.to_int_index().indices\n\n expected = Series(values, index=int_indices1)\n expected = expected.reindex(int_indices2).fillna(fill_value)\n tm.assert_almost_equal(expected.values, reindexed.sp_values)\n\n # make sure level argument asserts\n # TODO: expected is not used anywhere...remove?\n expected = expected.reindex(int_indices2).fillna(fill_value) # noqa\n\n def _check_with_fill_value(values, first, second, fill_value=nan):\n i_index1 = IntIndex(length, first)\n i_index2 = IntIndex(length, second)\n\n b_index1 = i_index1.to_block_index()\n b_index2 = i_index2.to_block_index()\n\n _check(values, i_index1, i_index2, fill_value)\n _check(values, b_index1, b_index2, fill_value)\n\n def _check_all(values, first, second):\n _check_with_fill_value(values, first, second, fill_value=nan)\n _check_with_fill_value(values, first, second, fill_value=0)\n\n index1 = [2, 4, 5, 6, 8, 9]\n values1 = np.arange(6.)\n\n _check_all(values1, index1, [2, 4, 5])\n _check_all(values1, index1, [2, 3, 4, 5, 6, 7, 8, 9])\n _check_all(values1, index1, [0, 1])\n _check_all(values1, index1, [0, 1, 7, 8, 9])\n _check_all(values1, index1, [])\n\n first_series = SparseSeries(values1,\n sparse_index=IntIndex(length, index1),\n fill_value=nan)\n with tm.assert_raises_regex(TypeError,\n 'new index must be a SparseIndex'):\n reindexed = first_series.sparse_reindex(0) # noqa\n\n def test_repr(self):\n # TODO: These aren't used\n bsrepr = repr(self.bseries) # noqa\n isrepr = repr(self.iseries) # noqa\n\n def test_iter(self):\n pass\n\n def test_truncate(self):\n pass\n\n def test_fillna(self):\n pass\n\n def test_groupby(self):\n pass\n\n def test_reductions(self):\n def _compare_with_dense(obj, op):\n sparse_result = getattr(obj, op)()\n series = obj.to_dense()\n dense_result = getattr(series, op)()\n assert sparse_result == dense_result\n\n to_compare = ['count', 'sum', 'mean', 'std', 'var', 'skew']\n\n def _compare_all(obj):\n for op in to_compare:\n _compare_with_dense(obj, op)\n\n _compare_all(self.bseries)\n\n self.bseries.sp_values[5:10] = np.NaN\n _compare_all(self.bseries)\n\n _compare_all(self.zbseries)\n self.zbseries.sp_values[5:10] = np.NaN\n _compare_all(self.zbseries)\n\n series = self.zbseries.copy()\n series.fill_value = 2\n _compare_all(series)\n\n nonna = Series(np.random.randn(20)).to_sparse()\n _compare_all(nonna)\n\n nonna2 = Series(np.random.randn(20)).to_sparse(fill_value=0)\n _compare_all(nonna2)\n\n def 
test_dropna(self):\n sp = SparseSeries([0, 0, 0, nan, nan, 5, 6], fill_value=0)\n\n sp_valid = sp.dropna()\n\n expected = sp.to_dense().dropna()\n expected = expected[expected != 0]\n exp_arr = pd.SparseArray(expected.values, fill_value=0, kind='block')\n tm.assert_sp_array_equal(sp_valid.values, exp_arr)\n tm.assert_index_equal(sp_valid.index, expected.index)\n assert len(sp_valid.sp_values) == 2\n\n result = self.bseries.dropna()\n expected = self.bseries.to_dense().dropna()\n assert not isinstance(result, SparseSeries)\n tm.assert_series_equal(result, expected)\n\n def test_homogenize(self):\n def _check_matches(indices, expected):\n data = {}\n for i, idx in enumerate(indices):\n data[i] = SparseSeries(idx.to_int_index().indices,\n sparse_index=idx, fill_value=np.nan)\n # homogenized is only valid with NaN fill values\n homogenized = spf.homogenize(data)\n\n for k, v in compat.iteritems(homogenized):\n assert (v.sp_index.equals(expected))\n\n indices1 = [BlockIndex(10, [2], [7]), BlockIndex(10, [1, 6], [3, 4]),\n BlockIndex(10, [0], [10])]\n expected1 = BlockIndex(10, [2, 6], [2, 3])\n _check_matches(indices1, expected1)\n\n indices2 = [BlockIndex(10, [2], [7]), BlockIndex(10, [2], [7])]\n expected2 = indices2[0]\n _check_matches(indices2, expected2)\n\n # must have NaN fill value\n data = {'a': SparseSeries(np.arange(7), sparse_index=expected2,\n fill_value=0)}\n with tm.assert_raises_regex(TypeError, \"NaN fill value\"):\n spf.homogenize(data)\n\n def test_fill_value_corner(self):\n cop = self.zbseries.copy()\n cop.fill_value = 0\n result = self.bseries / cop\n\n assert np.isnan(result.fill_value)\n\n cop2 = self.zbseries.copy()\n cop2.fill_value = 1\n result = cop2 / cop\n # 1 / 0 is inf\n assert np.isinf(result.fill_value)\n\n def test_fill_value_when_combine_const(self):\n # GH12723\n s = SparseSeries([0, 1, np.nan, 3, 4, 5], index=np.arange(6))\n\n exp = s.fillna(0).add(2)\n res = s.add(2, fill_value=0)\n tm.assert_series_equal(res, exp)\n\n def test_shift(self):\n series = SparseSeries([nan, 1., 2., 3., nan, nan], index=np.arange(6))\n\n shifted = series.shift(0)\n # assert shifted is not series\n tm.assert_sp_series_equal(shifted, series)\n\n f = lambda s: s.shift(1)\n _dense_series_compare(series, f)\n\n f = lambda s: s.shift(-2)\n _dense_series_compare(series, f)\n\n series = SparseSeries([nan, 1., 2., 3., nan, nan],\n index=bdate_range('1/1/2000', periods=6))\n f = lambda s: s.shift(2, freq='B')\n _dense_series_compare(series, f)\n\n f = lambda s: s.shift(2, freq=BDay())\n _dense_series_compare(series, f)\n\n def test_shift_nan(self):\n # GH 12908\n orig = pd.Series([np.nan, 2, np.nan, 4, 0, np.nan, 0])\n sparse = orig.to_sparse()\n\n tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse(),\n check_kind=False)\n\n tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())\n tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())\n tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())\n tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())\n\n sparse = orig.to_sparse(fill_value=0)\n tm.assert_sp_series_equal(\n sparse.shift(0),\n orig.shift(0).to_sparse(fill_value=sparse.fill_value)\n )\n 
tm.assert_sp_series_equal(sparse.shift(1),\n orig.shift(1).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(2),\n orig.shift(2).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(3),\n orig.shift(3).to_sparse(fill_value=0),\n check_kind=False)\n\n tm.assert_sp_series_equal(sparse.shift(-1),\n orig.shift(-1).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-2),\n orig.shift(-2).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-3),\n orig.shift(-3).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-4),\n orig.shift(-4).to_sparse(fill_value=0),\n check_kind=False)\n\n def test_shift_dtype(self):\n # GH 12908\n orig = pd.Series([1, 2, 3, 4], dtype=np.int64)\n\n sparse = orig.to_sparse()\n tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())\n\n sparse = orig.to_sparse(fill_value=np.nan)\n tm.assert_sp_series_equal(sparse.shift(0),\n orig.shift(0).to_sparse(fill_value=np.nan))\n # shift(1) or more span changes dtype to float64\n # XXX: SparseSeries doesn't need to shift dtype here.\n # Do we want to astype in shift, for backwards compat?\n # If not, document it.\n tm.assert_sp_series_equal(sparse.shift(1).astype('f8'),\n orig.shift(1).to_sparse(kind='integer'))\n tm.assert_sp_series_equal(sparse.shift(2).astype('f8'),\n orig.shift(2).to_sparse(kind='integer'))\n tm.assert_sp_series_equal(sparse.shift(3).astype('f8'),\n orig.shift(3).to_sparse(kind='integer'))\n\n tm.assert_sp_series_equal(sparse.shift(-1).astype('f8'),\n orig.shift(-1).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-2).astype('f8'),\n orig.shift(-2).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-3).astype('f8'),\n orig.shift(-3).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-4).astype('f8'),\n orig.shift(-4).to_sparse(),\n check_kind=False)\n\n @pytest.mark.parametrize(\"fill_value\", [\n 0,\n 1,\n np.nan\n ])\n @pytest.mark.parametrize(\"periods\", [0, 1, 2, 3, -1, -2, -3, -4])\n def test_shift_dtype_fill_value(self, fill_value, periods):\n # GH 12908\n orig = pd.Series([1, 0, 0, 4], dtype=np.dtype('int64'))\n\n sparse = orig.to_sparse(fill_value=fill_value)\n\n result = sparse.shift(periods)\n expected = orig.shift(periods).to_sparse(fill_value=fill_value)\n\n tm.assert_sp_series_equal(result, expected,\n check_kind=False,\n consolidate_block_indices=True)\n\n def test_combine_first(self):\n s = self.bseries\n\n result = s[::2].combine_first(s)\n result2 = s[::2].combine_first(s.to_dense())\n\n expected = s[::2].to_dense().combine_first(s.to_dense())\n expected = expected.to_sparse(fill_value=s.fill_value)\n\n tm.assert_sp_series_equal(result, result2)\n tm.assert_sp_series_equal(result, expected)\n\n @pytest.mark.parametrize('deep', [True, False])\n @pytest.mark.parametrize('fill_value', [0, 1, np.nan, None])\n def test_memory_usage_deep(self, deep, fill_value):\n values = [1.0] + [fill_value] * 20\n sparse_series = SparseSeries(values, fill_value=fill_value)\n dense_series = Series(values)\n sparse_usage = sparse_series.memory_usage(deep=deep)\n dense_usage = dense_series.memory_usage(deep=deep)\n\n assert sparse_usage < dense_usage\n\n\nclass TestSparseHandlingMultiIndexes(object):\n\n def setup_method(self, method):\n miindex = pd.MultiIndex.from_product(\n [[\"x\", \"y\"], [\"10\", \"20\"]], names=['row-foo', 'row-bar'])\n micol = 
pd.MultiIndex.from_product(\n [['a', 'b', 'c'], [\"1\", \"2\"]], names=['col-foo', 'col-bar'])\n dense_multiindex_frame = pd.DataFrame(\n index=miindex, columns=micol).sort_index().sort_index(axis=1)\n self.dense_multiindex_frame = dense_multiindex_frame.fillna(value=3.14)\n\n def test_to_sparse_preserve_multiindex_names_columns(self):\n sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()\n sparse_multiindex_frame = sparse_multiindex_frame.copy()\n tm.assert_index_equal(sparse_multiindex_frame.columns,\n self.dense_multiindex_frame.columns)\n\n def test_round_trip_preserve_multiindex_names(self):\n sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()\n round_trip_multiindex_frame = sparse_multiindex_frame.to_dense()\n tm.assert_frame_equal(self.dense_multiindex_frame,\n round_trip_multiindex_frame,\n check_column_type=True,\n check_names=True)\n\n\[email protected]_if_no_scipy\[email protected](\n \"ignore:the matrix subclass:PendingDeprecationWarning\"\n)\nclass TestSparseSeriesScipyInteraction(object):\n # Issue 8048: add SparseSeries coo methods\n\n def setup_method(self, method):\n import scipy.sparse\n # SparseSeries inputs used in tests, the tests rely on the order\n self.sparse_series = []\n s = pd.Series([3.0, nan, 1.0, 2.0, nan, nan])\n s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),\n (1, 2, 'a', 1),\n (1, 1, 'b', 0),\n (1, 1, 'b', 1),\n (2, 1, 'b', 0),\n (2, 1, 'b', 1)],\n names=['A', 'B', 'C', 'D'])\n self.sparse_series.append(s.to_sparse())\n\n ss = self.sparse_series[0].copy()\n ss.index.names = [3, 0, 1, 2]\n self.sparse_series.append(ss)\n\n ss = pd.Series([\n nan\n ] * 12, index=cartesian_product((range(3), range(4)))).to_sparse()\n for k, v in zip([(0, 0), (1, 2), (1, 3)], [3.0, 1.0, 2.0]):\n ss[k] = v\n self.sparse_series.append(ss)\n\n # results used in tests\n self.coo_matrices = []\n self.coo_matrices.append(scipy.sparse.coo_matrix(\n ([3.0, 1.0, 2.0], ([0, 1, 1], [0, 2, 3])), shape=(3, 4)))\n self.coo_matrices.append(scipy.sparse.coo_matrix(\n ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)))\n self.coo_matrices.append(scipy.sparse.coo_matrix(\n ([3.0, 1.0, 2.0], ([0, 1, 1], [0, 0, 1])), shape=(3, 2)))\n self.ils = [[(1, 2), (1, 1), (2, 1)], [(1, 1), (1, 2), (2, 1)],\n [(1, 2, 'a'), (1, 1, 'b'), (2, 1, 'b')]]\n self.jls = [[('a', 0), ('a', 1), ('b', 0), ('b', 1)], [0, 1]]\n\n def test_to_coo_text_names_integer_row_levels_nosort(self):\n ss = self.sparse_series[0]\n kwargs = {'row_levels': [0, 1], 'column_levels': [2, 3]}\n result = (self.coo_matrices[0], self.ils[0], self.jls[0])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_text_names_integer_row_levels_sort(self):\n ss = self.sparse_series[0]\n kwargs = {'row_levels': [0, 1],\n 'column_levels': [2, 3],\n 'sort_labels': True}\n result = (self.coo_matrices[1], self.ils[1], self.jls[0])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_text_names_text_row_levels_nosort_col_level_single(self):\n ss = self.sparse_series[0]\n kwargs = {'row_levels': ['A', 'B', 'C'],\n 'column_levels': ['D'],\n 'sort_labels': False}\n result = (self.coo_matrices[2], self.ils[2], self.jls[1])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_integer_names_integer_row_levels_nosort(self):\n ss = self.sparse_series[1]\n kwargs = {'row_levels': [3, 0], 'column_levels': [1, 2]}\n result = (self.coo_matrices[0], self.ils[0], self.jls[0])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_text_names_text_row_levels_nosort(self):\n ss = self.sparse_series[0]\n kwargs = 
{'row_levels': ['A', 'B'], 'column_levels': ['C', 'D']}\n result = (self.coo_matrices[0], self.ils[0], self.jls[0])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_bad_partition_nonnull_intersection(self):\n ss = self.sparse_series[0]\n pytest.raises(ValueError, ss.to_coo, ['A', 'B', 'C'], ['C', 'D'])\n\n def test_to_coo_bad_partition_small_union(self):\n ss = self.sparse_series[0]\n pytest.raises(ValueError, ss.to_coo, ['A'], ['C', 'D'])\n\n def test_to_coo_nlevels_less_than_two(self):\n ss = self.sparse_series[0]\n ss.index = np.arange(len(ss.index))\n pytest.raises(ValueError, ss.to_coo)\n\n def test_to_coo_bad_ilevel(self):\n ss = self.sparse_series[0]\n pytest.raises(KeyError, ss.to_coo, ['A', 'B'], ['C', 'D', 'E'])\n\n def test_to_coo_duplicate_index_entries(self):\n ss = pd.concat([self.sparse_series[0],\n self.sparse_series[0]]).to_sparse()\n pytest.raises(ValueError, ss.to_coo, ['A', 'B'], ['C', 'D'])\n\n def test_from_coo_dense_index(self):\n ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True)\n check = self.sparse_series[2]\n tm.assert_sp_series_equal(ss, check)\n\n def test_from_coo_nodense_index(self):\n ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=False)\n check = self.sparse_series[2]\n check = check.dropna().to_sparse()\n tm.assert_sp_series_equal(ss, check)\n\n def test_from_coo_long_repr(self):\n # GH 13114\n # test it doesn't raise error. Formatting is tested in test_format\n import scipy.sparse\n\n sparse = SparseSeries.from_coo(scipy.sparse.rand(350, 18))\n repr(sparse)\n\n def _run_test(self, ss, kwargs, check):\n results = ss.to_coo(**kwargs)\n self._check_results_to_coo(results, check)\n # for every test, also test symmetry property (transpose), switch\n # row_levels and column_levels\n d = kwargs.copy()\n d['row_levels'] = kwargs['column_levels']\n d['column_levels'] = kwargs['row_levels']\n results = ss.to_coo(**d)\n results = (results[0].T, results[2], results[1])\n self._check_results_to_coo(results, check)\n\n def _check_results_to_coo(self, results, check):\n (A, il, jl) = results\n (A_result, il_result, jl_result) = check\n # convert to dense and compare\n tm.assert_numpy_array_equal(A.todense(), A_result.todense())\n # or compare directly as difference of sparse\n # assert(abs(A - A_result).max() < 1e-12) # max is failing in python\n # 2.6\n assert il == il_result\n assert jl == jl_result\n\n def test_concat(self):\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n for kind in ['integer', 'block']:\n sparse1 = pd.SparseSeries(val1, name='x', kind=kind)\n sparse2 = pd.SparseSeries(val2, name='y', kind=kind)\n\n res = pd.concat([sparse1, sparse2])\n exp = pd.concat([pd.Series(val1), pd.Series(val2)])\n exp = pd.SparseSeries(exp, kind=kind)\n tm.assert_sp_series_equal(res, exp)\n\n sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)\n sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)\n\n res = pd.concat([sparse1, sparse2])\n exp = pd.concat([pd.Series(val1), pd.Series(val2)])\n exp = pd.SparseSeries(exp, fill_value=0, kind=kind)\n tm.assert_sp_series_equal(res, exp,\n consolidate_block_indices=True)\n\n def test_concat_axis1(self):\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n sparse1 = pd.SparseSeries(val1, name='x')\n sparse2 = pd.SparseSeries(val2, name='y')\n\n res = pd.concat([sparse1, sparse2], axis=1)\n exp = pd.concat([pd.Series(val1, name='x'),\n pd.Series(val2, name='y')], axis=1)\n 
exp = pd.SparseDataFrame(exp)\n tm.assert_sp_frame_equal(res, exp)\n\n def test_concat_different_fill(self):\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n for kind in ['integer', 'block']:\n sparse1 = pd.SparseSeries(val1, name='x', kind=kind)\n sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = pd.concat([sparse1, sparse2])\n exp = pd.concat([pd.Series(val1), pd.Series(val2)])\n exp = pd.SparseSeries(exp, kind=kind)\n tm.assert_sp_series_equal(res, exp)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = pd.concat([sparse2, sparse1])\n exp = pd.concat([pd.Series(val2), pd.Series(val1)])\n exp = pd.SparseSeries(exp, kind=kind, fill_value=0)\n tm.assert_sp_series_equal(res, exp)\n\n def test_concat_axis1_different_fill(self):\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n sparse1 = pd.SparseSeries(val1, name='x')\n sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)\n\n res = pd.concat([sparse1, sparse2], axis=1)\n exp = pd.concat([pd.Series(val1, name='x'),\n pd.Series(val2, name='y')], axis=1)\n assert isinstance(res, pd.SparseDataFrame)\n tm.assert_frame_equal(res.to_dense(), exp)\n\n def test_concat_different_kind(self):\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n sparse1 = pd.SparseSeries(val1, name='x', kind='integer')\n sparse2 = pd.SparseSeries(val2, name='y', kind='block', fill_value=0)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = pd.concat([sparse1, sparse2])\n exp = pd.concat([pd.Series(val1), pd.Series(val2)])\n exp = pd.SparseSeries(exp, kind='integer')\n tm.assert_sp_series_equal(res, exp)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = pd.concat([sparse2, sparse1])\n exp = pd.concat([pd.Series(val2), pd.Series(val1)])\n exp = pd.SparseSeries(exp, kind='block', fill_value=0)\n tm.assert_sp_series_equal(res, exp)\n\n def test_concat_sparse_dense(self):\n # use first input's fill_value\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n for kind in ['integer', 'block']:\n sparse = pd.SparseSeries(val1, name='x', kind=kind)\n dense = pd.Series(val2, name='y')\n\n res = pd.concat([sparse, dense])\n exp = pd.concat([pd.Series(val1), dense])\n exp = pd.SparseSeries(exp, kind=kind)\n tm.assert_sp_series_equal(res, exp)\n\n res = pd.concat([dense, sparse, dense])\n exp = pd.concat([dense, pd.Series(val1), dense])\n exp = exp.astype(\"Sparse\")\n tm.assert_series_equal(res, exp)\n\n sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)\n dense = pd.Series(val2, name='y')\n\n res = pd.concat([sparse, dense])\n exp = pd.concat([pd.Series(val1), dense])\n exp = exp.astype(SparseDtype(exp.dtype, 0))\n tm.assert_series_equal(res, exp)\n\n res = pd.concat([dense, sparse, dense])\n exp = pd.concat([dense, pd.Series(val1), dense])\n exp = exp.astype(SparseDtype(exp.dtype, 0))\n tm.assert_series_equal(res, exp)\n\n def test_value_counts(self):\n vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]\n dense = pd.Series(vals, name='xx')\n\n sparse = pd.SparseSeries(vals, name='xx')\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n sparse = pd.SparseSeries(vals, name='xx', fill_value=0)\n 
tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n def test_value_counts_dup(self):\n vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]\n\n # numeric op may cause sp_values to include the same value as\n # fill_value\n dense = pd.Series(vals, name='xx') / 0.\n sparse = pd.SparseSeries(vals, name='xx') / 0.\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n vals = [1, 2, 0, 0, 0, 1, 2, 0, 0, 1, 2, 0, 1, 1]\n\n dense = pd.Series(vals, name='xx') * 0.\n sparse = pd.SparseSeries(vals, name='xx') * 0.\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n def test_value_counts_int(self):\n vals = [1, 2, 0, 1, 2, 1, 2, 0, 1, 1]\n dense = pd.Series(vals, name='xx')\n\n # fill_value is np.nan, but should not be included in the result\n sparse = pd.SparseSeries(vals, name='xx')\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n sparse = pd.SparseSeries(vals, name='xx', fill_value=0)\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n def test_isna(self):\n # GH 8276\n s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')\n\n res = s.isna()\n exp = pd.SparseSeries([True, True, False, False, True], name='xxx',\n fill_value=True)\n tm.assert_sp_series_equal(res, exp)\n\n # if fill_value is not nan, True can be included in sp_values\n s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',\n fill_value=0.)\n res = s.isna()\n assert isinstance(res, pd.SparseSeries)\n exp = pd.Series([True, False, False, False, False], name='xxx')\n tm.assert_series_equal(res.to_dense(), exp)\n\n def test_notna(self):\n # GH 8276\n s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')\n\n res = s.notna()\n exp = pd.SparseSeries([False, False, True, True, False], name='xxx',\n fill_value=False)\n tm.assert_sp_series_equal(res, exp)\n\n # if fill_value is not nan, True can be included in sp_values\n s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',\n fill_value=0.)\n res = s.notna()\n assert isinstance(res, pd.SparseSeries)\n exp = pd.Series([False, True, True, True, True], name='xxx')\n tm.assert_series_equal(res.to_dense(), exp)\n\n\ndef _dense_series_compare(s, f):\n result = f(s)\n assert (isinstance(result, SparseSeries))\n dense_result = f(s.to_dense())\n tm.assert_series_equal(result.to_dense(), dense_result)\n\n\nclass TestSparseSeriesAnalytics(object):\n\n def setup_method(self, method):\n arr, index = _test_data1()\n self.bseries = SparseSeries(arr, index=index, kind='block',\n name='bseries')\n\n arr, index = _test_data1_zero()\n self.zbseries = SparseSeries(arr, index=index, kind='block',\n fill_value=0, name='zbseries')\n\n def test_cumsum(self):\n result = self.bseries.cumsum()\n expected = SparseSeries(self.bseries.to_dense().cumsum())\n tm.assert_sp_series_equal(result, expected)\n\n result = self.zbseries.cumsum()\n expected = self.zbseries.to_dense().cumsum().to_sparse()\n tm.assert_series_equal(result, expected)\n\n axis = 1 # Series is 1-D, so only axis = 0 is valid.\n msg = \"No 
axis named {axis}\".format(axis=axis)\n with tm.assert_raises_regex(ValueError, msg):\n self.bseries.cumsum(axis=axis)\n\n def test_numpy_cumsum(self):\n result = np.cumsum(self.bseries)\n expected = SparseSeries(self.bseries.to_dense().cumsum())\n tm.assert_sp_series_equal(result, expected)\n\n result = np.cumsum(self.zbseries)\n expected = self.zbseries.to_dense().cumsum().to_sparse()\n tm.assert_series_equal(result, expected)\n\n msg = \"the 'dtype' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.cumsum,\n self.bseries, dtype=np.int64)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.cumsum,\n self.zbseries, out=result)\n\n def test_numpy_func_call(self):\n # no exception should be raised even though\n # numpy passes in 'axis=None' or `axis=-1'\n funcs = ['sum', 'cumsum', 'var', 'mean',\n 'prod', 'cumprod', 'std', 'argsort',\n 'min', 'max']\n for func in funcs:\n for series in ('bseries', 'zbseries'):\n getattr(np, func)(getattr(self, series))\n\n def test_deprecated_numpy_func_call(self):\n # NOTE: These should be add to the 'test_numpy_func_call' test above\n # once the behavior of argmin/argmax is corrected.\n funcs = ['argmin', 'argmax']\n for func in funcs:\n for series in ('bseries', 'zbseries'):\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n getattr(np, func)(getattr(self, series))\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n getattr(getattr(self, series), func)()\n\n def test_deprecated_reindex_axis(self):\n # https://github.com/pandas-dev/pandas/issues/17833\n with tm.assert_produces_warning(FutureWarning) as m:\n self.bseries.reindex_axis([0, 1, 2])\n assert 'reindex' in str(m[0].message)\n\n\[email protected](\n 'datetime_type', (np.datetime64,\n pd.Timestamp,\n lambda x: datetime.strptime(x, '%Y-%m-%d')))\ndef test_constructor_dict_datetime64_index(datetime_type):\n # GH 9456\n dates = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']\n values = [42544017.198965244, 1234565, 40512335.181958228, -1]\n\n result = SparseSeries(dict(zip(map(datetime_type, dates), values)))\n expected = SparseSeries(values, map(pd.Timestamp, dates))\n\n tm.assert_sp_series_equal(result, expected)\n\n\ndef test_to_sparse():\n # https://github.com/pandas-dev/pandas/issues/22389\n arr = pd.SparseArray([1, 2, None, 3])\n result = pd.Series(arr).to_sparse()\n assert len(result) == 4\n tm.assert_sp_array_equal(result.values, arr, check_kind=False)\n\n\ndef test_constructor_mismatched_raises():\n msg = \"Length of passed values is 2, index implies 3\"\n with tm.assert_raises_regex(ValueError, msg):\n SparseSeries([1, 2], index=[1, 2, 3])\n\n\ndef test_block_deprecated():\n s = SparseSeries([1])\n with tm.assert_produces_warning(FutureWarning):\n s.block\n"
] | [
[
"pandas.util.testing.assert_sp_frame_equal",
"pandas.Series",
"numpy.take",
"numpy.asarray",
"pandas.util.testing.assert_produces_warning",
"pandas._libs.sparse.IntIndex",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.SparseDataFrame",
"pandas.util.testing.assert_frame_equal",
"numpy.cumsum",
"pandas.SparseSeries",
"pandas.util.testing.assert_index_equal",
"pandas.core.sparse.api.SparseDtype",
"numpy.random.randn",
"pandas.compat.iteritems",
"numpy.dtype",
"pandas.compat.range",
"pandas.util.testing.round_trip_pickle",
"pandas.isna",
"pandas.util.testing.assert_sp_series_equal",
"pandas.tseries.offsets.BDay",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.core.sparse.frame.homogenize",
"pandas._libs.sparse.BlockIndex",
"numpy.repeat",
"pandas.concat",
"pandas.bdate_range",
"numpy.isnan",
"pandas.util.testing.assert_almost_equal",
"pandas.util.testing.assert_sp_array_equal",
"pandas.MultiIndex.from_product",
"numpy.array",
"pandas.SparseArray",
"numpy.isfinite",
"numpy.abs",
"pandas.util.testing.assert_raises_regex",
"numpy.ones",
"pandas.core.sparse.api.SparseSeries.from_coo",
"numpy.isinf",
"numpy.empty",
"pandas.core.sparse.api.SparseSeries"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
wenhuchen/LogicNLG | [
"e986516e5b6d310219215510b3fe1603d03215cd"
] | [
"Model.py"
] | [
"import torch.optim as optim\nfrom torch import nn\nimport torch\nfrom torch import autograd\nimport torch.nn.functional as F\nimport math\nimport numpy as np\nfrom transformers import BertModel\n\n\nclass PositionalEmbedding(nn.Module):\n\n def __init__(self, d_model, max_len=512):\n super(PositionalEmbedding, self).__init__()\n\n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_len, d_model).float()\n pe.require_grad = False\n\n position = torch.arange(0, max_len).float().unsqueeze(1)\n div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()\n\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n return self.pe[:, :x.size(1)]\n\n\nclass MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super(MultiHeadAttention, self).__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k)\n self.w_ks = nn.Linear(d_model, n_head * d_k)\n self.w_vs = nn.Linear(d_model, n_head * d_v)\n nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.fc = nn.Linear(n_head * d_v, d_model)\n nn.init.xavier_normal_(self.fc.weight)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, q, k, v, mask=None):\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size()\n\n residual = q\n\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n\n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n\n mask = mask.repeat(n_head, 1, 1) # (n*b) x .. 
x ..\n\n output, attn = self.attention(q, k, v, mask=mask)\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)\n\n output = self.dropout(self.fc(output))\n output = self.layer_norm(output + residual)\n\n return output, attn\n\n\nclass PositionwiseFeedForward(nn.Module):\n ''' A two-feed-forward-layer module '''\n\n def __init__(self, d_in, d_hid, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise\n self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise\n self.layer_norm = nn.LayerNorm(d_in)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n residual = x\n output = x.transpose(1, 2)\n output = self.w_2(F.relu(self.w_1(output)))\n output = output.transpose(1, 2)\n output = self.dropout(output)\n output = self.layer_norm(output + residual)\n return output\n\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, temperature, attn_dropout=0.1):\n super(ScaledDotProductAttention, self).__init__()\n self.temperature = temperature\n self.dropout = nn.Dropout(attn_dropout)\n self.softmax = nn.Softmax(dim=2)\n\n def forward(self, q, k, v, mask=None):\n\n attn = torch.bmm(q, k.transpose(1, 2))\n attn = attn / self.temperature\n\n if mask is not None:\n attn = attn.masked_fill(mask, -np.inf)\n\n attn = self.softmax(attn)\n attn = self.dropout(attn)\n output = torch.bmm(attn, v)\n\n return output, attn\n\n\ndef get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):\n ''' Sinusoid position encoding table '''\n\n def cal_angle(position, hid_idx):\n return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)\n\n def get_posi_angle_vec(position):\n return [cal_angle(position, hid_j) for hid_j in range(d_hid)]\n\n sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])\n\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1\n\n if padding_idx is not None:\n # zero vector for padding dimension\n sinusoid_table[padding_idx] = 0.\n\n return torch.FloatTensor(sinusoid_table)\n\n\nclass EncoderLayer(nn.Module):\n ''' Compose with two layers '''\n\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(EncoderLayer, self).__init__()\n self.slf_attn = MultiHeadAttention(\n n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):\n enc_output, enc_slf_attn = self.slf_attn(enc_input, enc_input, enc_input, mask=slf_attn_mask)\n\n enc_output *= non_pad_mask\n\n enc_output = self.pos_ffn(enc_output)\n enc_output *= non_pad_mask\n\n return enc_output, enc_slf_attn\n\n\nclass DecoderLayer(nn.Module):\n ''' Compose with three layers '''\n\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(DecoderLayer, self).__init__()\n self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, dec_input, enc_output, non_pad_mask=None, slf_attn_mask=None, dec_enc_attn_mask=None):\n dec_output, dec_slf_attn = self.slf_attn(\n dec_input, dec_input, dec_input, mask=slf_attn_mask)\n dec_output *= non_pad_mask\n\n 
dec_output, dec_enc_attn = self.enc_attn(\n dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)\n dec_output *= non_pad_mask\n\n dec_output = self.pos_ffn(dec_output)\n dec_output *= non_pad_mask\n\n return dec_output, dec_slf_attn, dec_enc_attn\n\n def step_forward(self, dec_input, enc_output, non_pad_mask=None, slf_attn_mask=None, dec_enc_attn_mask=None):\n dec_query = dec_input[:, -1, :].unsqueeze(1)\n slf_attn_mask = slf_attn_mask[:, -1, :].unsqueeze(1)\n dec_enc_attn_mask = dec_enc_attn_mask[:, -1, :].unsqueeze(1)\n non_pad_mask = non_pad_mask[:, -1, :].unsqueeze(1)\n\n dec_output, dec_slf_attn = self.slf_attn(\n dec_query, dec_input, dec_input, mask=slf_attn_mask)\n dec_output *= non_pad_mask\n\n dec_output, dec_enc_attn = self.enc_attn(\n dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)\n dec_output *= non_pad_mask\n\n dec_output = self.pos_ffn(dec_output)\n dec_output *= non_pad_mask\n\n return dec_output\n\n\ndef get_non_pad_mask(seq):\n assert seq.dim() == 2\n return seq.ne(0).type(torch.float).unsqueeze(-1)\n\n\ndef get_attn_key_pad_mask(seq_k, seq_q):\n ''' For masking out the padding part of key sequence. '''\n\n # Expand to fit the shape of key query attention matrix.\n len_q = seq_q.size(1)\n padding_mask = seq_k.eq(0)\n padding_mask = padding_mask.unsqueeze(1).expand(-1, len_q, -1).type(torch.bool)\n return padding_mask\n\n\ndef get_subsequent_mask(seq):\n ''' For masking out the subsequent info. '''\n\n sz_b, len_s = seq.size()\n subsequent_mask = torch.triu(torch.ones((len_s, len_s), device=seq.device), diagonal=1).type(torch.bool)\n subsequent_mask = subsequent_mask.unsqueeze(0).expand(sz_b, -1, -1) # b x ls x ls\n\n return subsequent_mask\n\n\nclass BERTGen(nn.Module):\n\n def __init__(self, vocab_size, dim, layers, head, modelpath):\n super(BERTGen, self).__init__()\n self.encoder = BertModel.from_pretrained(modelpath)\n self.model = TableDecoder(vocab_size, dim, layers, dim, head)\n\n def forward(self, trg_inp, caption):\n src_feat = self.encoder(caption)[0]\n tgt_feat = self.encoder(trg_inp)[0]\n\n src_feat = src_feat.repeat(tgt_feat.shape[0], 1, 1)\n logits = self.model(trg_inp, src_feat, tgt_feat)\n return logits\n\n def encode(self, caption):\n return self.encoder(caption)[0]\n\n def decode(self, trg_inp, src_feat, tgt_feat):\n return self.model(trg_inp, src_feat, tgt_feat)\n\n\nclass TableDecoder(nn.Module):\n\n def __init__(self, vocab_size, d_word_vec, n_layers, d_model, n_head, dropout=0.1, copy=False, with_bert=True):\n super(TableDecoder, self).__init__()\n d_k = d_model // n_head\n d_v = d_model // n_head\n d_inner = d_model * 4\n self.vocab_size = vocab_size\n\n self.dec_stack = nn.ModuleList([\n DecoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n\n self.tgt_word_prj = nn.Linear(d_model, vocab_size, bias=False)\n\n def forward(self, tgt_seq, src_feat, tgt_feat):\n src_length = src_feat.shape[1]\n tgt_length = tgt_seq.shape[1]\n\n slf_mask = torch.zeros_like(tgt_seq).type(torch.bool).to(tgt_seq.device)\n slf_attn_mask = torch.zeros_like(tgt_seq).unsqueeze(2).repeat(\n 1, 1, tgt_length).type(torch.bool).to(tgt_seq.device)\n non_pad_mask = (1 - slf_mask.float()).unsqueeze(-1)\n dec_enc_attn_mask = slf_mask.unsqueeze(2).repeat(1, 1, src_length)\n\n dec_output = tgt_feat\n for layer in self.dec_stack:\n dec_output, _, _ = layer(dec_output, src_feat,\n non_pad_mask=non_pad_mask,\n slf_attn_mask=slf_attn_mask,\n dec_enc_attn_mask=dec_enc_attn_mask)\n\n logits = self.tgt_word_prj(dec_output)\n return 
logits\n\n\nclass TableInfusing(nn.Module):\n def __init__(self, vocab_size, full_vocab_size, d_word_vec, n_layers, n_head, dropout=0.1):\n super(TableInfusing, self).__init__()\n self.embed = nn.Embedding(vocab_size, d_word_vec, padding_idx=0)\n\n self.vocab_size = vocab_size\n self.full_vocab_size = full_vocab_size\n\n self.field_encoder = nn.LSTM(d_word_vec, d_word_vec)\n d_inner = 4 * d_word_vec\n d_k, d_v = d_word_vec // n_head, d_word_vec // n_head\n self.discount = 0.99\n\n self.enc_stack = nn.ModuleList([\n EncoderLayer(d_word_vec, d_inner, n_head, d_k, d_v)\n for _ in range(n_layers)])\n\n self.dec_stack = nn.ModuleList([\n DecoderLayer(d_word_vec, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n\n self.post_word_emb = PositionalEmbedding(d_model=d_word_vec)\n\n self.copy_gate = nn.Sequential(nn.Linear(d_word_vec, 1), nn.Sigmoid())\n\n self.tgt_word_prj = nn.Linear(d_word_vec, vocab_size, bias=False)\n\n def forward(self, seqs_in, table_in, table_scatters, lookups, line_nos, fields, indexes):\n enc_inp = self.encode(table_in, lookups, line_nos, fields, indexes)\n logits = self.decode(seqs_in, enc_inp, table_scatters)\n\n return logits\n\n def encode(self, table_in, lookups, line_nos, fields, indexes):\n field_emb = self.embed(fields).transpose(1, 0)\n\n out, hidden = self.field_encoder(field_emb)\n out = out.transpose(1, 0)\n\n field_mask = (fields != 0).unsqueeze(-1).float()\n out = out * field_mask\n\n extracted = torch.gather(out, 1, indexes[:, :, None].repeat(1, 1, out.shape[-1]))\n\n field_emb = torch.gather(extracted, 1, lookups[:, :, None].repeat(1, 1, extracted.shape[-1]))\n\n line_no_emb = self.embed(line_nos)\n\n word_emb = self.embed(table_in)\n\n cell_emb = field_emb + line_no_emb + word_emb\n\n src_slf_mask = (table_in == 0)\n\n src_src_mask = src_slf_mask.unsqueeze(1).expand(-1, src_slf_mask.shape[1], -1)\n src_non_pad_mask = (1 - src_slf_mask.float()).unsqueeze(-1)\n\n enc_inp = cell_emb\n for layer in self.enc_stack:\n enc_inp, _ = layer(enc_inp, src_non_pad_mask, src_src_mask)\n enc_inp *= src_non_pad_mask\n\n return enc_inp\n\n def decode(self, seqs_in, enc_inp, table_scatters):\n batch_size, length = seqs_in.shape[0], seqs_in.shape[1]\n\n tgt_emb = self.embed(seqs_in)\n dec_inp = tgt_emb + self.post_word_emb(seqs_in)\n\n src_slf_mask = (table_scatters == 0)\n tgt_slf_mask = (seqs_in == 0)\n\n non_pad_mask = get_non_pad_mask(seqs_in)\n\n slf_attn_mask_subseq = get_subsequent_mask(seqs_in)\n slf_attn_mask_keypad = get_attn_key_pad_mask(seq_k=seqs_in, seq_q=seqs_in)\n slf_attn_mask = (slf_attn_mask_keypad + slf_attn_mask_subseq).gt(0)\n dec_enc_attn_mask = src_slf_mask.unsqueeze(1).expand(batch_size, length, -1).type(torch.bool)\n\n for layer in self.dec_stack:\n dec_inp, _, _ = layer(dec_inp, enc_inp,\n non_pad_mask=non_pad_mask,\n slf_attn_mask=slf_attn_mask,\n dec_enc_attn_mask=dec_enc_attn_mask)\n\n gate = self.copy_gate(dec_inp)\n\n scores = torch.bmm(dec_inp, enc_inp.transpose(2, 1))\n oov_vocab_prob = torch.softmax(scores, -1)\n\n in_vocab_prob = torch.softmax(self.tgt_word_prj(dec_inp), -1)\n\n size = self.full_vocab_size - self.vocab_size\n add_on_prob = (1 - self.discount) / size\n add_on = torch.FloatTensor(batch_size, length, size).fill_(add_on_prob).to(in_vocab_prob.device)\n\n full_prob = torch.cat([in_vocab_prob * (1 - gate) * self.discount, add_on], -1)\n\n full_prob = full_prob.scatter_add(2, table_scatters.unsqueeze(1).repeat(1, length, 1), oov_vocab_prob * gate)\n full_logits = torch.log(full_prob)\n\n return 
full_logits\n\n\nclass Ranker(nn.Module):\n ''' A decoder model with self attention mechanism. '''\n\n def __init__(self, vocab_size, d_word_vec, n_layers, d_model, n_head, dropout=0.1):\n super(Ranker, self).__init__()\n d_k = d_model // n_head\n d_v = d_model // n_head\n d_inner = d_model * 4\n\n self.word_emb = nn.Embedding(vocab_size, d_word_vec, padding_idx=0)\n\n self.post_word_emb = PositionalEmbedding(d_model=d_word_vec)\n\n self.enc_stack = nn.ModuleList(\n [EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n\n self.dec_stack = nn.ModuleList(\n [DecoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n\n self.tgt_word_prj = nn.Linear(d_model, 2, bias=True)\n\n def forward(self, prog, sent):\n # -- Prepare masks\n non_pad_mask = get_non_pad_mask(sent)\n slf_attn_mask = get_attn_key_pad_mask(seq_k=sent, seq_q=sent)\n # -- Forward Word Embedding\n enc_output = self.word_emb(sent) + self.post_word_emb(sent)\n\n for enc_layer in self.enc_stack:\n enc_output, enc_slf_attn = enc_layer(\n enc_output,\n non_pad_mask=non_pad_mask,\n slf_attn_mask=slf_attn_mask)\n\n non_pad_mask = get_non_pad_mask(prog)\n slf_attn_mask = get_attn_key_pad_mask(seq_k=prog, seq_q=prog)\n dec_enc_attn_mask = get_attn_key_pad_mask(seq_k=sent, seq_q=prog)\n # -- Forward\n dec_output = self.word_emb(prog) + self.post_word_emb(prog)\n\n for dec_layer in self.dec_stack:\n dec_output, dec_slf_attn, dec_enc_attn = dec_layer(\n dec_output, enc_output,\n non_pad_mask=non_pad_mask,\n slf_attn_mask=slf_attn_mask,\n dec_enc_attn_mask=dec_enc_attn_mask)\n\n logits = self.tgt_word_prj(dec_output[:, 0])\n return logits\n\n def prob(self, prog, sent):\n logits = self.forward(prog, sent)\n prob = torch.softmax(logits, -1)\n return prob[:, 1]\n\n\nclass BERTRanker(nn.Module):\n def __init__(self, model_class, model_name_or_path, config, cache_dir='/tmp/'):\n super(BERTRanker, self).__init__()\n self.base = model_class.from_pretrained(\n model_name_or_path,\n from_tf=bool(\".ckpt\" in model_name_or_path),\n config=config,\n cache_dir=cache_dir if cache_dir else None,\n )\n self.proj = nn.Linear(768, 2)\n\n def forward(self, input_tokens, input_types, input_masks):\n inputs = {\"input_ids\": input_tokens, \"token_type_ids\": input_types, \"attention_mask\": input_masks}\n _, text_representation = self.base(**inputs)\n logits = self.proj(text_representation)\n return logits\n\n def prob(self, input_tokens, input_types, input_masks):\n inputs = {\"input_ids\": input_tokens, \"token_type_ids\": input_types, \"attention_mask\": input_masks}\n _, text_representation = self.base(**inputs)\n logits = self.proj(text_representation)\n prob = torch.softmax(logits, -1)\n return prob[:, 1]\n"
] | [
[
"torch.nn.Softmax",
"numpy.sqrt",
"torch.sin",
"torch.cat",
"torch.zeros",
"torch.nn.Embedding",
"torch.FloatTensor",
"torch.nn.Dropout",
"torch.softmax",
"torch.ones",
"numpy.sin",
"torch.nn.Sigmoid",
"torch.bmm",
"torch.arange",
"torch.cos",
"numpy.power",
"torch.nn.init.xavier_normal_",
"torch.zeros_like",
"torch.nn.Linear",
"torch.log",
"torch.nn.Conv1d",
"torch.nn.LSTM",
"numpy.cos",
"torch.nn.LayerNorm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
scotthuang1989/opencv_study | [
"9b6354907609c9841915f6300ee5915a9d80906f"
] | [
"tutorial/gui_feature/track_bar.py"
] | [
 "\"\"\"\nSimple application that shows the color you specify:\n* one window which shows the color\n* 3 trackbars\n\"\"\"\n\n\"\"\"\ncv2.getTrackbarPos(trackbarname, winname)\nreturns: the current position of the specified trackbar\n\"\"\"\n\n\"\"\"\ncv2.createTrackbar(trackbarName, windowName, value, count, onChange)\n\"\"\"\n\nimport numpy as np\nimport cv2\n\n\ndef test_Trackbar():\n    def nothing(x):\n        pass\n\n    img = np.zeros((300, 512, 3), np.uint8)\n    cv2.namedWindow(\"image\")\n\n    cv2.createTrackbar('R', 'image', 0, 255, nothing)\n    cv2.createTrackbar('B', 'image', 0, 255, nothing)\n    cv2.createTrackbar('G', 'image', 0, 255, nothing)\n\n    switch = '0:OFF\\n1:ON'\n    cv2.createTrackbar(switch, 'image', 0, 1, nothing)\n\n    while True:\n        cv2.imshow('image', img)\n        if cv2.waitKey(20) & 0xFF == ord('q'):\n            break\n        r = cv2.getTrackbarPos('R', 'image')\n        g = cv2.getTrackbarPos('G', 'image')\n        b = cv2.getTrackbarPos('B', 'image')\n        s = cv2.getTrackbarPos(switch, 'image')\n        if s:\n            img[:] = [b, g, r]\n        else:\n            img[:] = 0\n    cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n    test_Trackbar()\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
blefaudeux/fairscale | [
"aa5850107a37c7d5644b6079516e7ae1079ff5e8"
] | [
"fairscale/optim/oss.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nimport logging\nfrom typing import TYPE_CHECKING, Any, Callable, List, Optional, Type\n\nimport torch\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nfrom .utils import broadcast_object, recursive_copy_to_device\n\nif TYPE_CHECKING:\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass OSS(Optimizer):\n \"\"\"Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n optimizer and shards its state as described by ZeRO_.\n ::\n opt = OSS(params, optim=torch.optim.Adam, lr=0.01)\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n Pipe combines pipeline parallelism with checkpointing to reduce peak\n memory required to train while minimizing device under-utilization.\n\n You should determine the balance when defining a :class:`Pipe` module, as\n balancing will not be done automatically. The module will be partitioned\n into multiple devices according to the given balance. You may rely on\n heuristics to find your own optimal configuration.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n Keyword Args:\n optim (torch.nn.Optimizer):\n optimizer to shard (default: SGD)\n group (group):\n torch.distributed group (default: group.WORLD)\n \"\"\"\n\n optim: Optimizer\n in_super_constructor: bool\n\n def __init__(\n self,\n params: _params_t,\n optim: Type[Optimizer] = SGD,\n group: Any = dist.group.WORLD,\n **defaults: Any\n ):\n self.in_super_constructor = True\n super().__init__(params, defaults)\n self.in_super_constructor = False\n\n self.group = group\n self.rank = dist.get_rank(group)\n param_groups = self.partition_parameters()\n self.optim = optim(param_groups[self.rank], **defaults)\n\n # Optional consolidated optimizer state\n self._global_state_dict = []\n\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed ranks.\n\n Returns a list of param_groups (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. We need all the ranks for the broadcast\n inside step().\n \"\"\"\n world_size = dist.get_world_size(self.group)\n param_groups: List[List] = [list() for _ in range(world_size)]\n sizes = [0] * world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(world_size)]\n for param in param_group[\"params\"]:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n sizes[rank] += param.numel()\n for rank, params in enumerate(param_lists):\n if len(params) > 0:\n pg = copy.copy(param_group)\n pg[\"params\"] = params\n param_groups[rank].append(pg)\n return param_groups\n\n def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:\n loss = self.optim.step(closure=closure)\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n dist.broadcast(param, rank, group=self.group)\n return loss\n\n def state_dict(self) -> dict:\n \"\"\" Gets this rank's state_dict. 
\"\"\"\n return self.optim.state_dict()\n\n def _collect_state_dict(self) -> List[dict]:\n \"\"\"\n Collect all the state shards\n \"\"\"\n empty_buffer = torch.empty([1], dtype=torch.uint8)\n global_optim_state = []\n local_state = self.state_dict()\n\n if len(local_state[\"state\"]) == 0:\n return []\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n logging.info(\"Saving self state\")\n global_optim_state.append(\n recursive_copy_to_device(\n local_state, non_blocking=True, device=torch.device(\"cpu\")\n )\n )\n\n # Sync with other replicas\n broadcast_object(empty_buffer, src_rank=rank)\n else:\n # Reuse the param_groups from this rank, these are shared across replicas\n logging.info(\"Receiving state from rank %s \", rank)\n replica_state = {\n \"state\": broadcast_object(empty_buffer, src_rank=rank),\n \"param_groups\": local_state[\"param_groups\"],\n }\n\n # Fetch from the other replicas\n global_optim_state.append(\n recursive_copy_to_device(\n replica_state, non_blocking=True, device=torch.device(\"cpu\")\n )\n )\n logging.info(\"State from rank %s received\", rank)\n\n return global_optim_state\n\n def _broadcast_state_dict(self) -> None:\n \"\"\"\n Broadcast this rank's state shard, discard others\n \"\"\"\n empty_buffer = torch.empty([1], dtype=torch.uint8)\n local_state = self.state_dict()\n\n if len(local_state[\"state\"]) == 0:\n return\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n # Send the state to the reference replica\n logging.info(\n \"Sending the sharded SGD state to the reference replica from rank %s\",\n rank,\n )\n broadcast_object(local_state[\"state\"], src_rank=rank)\n else:\n # Discard this tensor/rank, broadcast necessary for syncing\n logging.info(\"Discarding broadcast from rank %s\", rank)\n broadcast_object(empty_buffer, src_rank=rank)\n\n def consolidate_state_dict(self, recipient_rank: int = 0) -> List[dict]:\n \"\"\" Update the consolidated state_dict list, one per rank.\n\n This needs to be called on all replicas \"\"\"\n\n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n logging.info(\"Pulling the sharded SGD state from all replicas\")\n self._global_state_dict = self._collect_state_dict()\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n self._broadcast_state_dict()\n\n @property\n def global_state_dict(self):\n \"\"\"\n Return the last known global optimizer state, which consist of a list of the shards.\n\n NOTE: This is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called\n \"\"\"\n\n assert (\n len(self._global_state_dict) > 0\n ), \"The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand\"\n\n return self._global_state_dict\n\n def load_state_dict(self, state_dict: dict) -> None:\n \"\"\" Loads this rank's state_dict. \"\"\"\n self.optim.load_state_dict(state_dict)\n\n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n"
] | [
[
"torch.distributed.broadcast",
"torch.empty",
"torch.device",
"torch.distributed.get_rank",
"torch.distributed.get_world_size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stephenyan1231/d2go | [
"cbd695ac4aab07f98e3157340213b6f568f16eb1"
] | [
"tools/lightning_train_net.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\nimport logging\nimport os\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Type\n\nimport pytorch_lightning as pl # type: ignore\nfrom d2go.config import CfgNode, temp_defrost, auto_scale_world_size\nfrom d2go.runner import create_runner\nfrom d2go.runner.callbacks.quantization import (\n QuantizationAwareTraining,\n ModelTransform,\n)\nfrom d2go.runner.lightning_task import GeneralizedRCNNTask\nfrom d2go.setup import basic_argument_parser\nfrom d2go.utils.misc import dump_trained_model_configs\nfrom detectron2.utils.events import EventStorage\nfrom detectron2.utils.file_io import PathManager\nfrom pytorch_lightning.callbacks import Callback\nfrom pytorch_lightning.callbacks import LearningRateMonitor\nfrom pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom torch.distributed import get_rank\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(\"detectron2go.lightning.train_net\")\n\nFINAL_MODEL_CKPT = f\"model_final{ModelCheckpoint.FILE_EXTENSION}\"\n\n\n@dataclass\nclass TrainOutput:\n output_dir: str\n accuracy: Optional[Dict[str, Any]] = None\n tensorboard_log_dir: Optional[str] = None\n model_configs: Optional[Dict[str, str]] = None\n\n\ndef maybe_override_output_dir(cfg: CfgNode, output_dir: Optional[str]) -> None:\n \"\"\"Overrides the output directory if `output_dir` is not None. \"\"\"\n if output_dir is not None and output_dir != cfg.OUTPUT_DIR:\n cfg.OUTPUT_DIR = output_dir\n logger.warning(\n f\"Override cfg.OUTPUT_DIR ({cfg.OUTPUT_DIR}) to be the same as \"\n f\"output_dir {output_dir}\"\n )\n\n\ndef _get_trainer_callbacks(cfg: CfgNode) -> List[Callback]:\n \"\"\"Gets the trainer callbacks based on the given D2Go Config.\n\n Args:\n cfg: The normalized ConfigNode for this D2Go Task.\n\n Returns:\n A list of configured Callbacks to be used by the Lightning Trainer.\n \"\"\"\n callbacks: List[Callback] = [\n LearningRateMonitor(logging_interval=\"step\"),\n ModelCheckpoint(\n dirpath=cfg.OUTPUT_DIR,\n save_last=True,\n ),\n ]\n if cfg.QUANTIZATION.QAT.ENABLED:\n callbacks.append(QuantizationAwareTraining.from_config(cfg))\n return callbacks\n\n\ndef get_accelerator(device: str) -> str:\n return \"ddp_cpu\" if device.lower() == \"cpu\" else \"ddp\"\n\n\ndef do_train(\n cfg: CfgNode, trainer: pl.Trainer, task: GeneralizedRCNNTask\n) -> Dict[str, str]:\n \"\"\"Runs the training loop with given trainer and task.\n\n Args:\n cfg: The normalized ConfigNode for this D2Go Task.\n trainer: PyTorch Lightning trainer.\n task: Lightning module instance.\n\n Returns:\n A map of model name to trained model config path.\n \"\"\"\n with EventStorage() as storage:\n task.storage = storage\n trainer.fit(task)\n final_ckpt = os.path.join(cfg.OUTPUT_DIR, FINAL_MODEL_CKPT)\n trainer.save_checkpoint(final_ckpt) # for validation monitor\n\n trained_cfg = cfg.clone()\n with temp_defrost(trained_cfg):\n trained_cfg.MODEL.WEIGHTS = final_ckpt\n model_configs = dump_trained_model_configs(\n cfg.OUTPUT_DIR, {\"model_final\": trained_cfg}\n )\n return model_configs\n\n\ndef do_test(trainer: pl.Trainer, task: GeneralizedRCNNTask):\n \"\"\"Runs the evaluation with a pre-trained model.\n\n Args:\n cfg: The normalized ConfigNode for this D2Go Task.\n trainer: PyTorch Lightning trainer.\n task: Lightning module instance.\n\n \"\"\"\n with EventStorage() as storage:\n task.storage = 
storage\n trainer.test(task)\n\n\ndef main(\n cfg: CfgNode,\n output_dir: Optional[str] = None,\n task_cls: Type[GeneralizedRCNNTask] = GeneralizedRCNNTask,\n eval_only: bool = False,\n num_machines: int = 1,\n num_gpus: int = 0,\n num_processes: int = 1,\n) -> TrainOutput:\n \"\"\"Main function for launching a training with lightning trainer\n Args:\n cfg: D2go config node\n num_machines: Number of nodes used for distributed training\n num_gpus: Number of GPUs to train on each node\n num_processes: Number of processes on each node.\n NOTE: Automatically set to the number of GPUs when using DDP.\n Set a value greater than 1 to mimic distributed training on CPUs.\n eval_only: True if run evaluation only.\n \"\"\"\n assert (\n num_processes == 1 or num_gpus == 0\n ), \"Only set num_processes > 1 when training on CPUs\"\n auto_scale_world_size(cfg, num_machines * num_gpus)\n maybe_override_output_dir(cfg, output_dir)\n\n task = task_cls.from_config(cfg, eval_only)\n tb_logger = TensorBoardLogger(save_dir=cfg.OUTPUT_DIR)\n\n trainer_params = {\n # training loop is bounded by max steps, use a large max_epochs to make\n # sure max_steps is met first\n \"max_epochs\": 10 ** 8,\n \"max_steps\": cfg.SOLVER.MAX_ITER,\n \"val_check_interval\": cfg.TEST.EVAL_PERIOD\n if cfg.TEST.EVAL_PERIOD > 0\n else cfg.SOLVER.MAX_ITER,\n \"num_nodes\": num_machines,\n \"gpus\": num_gpus,\n \"num_processes\": num_processes,\n \"accelerator\": get_accelerator(cfg.MODEL.DEVICE),\n \"callbacks\": _get_trainer_callbacks(cfg),\n \"logger\": tb_logger,\n \"num_sanity_val_steps\": 0,\n \"progress_bar_refresh_rate\": 10,\n }\n\n last_checkpoint = os.path.join(cfg.OUTPUT_DIR, \"last.ckpt\")\n if PathManager.exists(last_checkpoint):\n # resume training from checkpoint\n trainer_params[\"resume_from_checkpoint\"] = last_checkpoint\n logger.info(f\"Resuming training from checkpoint: {last_checkpoint}.\")\n\n trainer = pl.Trainer(**trainer_params)\n model_configs = None\n if eval_only:\n do_test(trainer, task)\n else:\n model_configs = do_train(cfg, trainer, task)\n\n return TrainOutput(\n output_dir=cfg.OUTPUT_DIR,\n tensorboard_log_dir=tb_logger.log_dir,\n accuracy=task.eval_res,\n model_configs=model_configs,\n )\n\n\ndef build_config(\n config_file: str,\n task_cls: Type[GeneralizedRCNNTask],\n opts: Optional[List[str]] = None,\n) -> CfgNode:\n \"\"\"Build config node from config file\n Args:\n config_file: Path to a D2go config file\n output_dir: When given, this will override the OUTPUT_DIR in the config\n opts: A list of config overrides. e.g. [\"SOLVER.IMS_PER_BATCH\", \"2\"]\n \"\"\"\n cfg = task_cls.get_default_cfg()\n cfg.merge_from_file(config_file)\n\n if opts:\n cfg.merge_from_list(opts)\n return cfg\n\n\ndef argument_parser():\n parser = basic_argument_parser(distributed=True, requires_output_dir=False)\n parser.add_argument(\n \"--num-gpus\", type=int, default=0, help=\"number of GPUs per machine\"\n )\n return parser\n\n\nif __name__ == \"__main__\":\n args = argument_parser().parse_args()\n task_cls = create_runner(args.runner) if args.runner else GeneralizedRCNNTask\n cfg = build_config(args.config_file, task_cls, args.opts)\n ret = main(\n cfg,\n args.output_dir,\n task_cls,\n eval_only=False, # eval_only\n num_machines=args.num_machines,\n num_gpus=args.num_gpus,\n num_processes=args.num_processes,\n )\n if get_rank() == 0:\n print(ret)\n"
] | [
[
"torch.distributed.get_rank"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bolero2/deeplab-v3-torch | [
"512c701d236ff2092be9aa8cc0219a333283bfb6"
] | [
"get_model.py"
] | [
 "import sys\nimport torch\nimport os\nimport yaml\nfrom glob import glob\n\nfrom modeling.deeplab import *\n\n\ndef get_model(yaml_path: str = './setting.yaml'):\n    file_path = os.path.split(sys.modules[__name__].__file__)[0]\n    yaml_file = os.path.join(file_path, yaml_path)\n\n    assert os.path.isfile(yaml_file), \"There isn't a setting.yaml file!\"\n    with open(yaml_file) as f:\n        setting = yaml.load(f, Loader=yaml.SafeLoader)\n    assert len(setting), \"at least one setting value must be specified (yaml file is empty).\"\n\n    if not isinstance(setting['classes'], list) and setting['classes'].split('.')[1] == 'txt':\n        file_list = glob(f\"{setting['DATASET']['root_path']}/**/{setting['classes']}\", recursive=True)\n        assert len(file_list) == 1, \"Expected exactly one classes file.\"\n        file_list = file_list[0]\n        class_txt = open(file_list, 'r')\n        classes = class_txt.readlines()\n        class_txt.close()\n        for i, c in enumerate(classes):\n            if c[-1] == '\\n':\n                classes[i] = c[:-1]\n\n        setting['classes'] = classes\n\n    setting['nc'] = len(setting['classes'])\n    setting['file_path'] = file_path\n\n    setting['train']['sync_bn'], setting['train']['resume'], setting['train']['checkname'] = None, None, None\n    setting['train']['cuda'] = not setting['train']['no_cuda'] and torch.cuda.is_available()\n\n    if isinstance(setting['train']['lr'], str) and setting['train']['lr'] != 'None':\n        print(\"learning rate is {}\".format(setting['train']['lr']))\n        setting['train']['lr'] = float(setting['train']['lr'])\n\n    elif setting['train']['lr'] is None or setting['train']['lr'] == 'None':\n        print(\"learning rate is None state. Override lr: 1e-3.\")\n        setting['train']['lr'] = 1e-3\n\n    model = DeepLab(num_classes=setting['nc'],\n                    backbone=setting['train']['backbone'],\n                    output_stride=setting['train']['out_stride'],\n                    sync_bn=setting['train']['sync_bn'],\n                    freeze_bn=setting['train']['freeze_bn'],\n                    setting=setting)\n\n    return model\n\n\nif __name__ == \"__main__\":\n    # Load settings from the yaml file (SafeLoader, same pattern as get_model above).\n    with open('setting.yaml') as f:\n        setting = yaml.load(f, Loader=yaml.SafeLoader)\n    model = DeepLab(num_classes=21,\n                    backbone='resnet',\n                    output_stride=16,\n                    sync_bn=None,\n                    freeze_bn=False,\n                    setting=setting)\n\n    print(model)\n    exit()\n"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
julenmendieta/metaseq | [
"fa875d1f72317aa7ef95cb128b739956b16eef9f"
] | [
"metaseq/test/examples/atf3_peaks.py"
] | [
"\"\"\"\nPractical testing grounds to see what sorts of features are needed. Heavily\ncommented to serve as interim documentation.\n\nUse the download_data.py script in the test/data dir to get ENCODE CTCF\nChIP-seq data.\n\nDifferent modes -- TSS, intron, peaks.\n\nEach one generates features of interest, and then grabs the raw data from the\nBAM files to construct the signal from that region. Doing that for thousands\nof features results in a matrix.\n\nCluster the matrix, sort the clusters, adjust heatmap . . . and then add\na strip of dots along the left side that, when zoomed in, can be clicked to\nspawn a mini-browser that shows the IP and input signal for that row as well as\nall nearby genes.\n\"\"\"\n\nimport os\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport pybedtools\nimport metaseq\nfrom metaseq.integration import chipseq\nfrom metaseq import colormap_adjust\n\n# Edit the settings files to make tweaks\nimport atf3_peaks_settings as settings\nimport atf3_peaks_helpers as helpers\n\n# global list that will store spawned figs, so you can use close_figs() to\n# close them all\nFIGS = []\n\n\ndef close_figs():\n \"\"\"\n Convenience function to close all mini-browser figures\n \"\"\"\n for fig in FIGS:\n plt.close(fig)\n\n# Choices for RUN_TYPE are:\n# * 'intron': all introns of all genes on the selected chromosomes\n# * 'TSS' : gene-level TSSs, +/- upstream and downstream bp\n# * 'peaks' : peaks from ENCODE; acts as a positive control on the numbers\n\nRUN_TYPE = 'TSS'\n\ntry:\n chip = chipseq.Chipseq(\n ip_bam=metaseq.example_filename(\n 'wgEncodeHaibTfbsK562Atf3V0416101AlnRep1.bam'\n ),\n control_bam=metaseq.example_filename(\n 'wgEncodeHaibTfbsK562RxlchV0416101AlnRep1.bam'\n ),\n dbfn=metaseq.example_filename(\n 'Homo_sapiens.GRCh37.66.cleaned.gtf.db')\n )\nexcept ValueError:\n raise ValueError(\"please use the download_data.py script in the \"\n \"data directory\")\n\n\nif RUN_TYPE == \"TSS\":\n # Gets all genes on selected chroms, then applies the TSS modifier and\n # saves the results\n tss_fn = 'example_tsses.gtf'\n if not os.path.exists(tss_fn):\n features = pybedtools.BedTool(helpers.gene_generator())\\\n .filter(helpers.chromfilter)\\\n .each(helpers.TSS, upstream=settings.UPSTREAM,\n downstream=settings.DOWNSTREAM)\\\n .saveas(tss_fn)\n else:\n features = pybedtools.BedTool(tss_fn)\n\nelif RUN_TYPE == \"intron\":\n # Gets all genes and exons on selected chroms, then subtracts exons from\n # genes.\n intron_fn = 'example_introns.gtf'\n if not os.path.exists(intron_fn):\n features = pybedtools.BedTool(helpers.intron_generator())\\\n .filter(helpers.chromfilter)\\\n .saveas(intron_fn)\n else:\n features = pybedtools.BedTool(intron_fn)\n\nelif RUN_TYPE == 'peaks':\n # Extends the ENCODE peaks and filters out ones with pvals higher than 1e-5\n features = pybedtools.BedTool(peaks)\\\n .filter(helpers.chromfilter)\\\n .filter(helpers.peak_filter)\\\n .each(helpers.peak_extender)\\\n .saveas()\n\n# This does most of the work -- given the list of `features`, we send chunks of\n# 50 features to each of 8 processes, binning reads in the BAM file in to\n# settings.BINS bins and extending the reads 3'-war by settings.FRAGMENT_SIZE\nchip.diff_array(\n features=features,\n array_kwargs=dict(processes=8, chunksize=50, bins=settings.BINS,\n fragment_size=settings.FRAGMENT_SIZE),\n )\n\n# Nice colormap centered on zero that doesn't get too saturated on the\n# negatives if they're not as extreme as the positives\ncmap = colormap_adjust.smart_colormap(\n 
chip.diffed_array.min(), chip.diffed_array.max())\n\n\n# Calculate the TIP scores for all features (see Cheng et al. 2011,\n# Bioinformatics 27(23):3221-3227)\nrow_order = np.argsort(metaseq.plotutils.tip_zscores(chip.diffed_array))\n\n# Indices to use if we want mini-batch k-means clustering\n# row_order, breaks = metaseq.plotutils.clustered_sortind(d, k=10)\n\n# x-axis for plots, also used for the extent of the matrix\nx = np.linspace(-settings.UPSTREAM, settings.DOWNSTREAM, settings.BINS)\n\n# Make and show the 4-panel fig.\nchip.plot(x, row_order=row_order, imshow_kwargs=dict(cmap=cmap))\nplt.show()\n"
] | [
[
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
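The row ordering in the record above comes from metaseq.plotutils.tip_zscores. As a rough illustration of the TIP idea from Cheng et al. (2011) -- weight each feature's signal by the average profile, then standardize -- here is a minimal sketch (a simplified stand-in, not metaseq's actual implementation):

import numpy as np

def tip_zscores_sketch(arr):
    # The mean signal profile over all features acts as a weight vector.
    weights = arr.mean(axis=0)
    weights = weights / weights.sum()  # normalize the weights
    # Score each feature by its signal projected onto the average profile.
    scores = arr @ weights
    # Standardize so features can be ranked by z-score.
    return (scores - scores.mean()) / scores.std()

demo = np.random.rand(100, 50)               # 100 features x 50 bins
row_order = np.argsort(tip_zscores_sketch(demo))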
jren73/DeepSpeed | [
"e4bd86a84c98ebcaec462abc118576e127528501"
] | [
"tests/unit/test_checkpointing.py"
] | [
"import torch\n\nimport torch.distributed as dist\n\nimport deepspeed\nfrom deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer\nfrom deepspeed.runtime.zero.stage1 import FP16_DeepSpeedZeroOptimizer_Stage1\n\nfrom deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer\nfrom deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer\n\nfrom deepspeed.runtime.pipe.topology import *\nPipeTopo = PipeDataParallelTopology\n\nfrom deepspeed.ops.op_builder import FusedLambBuilder, CPUAdamBuilder\n\nimport argparse\nimport pytest\nimport json\nimport os\nimport numbers\nfrom common import distributed_test\nfrom simple_model import *\n\n\ndef compare_deepspeed_states(saved_model, loaded_model):\n # These are compared in more depth in other places\n assert hasattr(loaded_model, 'module')\n\n assert saved_model.csr_tensor_module_names == loaded_model.csr_tensor_module_names\n assert saved_model.skipped_steps == loaded_model.skipped_steps\n assert saved_model.global_steps == loaded_model.global_steps\n\n\ndef compare_model_states(saved_model, loaded_model, compare_optimizer=True):\n compare_deepspeed_states(saved_model, loaded_model)\n\n for p0, p1 in zip(saved_model.module.parameters(), loaded_model.module.parameters()):\n assert id(p0) != id(p1), f'Comparing fp16 model state tensor against itself : {id(p0)} <====> {id(p1)}'\n assert torch.allclose(p0, p1, atol=1e-07), f\"FP16 model state {p0} is not equal to {p1}\"\n\n if not compare_optimizer:\n return\n\n if isinstance(saved_model.optimizer, FP16_DeepSpeedZeroOptimizer):\n for p0, p1 in zip(saved_model.optimizer.single_partition_of_fp32_groups, loaded_model.optimizer.single_partition_of_fp32_groups):\n assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'\n assert torch.allclose(p0, p1, atol=1e-07), f\"Fp32 model states {p0} is not equal to {p1}\"\n\n elif isinstance(saved_model.optimizer, FP16_DeepSpeedZeroOptimizer_Stage1):\n for partition0, partition1 in zip(saved_model.optimizer.local_sub_partitions_of_fp32_groups, loaded_model.optimizer.local_sub_partitions_of_fp32_groups):\n for p0, p1 in zip(partition0, partition1):\n assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'\n assert torch.allclose(p0, p1, atol=1e-07), f\"Fp32 model states {p0} is not equal to {p1}\"\n\n elif isinstance(saved_model.optimizer, FP16_Optimizer):\n for p0, p1 in zip(saved_model.optimizer.fp32_groups_flat, loaded_model.optimizer.fp32_groups_flat):\n assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'\n assert torch.allclose(p0, p1, atol=1e-07), f\"FP32 model states {p0} is not equal to {p1}\"\n\n elif isinstance(saved_model.optimizer, FP16_UnfusedOptimizer):\n for params0, params1 in zip(saved_model.optimizer.fp32_groups, loaded_model.optimizer.fp32_groups):\n for p0, p1 in zip(params0, params1):\n assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'\n assert torch.allclose(p0, p1, atol=1e-07), f\"FP32 model states {p0} is not equal to {p1}\"\n elif isinstance(saved_model.optimizer, torch.optim.Optimizer):\n pass\n else:\n assert False, f'Unexpected Optimizer Type: {saved_model.optimizer}'\n\n\ndef compare_optimizer_states(saved_model, loaded_model, hidden_dim, fp16=True):\n saved_optimizer = saved_model.optimizer.optimizer if fp16 else saved_model.optimizer\n loaded_optimizer = loaded_model.optimizer.optimizer if fp16 else 
loaded_model.optimizer\n\n for state0, state1 in zip(saved_optimizer.state.values(),\n loaded_optimizer.state.values()):\n for s0, s1 in zip(state0.values(), state1.values()):\n if isinstance(s0, torch.Tensor) and isinstance(s1, torch.Tensor):\n assert id(s0) != id(s1), f'Comparing optimizer state tensor against itself: {id(s0)} <====> {id(s1)}'\n assert torch.equal(s0, s1)\n else:\n assert s0 == s1\n\n\ndef compare_lr_scheduler_states(saved_model, loaded_model):\n assert hasattr(saved_model, 'lr_scheduler')\n assert hasattr(loaded_model, 'lr_scheduler')\n\n saved_scheduler = saved_model.lr_scheduler\n loaded_scheduler = loaded_model.lr_scheduler\n\n assert hasattr(saved_scheduler, 'state_dict')\n assert hasattr(loaded_scheduler, 'state_dict')\n\n saved_sd = saved_scheduler.state_dict()\n loaded_sd = loaded_scheduler.state_dict()\n\n print(f\"saved_sd = {saved_sd}\")\n print(f\"loaded_sd = {loaded_sd}\")\n\n assert saved_sd.keys() == loaded_sd.keys()\n\n for state0, state1 in zip(saved_sd.values(), loaded_sd.values()):\n if isinstance(state0, numbers.Number) and isinstance(state1, numbers.Number):\n assert state0 == state1\n\n\ndef create_deepspeed_model(args, model, base_optimizer):\n if base_optimizer is None:\n ds_model, _, _, _ = deepspeed.initialize(args=args,\n model=model,\n model_parameters=model.parameters())\n else:\n ds_model, _, _, _ = deepspeed.initialize(args=args,\n model=model,\n optimizer=base_optimizer)\n\n return ds_model\n\n\ndef checkpoint_correctness_verification(args,\n models,\n hidden_dim,\n tmpdir,\n load_optimizer_states=False,\n load_lr_scheduler_states=False,\n fp16=True,\n train_batch=False,\n base_optimizers=[None,\n None],\n empty_tag=False):\n dtype = torch.half if fp16 else torch.float32\n ds_model = create_deepspeed_model(args=args,\n model=models[0],\n base_optimizer=base_optimizers[0])\n\n data_loader = random_dataloader(model=ds_model,\n total_samples=50,\n hidden_dim=hidden_dim,\n device=ds_model.device,\n dtype=dtype)\n\n if train_batch:\n ds_model.set_dataloader(data_loader)\n for n, batch in enumerate(data_loader):\n loss = ds_model.train_batch()\n else:\n for n, batch in enumerate(data_loader):\n loss = ds_model(batch[0], batch[1])\n ds_model.backward(loss)\n ds_model.step()\n\n trained_model = ds_model\n\n save_folder = os.path.join(tmpdir, 'saved_checkpoint')\n save_tag = None if empty_tag else '1'\n\n trained_model.save_checkpoint(save_folder, tag=save_tag)\n\n loaded_model = create_deepspeed_model(args=args,\n model=models[1],\n base_optimizer=base_optimizers[1])\n\n loaded_model.load_checkpoint(save_folder,\n tag=save_tag,\n load_optimizer_states=load_optimizer_states,\n load_lr_scheduler_states=load_lr_scheduler_states)\n\n compare_model_states(trained_model, loaded_model)\n\n if load_optimizer_states:\n compare_optimizer_states(trained_model, loaded_model, hidden_dim, fp16)\n\n if load_lr_scheduler_states:\n compare_lr_scheduler_states(trained_model, loaded_model)\n\n\[email protected](not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME],\n reason=\"lamb is not compatible\")\ndef test_checkpoint_unfused_optimizer(tmpdir):\n config_dict = {\n \"train_batch_size\": 2,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": \"Lamb\",\n \"params\": {\n \"lr\": 0.00015\n }\n },\n \"gradient_clipping\": 1.0,\n \"fp16\": {\n \"enabled\": True\n },\n \"scheduler\": {\n \"type\": \"OneCycle\",\n \"params\": {\n \"cycle_first_step_size\": 1000,\n \"cycle_first_stair_count\": 500,\n \"cycle_second_step_size\": 1000,\n 
\"cycle_second_stair_count\": 500,\n \"decay_step_size\": 1000,\n \"cycle_min_lr\": 0.0001,\n \"cycle_max_lr\": 0.0010,\n \"decay_lr_rate\": 0.001,\n \"cycle_min_mom\": 0.85,\n \"cycle_max_mom\": 0.99,\n \"decay_mom_rate\": 0.0\n }\n }\n }\n\n args = args_from_dict(tmpdir, config_dict)\n hidden_dim = 10\n\n models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]\n\n @distributed_test(world_size=[2])\n def _test_checkpoint_unfused_optimizer(args,\n models,\n hidden_dim,\n load_optimizer_states):\n checkpoint_correctness_verification(args,\n models=models,\n hidden_dim=hidden_dim,\n tmpdir=tmpdir,\n load_optimizer_states=load_optimizer_states)\n\n _test_checkpoint_unfused_optimizer(args=args,\n models=models,\n hidden_dim=hidden_dim,\n load_optimizer_states=True)\n\n _test_checkpoint_unfused_optimizer(args=args,\n models=models,\n hidden_dim=hidden_dim,\n load_optimizer_states=False)\n\n\ndef test_checkpoint_fused_optimizer(tmpdir):\n config_dict = {\n \"train_batch_size\": 2,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": \"Adam\",\n \"params\": {\n \"lr\": 0.00015,\n \"betas\": [0.8,\n 0.999],\n \"eps\": 1e-8,\n \"weight_decay\": 3e-7\n }\n },\n \"fp16\": {\n \"enabled\": True\n }\n }\n\n args = args_from_dict(tmpdir, config_dict)\n hidden_dim = 10\n\n models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]\n\n @distributed_test(world_size=[2])\n def _test_checkpoint_fused_optimizer(args,\n models,\n hidden_dim,\n load_optimizer_states):\n checkpoint_correctness_verification(args,\n models=models,\n hidden_dim=hidden_dim,\n tmpdir=tmpdir,\n load_optimizer_states=load_optimizer_states)\n\n _test_checkpoint_fused_optimizer(args=args,\n models=models,\n hidden_dim=hidden_dim,\n load_optimizer_states=True)\n\n _test_checkpoint_fused_optimizer(args=args,\n models=models,\n hidden_dim=hidden_dim,\n load_optimizer_states=False)\n\n\[email protected]('zero_stage, use_cpu_offload',\n [\n (1,\n False),\n (2,\n False),\n (2,\n True),\n ])\ndef test_checkpoint_zero_optimizer(tmpdir, zero_stage, use_cpu_offload):\n if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:\n pytest.skip(\"cpu-adam is not compatible\")\n\n config_dict = {\n \"train_batch_size\": 2,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": 'Adam',\n \"params\": {\n \"lr\": 0.00015,\n \"betas\": [0.8,\n 0.999],\n \"eps\": 1e-8,\n \"weight_decay\": 3e-7\n }\n },\n \"fp16\": {\n \"enabled\": True\n },\n \"zero_optimization\": {\n \"stage\": zero_stage,\n \"cpu_offload\": use_cpu_offload\n }\n }\n args = args_from_dict(tmpdir, config_dict)\n hidden_dim = 10\n\n models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]\n\n @distributed_test(world_size=[2])\n def _test_checkpoint_zero_optimizer(args, models, hidden_dim, load_optimizer_states):\n checkpoint_correctness_verification(args,\n models=models,\n hidden_dim=hidden_dim,\n tmpdir=tmpdir,\n load_optimizer_states=load_optimizer_states)\n\n _test_checkpoint_zero_optimizer(args=args,\n models=models,\n hidden_dim=hidden_dim,\n load_optimizer_states=True)\n\n\[email protected]('zero_stage, use_cpu_offload',\n [\n (1,\n False),\n (2,\n False),\n (2,\n True),\n ])\ndef test_checkpoint_zero_no_optimizer(tmpdir, zero_stage, use_cpu_offload):\n if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:\n pytest.skip(\"cpu-adam is not compatible\")\n\n config_dict = {\n \"train_batch_size\": 2,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": 'Adam',\n \"params\": {\n \"lr\": 
0.00015,\n \"betas\": [0.8,\n 0.999],\n \"eps\": 1e-8,\n \"weight_decay\": 3e-7\n }\n },\n \"fp16\": {\n \"enabled\": True\n },\n \"zero_optimization\": {\n \"stage\": zero_stage,\n \"cpu_offload\": use_cpu_offload\n }\n }\n args = args_from_dict(tmpdir, config_dict)\n hidden_dim = 10\n\n models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]\n\n @distributed_test(world_size=[2])\n def _test_checkpoint_zero_no_optimizer(args,\n models,\n hidden_dim,\n load_optimizer_states):\n checkpoint_correctness_verification(args,\n models=models,\n hidden_dim=hidden_dim,\n tmpdir=tmpdir,\n load_optimizer_states=load_optimizer_states)\n\n _test_checkpoint_zero_no_optimizer(args=args,\n models=models,\n hidden_dim=hidden_dim,\n load_optimizer_states=False)\n\n\[email protected]('zero_stage, use_cpu_offload',\n [\n (0,\n False),\n (1,\n False),\n (2,\n False),\n (2,\n True),\n ])\ndef test_checkpoint_lr_scheduler(tmpdir, zero_stage, use_cpu_offload):\n if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:\n pytest.skip(\"cpu-adam is not compatible\")\n\n config_dict = {\n \"train_batch_size\": 2,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": 'Adam',\n \"params\": {\n \"lr\": 0.00015,\n \"betas\": [0.8,\n 0.999],\n \"eps\": 1e-8,\n \"weight_decay\": 3e-7\n }\n },\n \"fp16\": {\n \"enabled\": True\n },\n \"zero_optimization\": {\n \"stage\": zero_stage,\n \"cpu_offload\": use_cpu_offload\n },\n \"scheduler\": {\n \"type\": \"WarmupLR\",\n \"params\": {\n \"warmup_min_lr\": 0,\n \"warmup_max_lr\": 0.001,\n \"warmup_num_steps\": 1000\n }\n }\n }\n args = args_from_dict(tmpdir, config_dict)\n hidden_dim = 10\n\n models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]\n\n @distributed_test(world_size=[2])\n def _test_checkpoint_lr_scheduler(args,\n models,\n hidden_dim,\n load_optimizer_states,\n load_lr_scheduler_states):\n checkpoint_correctness_verification(\n args,\n models=models,\n hidden_dim=hidden_dim,\n tmpdir=tmpdir,\n load_optimizer_states=load_optimizer_states,\n load_lr_scheduler_states=load_lr_scheduler_states)\n\n _test_checkpoint_lr_scheduler(args=args,\n models=models,\n hidden_dim=hidden_dim,\n load_optimizer_states=False,\n load_lr_scheduler_states=True)\n\n\[email protected]('zero_stage, use_cpu_offload',\n [\n (0,\n False),\n (1,\n False),\n (2,\n False),\n (2,\n True),\n ])\ndef test_checkpoint_no_lr_scheduler(tmpdir, zero_stage, use_cpu_offload):\n if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:\n pytest.skip(\"cpu-adam is not compatible\")\n\n config_dict = {\n \"train_batch_size\": 2,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": 'Adam',\n \"params\": {\n \"lr\": 1e-5\n }\n },\n \"fp16\": {\n \"enabled\": True\n },\n \"zero_optimization\": {\n \"stage\": zero_stage,\n \"cpu_offload\": use_cpu_offload\n },\n \"scheduler\": {\n \"type\": \"WarmupLR\",\n \"params\": {\n \"warmup_min_lr\": 0,\n \"warmup_max_lr\": 0.001,\n \"warmup_num_steps\": 1000\n }\n },\n }\n args = args_from_dict(tmpdir, config_dict)\n hidden_dim = 10\n\n models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]\n\n @distributed_test(world_size=[2])\n def _test_checkpoint_no_lr_scheduler(args,\n models,\n hidden_dim,\n load_optimizer_states,\n load_lr_scheduler_states):\n checkpoint_correctness_verification(\n args,\n models=models,\n hidden_dim=hidden_dim,\n tmpdir=tmpdir,\n load_optimizer_states=load_optimizer_states,\n load_lr_scheduler_states=load_lr_scheduler_states)\n\n 
_test_checkpoint_no_lr_scheduler(args=args,\n models=models,\n hidden_dim=hidden_dim,\n load_optimizer_states=False,\n load_lr_scheduler_states=False)\n\n\ndef test_checkpoint_fp32_optimizer(tmpdir):\n config_dict = {\n \"train_batch_size\": 2,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": \"Adam\",\n \"params\": {\n \"lr\": 0.00015,\n \"betas\": [0.8,\n 0.999],\n \"eps\": 1e-8,\n \"weight_decay\": 3e-7\n }\n },\n \"fp16\": {\n \"enabled\": False\n }\n }\n\n args = args_from_dict(tmpdir, config_dict)\n hidden_dim = 10\n\n models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]\n\n @distributed_test(world_size=[2])\n def _test_checkpoint_fp32_optimizer(args, models, hidden_dim):\n checkpoint_correctness_verification(args,\n models=models,\n hidden_dim=hidden_dim,\n tmpdir=tmpdir,\n fp16=False)\n\n _test_checkpoint_fp32_optimizer(args=args, models=models, hidden_dim=hidden_dim)\n\n\[email protected](\"zero_stage\", [0, 1])\ndef test_checkpoint_pipe_engine(zero_stage, tmpdir, stages=2):\n config_dict = {\n \"train_batch_size\": 2,\n \"train_micro_batch_size_per_gpu\": 1,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": \"Adam\",\n \"params\": {\n \"lr\": 1e-5\n }\n },\n \"zero_optimization\": {\n \"stage\": zero_stage\n },\n \"fp16\": {\n \"enabled\": zero_stage > 0\n },\n \"scheduler\": {\n \"type\": \"OneCycle\",\n \"params\": {\n \"cycle_first_step_size\": 1000,\n \"cycle_first_stair_count\": 500,\n \"cycle_second_step_size\": 1000,\n \"cycle_second_stair_count\": 500,\n \"decay_step_size\": 1000,\n \"cycle_min_lr\": 0.0001,\n \"cycle_max_lr\": 0.0010,\n \"decay_lr_rate\": 0.001,\n \"cycle_min_mom\": 0.85,\n \"cycle_max_mom\": 0.99,\n \"decay_mom_rate\": 0.0\n }\n }\n }\n\n @distributed_test(world_size=4)\n def _test(save_folder, num_stages):\n args = args_from_dict(tmpdir, config_dict)\n models = [LinearStackPipe(num_stages=num_stages) for _ in range(2)]\n checkpoint_correctness_verification(args=args,\n models=models,\n hidden_dim=models[0].hidden_dim,\n tmpdir=save_folder,\n fp16=config_dict['fp16']['enabled'],\n load_optimizer_states=True,\n load_lr_scheduler_states=True,\n train_batch=True)\n\n _test(tmpdir, num_stages=stages)\n\n\[email protected](\"base_topo,test_topo\",\n [\n (PipeTopo(num_pp=1,\n num_dp=4),\n PipeTopo(num_pp=4,\n num_dp=1)),\n (PipeTopo(num_pp=2,\n num_dp=2),\n PipeTopo(num_pp=2,\n num_dp=2)),\n (PipeTopo(num_pp=4,\n num_dp=1),\n PipeTopo(num_pp=2,\n num_dp=2)),\n ])\ndef test_checkpoint_pipe_module(base_topo, test_topo, tmpdir):\n @distributed_test(world_size=4)\n def _test(base_topo, test_topo, save_folder):\n base_model = LinearStackPipe(topology=base_topo)\n base_model.save_state_dict(save_folder)\n\n dist.barrier()\n\n test_model = LinearStackPipe(topology=test_topo)\n test_model.load_state_dir(save_folder)\n\n # Base and test can have different lengths, so make sure we map from the\n # smaller to larger model\n if len(base_model.forward_funcs) < len(test_model.forward_funcs):\n A = base_model\n B = test_model\n else:\n A = test_model\n B = base_model\n\n # Compare layers individually since partitions are different\n for idx, A_layer in enumerate(A.forward_funcs):\n if not hasattr(A_layer, 'parameters'):\n # Skip functionals, etc.\n continue\n\n # Find the corresponding layer in B\n global_idx = idx + A._local_start\n B_local_idx = global_idx - B._local_start\n B_layer = B.forward_funcs[B_local_idx]\n\n # Compare layer parameters\n for p0, p1 in zip(A_layer.parameters(), B_layer.parameters()):\n assert torch.allclose(p0, p1, 
atol=1e-07), f\"Model state {p0} is not equal to {p1}\"\n\n _test(base_topo, test_topo, save_folder=tmpdir)\n\n\[email protected]('zero_stage', [1, 2])\ndef test_checkpoint_zero_hybrid_optimizer_state(tmpdir, zero_stage):\n config_dict = {\n \"train_micro_batch_size_per_gpu\": 2,\n \"gradient_accumulation_steps\": 2,\n \"steps_per_print\": 1,\n \"zero_optimization\": {\n \"stage\": zero_stage\n },\n \"zero_allow_untested_optimizer\": True,\n \"fp16\": {\n \"enabled\": True,\n \"initial_scale_power\": 8\n }\n }\n\n args = args_from_dict(tmpdir, config_dict)\n hidden_dim = 10\n models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)]\n optimizers = [HybridStateOptimizer(model.parameters()) for model in models]\n\n @distributed_test(world_size=[2])\n def _test_checkpoint_zero_hybrid_optimizer_state(args,\n models,\n optimizers,\n hidden_dim):\n checkpoint_correctness_verification(args,\n models=models,\n base_optimizers=optimizers,\n hidden_dim=hidden_dim,\n tmpdir=tmpdir,\n load_optimizer_states=True)\n\n _test_checkpoint_zero_hybrid_optimizer_state(args=args,\n models=models,\n optimizers=optimizers,\n hidden_dim=hidden_dim)\n\n\ndef test_checkpoint_latest(tmpdir):\n config_dict = {\n \"train_batch_size\": 2,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": \"Adam\",\n \"params\": {\n \"lr\": 0.00015\n }\n }\n }\n hidden_dim = 10\n args = args_from_dict(tmpdir, config_dict)\n models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)]\n\n @distributed_test(world_size=[1])\n def _helper(args, models):\n checkpoint_correctness_verification(args,\n models=models,\n hidden_dim=hidden_dim,\n tmpdir=tmpdir,\n load_optimizer_states=True,\n load_lr_scheduler_states=False,\n fp16=False,\n empty_tag=True)\n\n _helper(args, models)\n\n\ndef test_checkpoint_missing_latest(tmpdir):\n config_dict = {\n \"train_batch_size\": 2,\n \"steps_per_print\": 1,\n \"optimizer\": {\n \"type\": \"Adam\",\n \"params\": {\n \"lr\": 0.00015\n }\n }\n }\n hidden_dim = 10\n args = args_from_dict(tmpdir, config_dict)\n\n model = SimpleModel(hidden_dim, rank=args.local_rank)\n\n @distributed_test(world_size=[1])\n def _helper(args, model, hidden_dim):\n model, _, _,_ = deepspeed.initialize(args=args,\n model=model,\n model_parameters=model.parameters())\n # should be no-op, since latest doesn't exist\n model.load_checkpoint(tmpdir)\n\n _helper(args=args, model=model, hidden_dim=hidden_dim)\n"
] | [
[
"torch.equal",
"torch.allclose",
"torch.distributed.barrier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
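The checkpoint tests above all follow one pattern: train a model, save, reload into a fresh instance, then compare state tensor-by-tensor while asserting the two objects are distinct. A minimal framework-free sketch of that round-trip check in plain PyTorch (hypothetical single-layer model; an illustration of the pattern, not DeepSpeed's engine API):

import torch
import torch.nn as nn

def check_roundtrip(path='/tmp/ckpt.pt'):
    saved = nn.Linear(10, 10)
    torch.save(saved.state_dict(), path)

    loaded = nn.Linear(10, 10)          # fresh, randomly initialized weights
    loaded.load_state_dict(torch.load(path))

    for p0, p1 in zip(saved.parameters(), loaded.parameters()):
        # Guard against accidentally comparing a tensor with itself.
        assert id(p0) != id(p1)
        # Values must survive the save/load round trip (within tolerance).
        assert torch.allclose(p0, p1, atol=1e-07)

check_roundtrip()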
saritma/hw5_2019 | [
"6833385c9f608d7bf7f62f0f81975a9b5e9e3431"
] | [
"hw5.py"
] | [
"from pathlib import Path \nimport pandas as pd\nimport json\nfrom pandas.io.json import json_normalize\nimport numpy as np\nimport matplotlib.pyplot as plt\nclass QuestionnaireAnalysis:\n \"\"\"\n Reads and analyzes data generated by the questionnaire experiment.\n Should be able to accept strings and pathlib.Path objects.\n \"\"\"\n \n def __init__(self, data_fname):\n self.data_fname=data_fname\n\n def read_data(self):\n \"\"\"\n Reads the json data located in self.data_fname into memory, to\n the attribute self.data.\n \"\"\"\n with open(self.data_fname) as json_file: \n self.data = json.load(json_file)\n self.a=json_normalize(self.data)\n self.a[self.a=='nan']=np.nan\n \n \n\n def show_age_distrib(self) :\n \"\"\"\n Calculates and plots the age distribution of the participants.\n Returns a tuple containing two numpy arrays:\n The first item being the number of people in a given bin.\n The second item being the bin edges.\n \"\"\"\n\n \n self.a.age.plot(kind='hist',bins=[0, 10,20,30,40,50,60,70,80,90,100])\n self.n, self.bins=np.histogram(self.a.age,bins=[0, 10,20,30,40,50,60,70,80,90,100])\n plt.show()\n return self.n , self.bins\n\n def remove_rows_without_mail(self) :\n \"\"\"\n Checks self.data for rows with invalid emails, and removes them.\n Returns the corrected DataFrame, i.e. the same table but with\n the erroneous rows removed and the (ordinal) index after a reset.\n \"\"\"\n df=[]\n for i in range(100):\n if '@' and '.com' in self.a.email[i]:\n df.append(i)\n self.d=self.a.iloc[df]\n self.d=self.d.reset_index(drop=True)\n return self.d\n\n def fill_na_with_mean(self) :\n \"\"\"\n Finds, in the original DataFrame, the subjects that didn't answer\n all questions, and replaces that missing value with the mean of the\n other grades for that student. Returns the corrected DataFrame,\n as well as the row indices of the students that their new grades\n were generated.\n \"\"\"\n b=self.a.loc[:,'q1':'q5']\n idx=np.where(b.isnull())\n\n for i in range(100):\n d=b.iloc[i] \n d=d.fillna(np.nanmean(list(d)))\n b.iloc[i]=d\n\n self.a.loc[:,'q1':'q5']=b\n return self.a, idx[0]\n\nif __name__=='__main__':\n data=QuestionnaireAnalysis('data.json')\n data.read_data()\n print(data.show_age_distrib())\n print(data.remove_rows_without_mail())\n print(data.fill_na_with_mean())\n\n"
] | [
[
"pandas.io.json.json_normalize",
"matplotlib.pyplot.show",
"numpy.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
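The email filter fixed above is a classic operator-precedence trap: '@' and '.com' in s parses as ('@') and ('.com' in s), and since the literal '@' is always truthy only the '.com' test ever runs. A short demonstration, plus a slightly more robust regex alternative (illustrative only, not part of the original assignment):

import re

s = 'no-at-sign.com'
print('@' and '.com' in s)         # True  -- the '@' check silently vanishes
print('@' in s and '.com' in s)    # False -- the intended condition

EMAIL_RE = re.compile(r'^[^@\s]+@[^@\s]+\.[^@\s]+$')
print(bool(EMAIL_RE.match('user@example.com')))  # True
print(bool(EMAIL_RE.match(s)))                   # False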
Junlin-Yin/Data-Mining-Homework | [
"043f836e3dd30f32b5b06f40af61ae55b9287fbc"
] | [
"hw3/hw3_code/knn/hack.py"
] | [
"import numpy as np\nimport extract_image\nimport knn\n\ndef hack(img_name):\n '''\n HACK Recognize a CAPTCHA image\n Inputs:\n img_name: filename of image\n Outputs:\n digits: 1x5 matrix, 5 digits in the input CAPTCHA image.\n '''\n data = np.load('hack_data.npz')\n\n # YOUR CODE HERE (you can delete the following code as you wish)\n x_train = data['x_train'] # x_train.shape = (50, 140)\n y_train = data['y_train'] # y_train.shape = (50, )\n # begin answer\n x_test = extract_image.extract_image(img_name) # x_test.shape = (5, 140)\n k = 10\n digits = knn.knn(x_test, x_train, y_train, k) # digits.shape = (5, )\n # end answer\n\n return digits"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
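knn.knn in the record above is the course's own module; as a sketch of what a k-nearest-neighbour classifier does over these flattened digit images (Euclidean distance plus majority vote; a simplified stand-in, not the assignment's actual implementation):

import numpy as np
from collections import Counter

def knn_sketch(x_test, x_train, y_train, k):
    preds = []
    for x in x_test:
        # Distance from this test image to every training image.
        dists = np.linalg.norm(x_train - x, axis=1)
        # Labels of the k closest training images, then majority vote.
        nearest = y_train[np.argsort(dists)[:k]]
        preds.append(Counter(nearest).most_common(1)[0][0])
    return np.array(preds)

# e.g. knn_sketch(x_test, x_train, y_train, k=10) -> array of 5 digits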
carpeanon/turing | [
"0fa103ab0432efe9d21237d5690f42cf60d292be"
] | [
"tasks/copy.py"
] | [
"import os\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom random import randint\n\nfrom ntm import NTM\nfrom utils import pprint\nfrom ntm_cell import NTMCell\n\nprint_interval = 5\n\ndef copy(ntm, seq_length, sess, print_=True):\n start_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)\n start_symbol[0] = 1\n end_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)\n end_symbol[1] = 1\n\n seq = generate_copy_sequence(seq_length, ntm.cell.input_dim - 2)\n\n feed_dict = {input_:vec for vec, input_ in zip(seq, ntm.inputs)}\n feed_dict.update(\n {true_output:vec for vec, true_output in zip(seq, ntm.true_outputs)}\n )\n feed_dict.update({\n ntm.start_symbol: start_symbol,\n ntm.end_symbol: end_symbol\n })\n\n input_states = [state['write_w'][0] for state in ntm.input_states[seq_length]]\n output_states = [state['read_w'][0] for state in ntm.get_output_states(seq_length)]\n\n result = sess.run(ntm.get_outputs(seq_length) + \\\n input_states + output_states + \\\n [ntm.get_loss(seq_length)],\n feed_dict=feed_dict)\n\n is_sz = len(input_states)\n os_sz = len(output_states)\n\n outputs = result[:seq_length]\n read_ws = result[seq_length:seq_length + is_sz]\n write_ws = result[seq_length + is_sz:seq_length + is_sz + os_sz]\n loss = result[-1]\n\n if print_:\n np.set_printoptions(suppress=True)\n print(\" true output : \")\n pprint(seq)\n print(\" predicted output :\")\n pprint(np.round(outputs))\n print(\" Loss : %f\" % loss)\n np.set_printoptions(suppress=False)\n else:\n return seq, outputs, read_ws, write_ws, loss\n\ndef copy_train(config, sess):\n if not os.path.isdir(config.checkpoint_dir):\n raise Exception(\" [!] Directory %s not found\" % config.checkpoint_dir)\n\n # delimiter flag for start and end\n start_symbol = np.zeros([config.input_dim], dtype=np.float32)\n start_symbol[0] = 1\n end_symbol = np.zeros([config.input_dim], dtype=np.float32)\n end_symbol[1] = 1\n\n cell = NTMCell(input_dim=config.input_dim,\n output_dim=config.output_dim,\n controller_layer_size=config.controller_layer_size,\n write_head_size=config.write_head_size,\n read_head_size=config.read_head_size)\n ntm = NTM(cell, sess, config.min_length, config.max_length)\n\n print(\" [*] Initialize all variables\")\n tf.initialize_all_variables().run()\n print(\" [*] Initialization finished\")\n\n start_time = time.time()\n for idx in xrange(config.epoch):\n seq_length = randint(config.min_length, config.max_length)\n seq = generate_copy_sequence(seq_length, config.input_dim - 2)\n\n feed_dict = {input_:vec for vec, input_ in zip(seq, ntm.inputs)}\n feed_dict.update(\n {true_output:vec for vec, true_output in zip(seq, ntm.true_outputs)}\n )\n feed_dict.update({\n ntm.start_symbol: start_symbol,\n ntm.end_symbol: end_symbol\n })\n\n _, cost, step = sess.run([ntm.optims[seq_length],\n ntm.get_loss(seq_length),\n ntm.global_step], feed_dict=feed_dict)\n\n if idx % 100 == 0:\n ntm.save(config.checkpoint_dir, 'copy', step)\n\n if idx % print_interval == 0:\n print(\"[%5d] %2d: %.2f (%.1fs)\" \\\n % (idx, seq_length, cost, time.time() - start_time))\n\n print(\"Training Copy task finished\")\n return cell, ntm\n\ndef generate_copy_sequence(length, bits):\n seq = np.zeros([length, bits + 2], dtype=np.float32)\n for idx in xrange(length):\n seq[idx, 2:bits+2] = np.random.rand(bits).round()\n return list(seq)\n"
] | [
[
"numpy.set_printoptions",
"numpy.round",
"tensorflow.initialize_all_variables",
"numpy.random.rand",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
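In the copy task above, the first two input dimensions are reserved for the start/end delimiter symbols, which is why generate_copy_sequence only fills columns 2 onward with random bits. A quick NumPy-only illustration of that layout (no TensorFlow or NTM code needed):

import numpy as np

length, bits = 4, 6
seq = np.zeros([length, bits + 2], dtype=np.float32)
for idx in range(length):
    seq[idx, 2:bits + 2] = np.random.rand(bits).round()

start_symbol = np.zeros(bits + 2, dtype=np.float32)
start_symbol[0] = 1   # delimiter channel 0 marks sequence start
end_symbol = np.zeros(bits + 2, dtype=np.float32)
end_symbol[1] = 1     # delimiter channel 1 marks sequence end

print(seq)  # columns 0 and 1 stay zero inside the sequence body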
lorenh516/financial_distress | [
"121b83ea74aadaaf6147de82a9132edba178866f"
] | [
"analysis/credit_util.py"
] | [
"import pandas as pd\nfrom ml_explore import eval_ratios\n\n\ndef payment_grid(df, focus_cols, group_col):\n df_list = []\n for col in focus_cols:\n new_df = eval_ratios(df, include_cols = [group_col, col], \n category_cols = [group_col], method = \"sum\", \n pct = True)\n df_list.append(new_df)\n full_df = pd.concat(df_list, axis = 1)\n return full_df\n\n\n\ndef standardized_comparison(df, primary_cols, group_cols, insert_col = None):\n df_list = []\n if insert_col:\n focus_cols = primary_cols + [insert_col]\n final_group = group_cols + [insert_col]\n return df[focus_cols].groupby(final_group).mean()\n\n return df[primary_cols].groupby(group_cols).mean()"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
LalicUfscar/WE-PE-tool | [
"e90ef8a098809a58d17347da6a88fd980e99416f"
] | [
"src/error_identification/tests/k_fold_correct_error.py"
] | [
"#!/usr/bin/env python3\nimport subprocess\nimport os\nimport progressbar\nimport sys\nfrom readers.read_blast import BlastReader\nfrom readers.read_giza import GIZAReader\nfrom utils import tag_sentences, extract_features, format_features\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import KFold\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.metrics import accuracy_score, classification_report, recall_score, make_scorer\n\n# BLAST_PATH = '/home/marciolima/Documentos/Lalic/post-editing/src/error_identification/error-ident-blast.txt'\n# BLAST_PATH = '/home/marciolima/Documentos/Lalic/post-editing/src/error_identification/exemplo.blast'\nBLAST_PATH = '/home/marciolima/Documentos/WE-PE-tool/error-ident-blast.txt'\nFEATURES_FILE = 'features_final.pkl'\nTW_SZ = 5\nERRORS = ['lex-incTrWord', 'lex-notTrWord']\n\nlb = LabelEncoder()\n\ndef test_correct_error(data):\n # Replace not correct targets with error\n data.loc[data['target'] != 'correct', 'target'] = 'error'\n # Encode target into numbers\n data['target'] = lb.fit_transform(data['target'])\n\n X = data.loc[:, data.columns != 'target']\n y = data['target']\n\n kf = KFold(n_splits=10, shuffle=True)\n\n print('Arvore de decisao - GINI')\n avg_precision = 0\n avg_precision_correct = 0\n avg_precision_error = 0\n fold = 1\n for (train, test) in kf.split(X):\n # Training\n dt = DecisionTreeClassifier()\n dt.fit(X.loc[train], y.loc[train])\n\n results = dt.predict(X.loc[test])\n precision = accuracy_score(y.loc[test], results)\n avg_precision += precision\n\n precision_correct = accuracy_score(\n y.loc[test].loc[y.loc[test] == 0], results[y.loc[test] == lb.transform(['correct'])[0]])\n avg_precision_correct += precision_correct\n precision_error = accuracy_score(\n y.loc[test].loc[y.loc[test] != 0], results[y.loc[test] != lb.transform(['correct'])[0]])\n avg_precision_error += precision_error\n\n print('Precisao - Fold {}: {:.2f}%'.format(fold, precision * 100))\n print('Precisao corretas - Fold {}: {:.2f}%'.format(fold,\n precision_correct * 100))\n print('Precisao erros - Fold {}: {:.2f}%\\n'.format(fold, precision_error * 100))\n fold += 1\n avg_precision /= 10\n avg_precision_correct /= 10\n avg_precision_error /= 10\n print('Precisao media: {:.2f}%'.format(avg_precision * 100))\n print('Precisao media corretas: {:.2f}%'.format(\n avg_precision_correct * 100))\n print('Precisao media erros: {:.2f}%'.format(avg_precision_error * 100))\n print('------------------------------')\n\n print('Arvore de decisao - Entropy')\n avg_precision = 0\n avg_precision_correct = 0\n avg_precision_error = 0\n fold = 1\n for (train, test) in kf.split(X):\n # Training\n dt = DecisionTreeClassifier(criterion='entropy')\n dt.fit(X.loc[train], y.loc[train])\n\n results = dt.predict(X.loc[test])\n precision = accuracy_score(y.loc[test], results)\n avg_precision += precision\n\n precision_correct = accuracy_score(\n y.loc[test].loc[y.loc[test] == 0], results[y.loc[test] == lb.transform(['correct'])[0]])\n avg_precision_correct += precision_correct\n precision_error = accuracy_score(\n y.loc[test].loc[y.loc[test] != 0], results[y.loc[test] != lb.transform(['correct'])[0]])\n avg_precision_error += precision_error\n\n print('Precisao - Fold {}: {:.2f}%'.format(fold, precision * 100))\n print('Precisao corretas - Fold {}: {:.2f}%'.format(fold,\n 
precision_correct * 100))\n print('Precisao erros - Fold {}: {:.2f}%\\n'.format(fold, precision_error * 100))\n fold += 1\n avg_precision /= 10\n avg_precision_correct /= 10\n avg_precision_error /= 10\n print('Precisao media: {:.2f}%'.format(avg_precision * 100))\n print('Precisao media corretas: {:.2f}%'.format(\n avg_precision_correct * 100))\n print('Precisao media erros: {:.2f}%'.format(avg_precision_error * 100))\n print('------------------------------')\n\n print('SVM - Um contra todos')\n avg_precision = 0\n avg_precision_correct = 0\n avg_precision_error = 0\n fold = 1\n for (train, test) in kf.split(X):\n svm = LinearSVC()\n svm.fit(X.loc[train], y.loc[train])\n\n results = svm.predict(X.loc[test])\n precision = accuracy_score(y.loc[test], results)\n avg_precision += precision\n\n precision_correct = accuracy_score(\n y.loc[test].loc[y.loc[test] == 0], results[y.loc[test] == lb.transform(['correct'])[0]])\n avg_precision_correct += precision_correct\n precision_error = accuracy_score(\n y.loc[test].loc[y.loc[test] != 0], results[y.loc[test] != lb.transform(['correct'])[0]])\n avg_precision_error += precision_error\n\n print('Precisao - Fold {}: {:.2f}%'.format(fold, precision * 100))\n print('Precisao corretas - Fold {}: {:.2f}%'.format(fold,\n precision_correct * 100))\n print('Precisao erros - Fold {}: {:.2f}%\\n'.format(fold, precision_error * 100))\n fold += 1\n avg_precision /= 10\n avg_precision_correct /= 10\n avg_precision_error /= 10\n print('Precisao media: {:.2f}%'.format(avg_precision * 100))\n print('Precisao media corretas: {:.2f}%'.format(\n avg_precision_correct * 100))\n print('Precisao media erros: {:.2f}%'.format(avg_precision_error * 100))\n print('------------------------------')\n\n print('SVM - Crammer-Singer')\n avg_precision = 0\n avg_precision_correct = 0\n avg_precision_error = 0\n fold = 1\n for (train, test) in kf.split(X):\n svm = LinearSVC(multi_class='crammer_singer')\n svm.fit(X.loc[train], y.loc[train])\n\n results = svm.predict(X.loc[test])\n precision = accuracy_score(y.loc[test], results)\n avg_precision += precision\n\n precision_correct = accuracy_score(\n y.loc[test].loc[y.loc[test] == 0], results[y.loc[test] == lb.transform(['correct'])[0]])\n avg_precision_correct += precision_correct\n precision_error = accuracy_score(\n y.loc[test].loc[y.loc[test] != 0], results[y.loc[test] != lb.transform(['correct'])[0]])\n avg_precision_error += precision_error\n\n print('Precisao - Fold {}: {:.2f}%'.format(fold, precision * 100))\n print('Precisao corretas - Fold {}: {:.2f}%'.format(fold,\n precision_correct * 100))\n print('Precisao erros - Fold {}: {:.2f}%\\n'.format(fold, precision_error * 100))\n fold += 1\n avg_precision /= 10\n avg_precision_correct /= 10\n avg_precision_error /= 10\n print('Precisao media: {:.2f}%'.format(avg_precision * 100))\n print('Precisao media corretas: {:.2f}%'.format(\n avg_precision_correct * 100))\n print('Precisao media erros: {:.2f}%'.format(avg_precision_error * 100))\n print('------------------------------')\n\n print('Perceptron')\n avg_precision = 0\n avg_precision_correct = 0\n avg_precision_error = 0\n fold = 1\n for (train, test) in kf.split(X):\n perceptron = Perceptron()\n perceptron.fit(X.loc[train], y.loc[train])\n\n results = perceptron.predict(X.loc[test])\n precision = accuracy_score(y.loc[test], results)\n avg_precision += precision\n\n precision_correct = accuracy_score(\n y.loc[test].loc[y.loc[test] == 0], results[y.loc[test] == lb.transform(['correct'])[0]])\n avg_precision_correct += 
precision_correct\n precision_error = accuracy_score(\n y.loc[test].loc[y.loc[test] != 0], results[y.loc[test] != lb.transform(['correct'])[0]])\n avg_precision_error += precision_error\n\n print('Precisao - Fold {}: {:.2f}%'.format(fold, precision * 100))\n print('Precisao corretas - Fold {}: {:.2f}%'.format(fold,\n precision_correct * 100))\n print('Precisao erros - Fold {}: {:.2f}%\\n'.format(fold, precision_error * 100))\n fold += 1\n avg_precision /= 10\n avg_precision_correct /= 10\n avg_precision_error /= 10\n print('Precisao media: {:.2f}%'.format(avg_precision * 100))\n print('Precisao media corretas: {:.2f}%'.format(\n avg_precision_correct * 100))\n print('Precisao media erros: {:.2f}%'.format(avg_precision_error * 100))\n print('------------------------------')\n\n print('Random Forest - GINI')\n avg_precision = 0\n avg_precision_correct = 0\n avg_precision_error = 0\n fold = 1\n for (train, test) in kf.split(X):\n random_forest = RandomForestClassifier()\n random_forest.fit(X.loc[train], y.loc[train])\n\n results = random_forest.predict(X.loc[test])\n precision = accuracy_score(y.loc[test], results)\n avg_precision += precision\n\n precision_correct = accuracy_score(\n y.loc[test].loc[y.loc[test] == 0], results[y.loc[test] == lb.transform(['correct'])[0]])\n avg_precision_correct += precision_correct\n precision_error = accuracy_score(\n y.loc[test].loc[y.loc[test] != 0], results[y.loc[test] != lb.transform(['correct'])[0]])\n avg_precision_error += precision_error\n\n print('Precisao - Fold {}: {:.2f}%'.format(fold, precision * 100))\n print('Precisao corretas - Fold {}: {:.2f}%'.format(fold,\n precision_correct * 100))\n print('Precisao erros - Fold {}: {:.2f}%\\n'.format(fold, precision_error * 100))\n fold += 1\n avg_precision /= 10\n avg_precision_correct /= 10\n avg_precision_error /= 10\n print('Precisao media: {:.2f}%'.format(avg_precision * 100))\n print('Precisao media corretas: {:.2f}%'.format(\n avg_precision_correct * 100))\n print('Precisao media erros: {:.2f}%'.format(avg_precision_error * 100))\n print('------------------------------')\n\n print('Random Forest - Entropy')\n avg_precision = 0\n avg_precision_correct = 0\n avg_precision_error = 0\n fold = 1\n for (train, test) in kf.split(X):\n random_forest = RandomForestClassifier(criterion='entropy')\n random_forest.fit(X.loc[train], y.loc[train])\n\n results = random_forest.predict(X.loc[test])\n precision = accuracy_score(y.loc[test], results)\n avg_precision += precision\n\n precision_correct = accuracy_score(\n y.loc[test].loc[y.loc[test] == 0], results[y.loc[test] == lb.transform(['correct'])[0]])\n avg_precision_correct += precision_correct\n precision_error = accuracy_score(\n y.loc[test].loc[y.loc[test] != 0], results[y.loc[test] != lb.transform(['correct'])[0]])\n avg_precision_error += precision_error\n\n print('Precisao - Fold {}: {:.2f}%'.format(fold, precision * 100))\n print('Precisao corretas - Fold {}: {:.2f}%'.format(fold,\n precision_correct * 100))\n print('Precisao erros - Fold {}: {:.2f}%\\n'.format(fold, precision_error * 100))\n fold += 1\n avg_precision /= 10\n avg_precision_correct /= 10\n avg_precision_error /= 10\n print('Precisao media: {:.2f}%'.format(avg_precision * 100))\n print('Precisao media corretas: {:.2f}%'.format(\n avg_precision_correct * 100))\n print('Precisao media erros: {:.2f}%'.format(avg_precision_error * 100))\n print('------------------------------')\n\n print('Naive Bayes')\n avg_precision = 0\n avg_precision_correct = 0\n avg_precision_error = 0\n fold = 1\n 
for (train, test) in kf.split(X):\n naive_bayes = BernoulliNB()\n naive_bayes.fit(X.loc[train], y.loc[train])\n\n results = naive_bayes.predict(X.loc[test])\n precision = accuracy_score(y.loc[test], results)\n avg_precision += precision\n\n precision_correct = accuracy_score(\n y.loc[test].loc[y.loc[test] == 0], results[y.loc[test] == lb.transform(['correct'])[0]])\n avg_precision_correct += precision_correct\n precision_error = accuracy_score(\n y.loc[test].loc[y.loc[test] != 0], results[y.loc[test] != lb.transform(['correct'])[0]])\n avg_precision_error += precision_error\n\n print('Precisao - Fold {}: {:.2f}%'.format(fold, precision * 100))\n print('Precisao corretas - Fold {}: {:.2f}%'.format(fold,\n precision_correct * 100))\n print('Precisao erros - Fold {}: {:.2f}%\\n'.format(fold, precision_error * 100))\n fold += 1\n avg_precision /= 10\n avg_precision_correct /= 10\n avg_precision_error /= 10\n print('Precisao media: {:.2f}%'.format(avg_precision * 100))\n print('Precisao media corretas: {:.2f}%'.format(\n avg_precision_correct * 100))\n print('Precisao media erros: {:.2f}%'.format(avg_precision_error * 100))\n\n\ndef main():\n \"\"\"Main function\n \"\"\"\n blast_reader = BlastReader(BLAST_PATH)\n src_lines = list()\n sys_lines = list()\n target = list()\n\n # Files for GIZA\n src_file = open('/tmp/src.txt', 'w')\n sys_file = open('/tmp/sys.txt', 'w')\n\n # Correct sentences\n for i in blast_reader.get_correct_indices():\n src_lines.append(blast_reader.src_lines[i])\n sys_lines.append(blast_reader.sys_lines[i])\n target.append('correct')\n\n # Write files for GIZA\n src_file.write(' '.join(blast_reader.src_lines[i]))\n src_file.write('\\n')\n sys_file.write(' '.join(blast_reader.sys_lines[i]))\n sys_file.write('\\n')\n\n # Error lines\n errors = blast_reader.get_filtered_errors(ERRORS)\n # errors = blast_reader.error_lines\n for (line, error) in errors:\n src_lines.append(blast_reader.src_lines[line])\n sys_lines.append(blast_reader.sys_lines[line])\n target.append(error)\n\n src_file.write(' '.join(blast_reader.src_lines[line]))\n src_file.write('\\n')\n sys_file.write(' '.join(blast_reader.sys_lines[line]))\n sys_file.write('\\n')\n src_file.close()\n sys_file.close()\n\n # Tag sentences\n print('Tagging sentences', file=sys.stderr)\n tagged_lines = tag_sentences(src_lines, sys_lines)\n\n # Align sentences\n print('Aligning sentences', file=sys.stderr)\n application_path = str(os.path.abspath(os.path.curdir))\n proc = subprocess.Popen([application_path + '/src/aligner/align_sentences.sh',\n '--srcpath', '/tmp/src.txt',\n '--syspath', '/tmp/sys.txt'],\n stdout=subprocess.PIPE)\n out = proc.communicate()\n num_sents = int(out[0])\n giza_reader = GIZAReader('/tmp/giza.output')\n alignments = giza_reader.aligned_lines[:num_sents]\n\n # Extract features\n print('Extracting features', file=sys.stderr)\n training_instances = list()\n ignored_instances = 0\n for (i, sent) in progressbar.progressbar(enumerate(tagged_lines)):\n features = extract_features(\n sent, alignments[i]['alignment'], TW_SZ, target[i])\n if features:\n training_instances.append(features)\n else:\n ignored_instances += 1\n print('Finalizado!', file=sys.stderr)\n print('Instancias ignoradas: {}'.format(ignored_instances), file=sys.stderr)\n\n print('Iniciando treinamento', file=sys.stderr)\n data = format_features(training_instances)\n test_correct_error(data)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.KFold",
"sklearn.linear_model.Perceptron",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.naive_bayes.BernoulliNB",
"sklearn.svm.LinearSVC",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
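test_correct_error in the record above repeats an identical 10-fold loop once per classifier, and its Portuguese status messages report "precisao" (precision) although the metric computed is accuracy_score. A hedged sketch of how that duplication could collapse into one helper (illustrative only; same splits and metric as the original):

from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier

def evaluate(clf_factory, X, y, n_splits=10):
    # One shared k-fold loop instead of one hand-written copy per classifier.
    kf = KFold(n_splits=n_splits, shuffle=True)
    scores = []
    for train, test in kf.split(X):
        clf = clf_factory()
        # .loc works here because the original data uses a default RangeIndex.
        clf.fit(X.loc[train], y.loc[train])
        scores.append(accuracy_score(y.loc[test], clf.predict(X.loc[test])))
    return sum(scores) / len(scores)

# e.g. evaluate(lambda: DecisionTreeClassifier(criterion='entropy'), X, y)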
asokraju/sb3-seir | [
"d3e07af143e7eeec5b812cd9607a01ed48a8aecc"
] | [
"gym_seir/envs/Tseir_env.py"
] | [
"import numpy as np\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nclass TSeirEnv(gym.Env):\n \"\"\"\n Description:\n Each city's population is broken down into four compartments --\n Susceptible, Exposed, Infectious, and Removed -- to model the spread of\n COVID-19.\n Source:\n Code modeled after cartpole.py from\n github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py\n \n Time:\n discretizing_time: time in minutes used to discretizing the model\n sampling_time: time in days that we sample from the system\n sim_length: time in days\n \n \n Observation*:\n Type: Box(4,)\n Num Observation Min Max\n 0 Susceptible 0 Total Population\n 1 Exposed 0 Total Population\n 2 Infected 0 Total Population\n 3 Recovered 0 Total Population\n 4 Day 0 25\n \n \n Actions*:\n Type: Box(4,), min=0 max=2\n Num Action Change in model Crowd density\n 0 Lockdown affect transmission rate 0\n 1 Social distancing affect transmission rate 0.5-1 = 0.75 \n 2 No Social distancing affect transmission rate 1-5 = 1.5\n \n Reward:\n reward = weight * economic cost + (1-weight) * public health cost\n \n Economic cost:\n Num Action Crowd density cost\n 0 Lockdown 0 1.0\n 1 Social distancing 0.5-1 = 0.75 0.25\n 2 No Social distancing (regular day) 1-5 = 1.5 0.0\n Health cost: min max\n 1.0 - 0.00001* number of infected 0.0 1.0\n weight:\n a user defined weight. Default 0.5\n Episode Termination:\n Episode length (time) reaches specified maximum (end time)\n The end of analysis period is ~170 days\n \"\"\"\n\n\n metadata = {'render.modes': ['console']}\n\n def __init__(\n self, \n discretizing_time = 5, \n sampling_time = 7, \n sim_length = 175, \n weight = 0.5, \n theta = 113.92, \n inital_state = [99666., 81., 138., 115., 0.], \n state_normalization = True,\n validation = False,\n noise = False,\n noise_percent = 0):\n super(TSeirEnv, self).__init__()\n\n self.dt = discretizing_time/(24*60)\n self.Ts = sampling_time\n self.time_steps = int((self.Ts) / self.dt)\n \n self.popu = 1e5 #100000\n self.state_normalization = state_normalization\n self.trainNoise = False\n self.weight = weight #reward weighting\n self.inital_state = np.array(inital_state, dtype=float)\n self.validation = validation\n\n #model paramenters\n self.theta = np.full(shape=1, fill_value=theta, dtype=float) \n\n self.d = np.full(shape=1, fill_value=1/24, dtype=float)\n\n self.sigma = 1.0/5 # needds to be changed?\n\n # Recovery rate\n self.gamma = 0.05 # needs to be changed?\n\n # total number of actions\n self.n_actions = 3\n\n # Crowd densities\n self.rho = np.array([0.044, 0.25, 1.], dtype=float)\n self.infection_rate = self.theta * (self.d ** 2) * self.rho\n # Resulting Infection rate = beta = rho*theta*d^2 = [0.009, 0.049, 0.196]\n # there is some non-zero infection rate duction lockdown --> due to necessary supply chains that we cannot stop for survival\n # Reproduction number = R0 = beta/gamma = [0.18, 0.98, 3.92]\n # If R0 is less than one the disease will die out, and if R0>1 the disease will increase exponentially\n #Economic costs \n self.eco_costs = np.array([1, 0.2, 0.0], dtype=float) \n\n #gym action space and observation space\n self.action_space = spaces.Discrete(3)\n self.observation_space = spaces.Box(0, np.inf, shape=(5,), dtype=np.float64)\n\n #Total number of simulation days\n self.sim_length = sim_length\n self.daynum = 0\n\n # noise\n self.noise = noise\n self.noise_percent = noise_percent\n\n #seeding\n self.seed()\n\n #memory to save the trajectories\n self.state_trajectory = []\n 
self.action_trajectory = []\n self.rewards = []\n self.weekly_rewards = []\n self.count = 0\n \n # initialize state\n self.get_state()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n \n def get_state(self):\n if not self.validation:\n # init_E = np.random.normal(self.inital_state[1],self.inital_state[1]*0.1) \n # # np.random.randint(self.inital_state[1]*0.1, high=self.inital_state[1]*2)\n # init_I = np.random.normal(self.inital_state[2],self.inital_state[1]*0.1) \n # #np.random.randint(self.inital_state[2]*0.1, high=self.inital_state[2]*2)\n # init_R = np.random.normal(self.inital_state[3],self.inital_state[1]*0.1) \n # #np.random.randint(self.inital_state[3]*0.1, high=self.inital_state[3]*2)\n # init_S = self.popu - init_E - init_I - init_R\n # self.state = np.array([init_S, init_E, init_I, init_R], dtype=float)\n self.state = self.random_uniform_state()\n else:\n self.state = self.inital_state\n self.state_trajectory.append(list(self.state))\n\n def random_uniform_state(self):\n S = np.random.uniform(low=0.0, high=self.popu)\n E = np.random.uniform(low=0.0, high=self.popu-S)\n I = np.random.uniform(low=0.0, high=self.popu-(S+E))\n R = self.popu-(S+E+I)\n T = 0\n return np.array([S,E,I,R, T])\n\n def set_state(self, state):\n err_msg = \"%s is Invalid. S+E+I+R not equal to %s\" % (state, self.popu)\n assert self.popu==sum(state[:-1]), err_msg\n err_msg = \"%s should be of the form (S,E,I,R,T), where T=0\" % (state)\n assert state.shape[0]==5, err_msg\n self.state = state\n \n def normalize_state(self,state):\n if self.state_normalization:\n S, E, I, R, T = state[0], state[1], state[2], state[3], state[4]\n S, E, I, R, T = S/self.popu, E/self.popu, I/self.popu, R/self.popu, float(T)/float(self.sim_length)\n return np.array([S, E, I, R, T], dtype=float)\n else:\n return state\n\n def mini_step(self, rho, day):\n\n # action should be with in 0 - 2\n # \n beta = self.theta * (self.d ** 2) * rho\n S, E, I, R, T = self.state\n\n dS = - (beta) * I * S / self.popu\n dE = - dS - (self.sigma * E)\n dI = (self.sigma * E) - (self.gamma * I)\n dR = (self.gamma * I)\n\n new_S = S + self.dt * dS\n new_E = E + self.dt * dE\n new_I = I + self.dt * dI\n new_R = R + self.dt * dR\n\n return np.array([new_S, new_E, new_I, new_R, float(self.daynum)+float(day)], dtype =float)\n\n def step(self, action):\n\n for ts in range(self.time_steps):\n self.state = self.mini_step(self.rho[action], day=(ts+1)*self.dt)\n\n # saving the states and actions in the memory buffers\n self.state_trajectory.append(list(self.state))\n self.action_trajectory.append(action)\n self.count += 1\n self.daynum += self.Ts\n # Costs\n # action represent the crowd density, so decrease in crowd density increases the economic cost\n economicCost = self.eco_costs[action] * self.Ts * 0.91\n\n # Public health Cost increases with increase in Infected people.\n # publichealthCost = (1.45e-5 * (self.state[2]+self.state[3])) * self.Ts\n Delta_S = self.state_trajectory[-1-self.time_steps][0] - self.state_trajectory[-1][0]\n publichealthCost = Delta_S/620.0\n \n # Rewards\n reward = - self.weight * economicCost - (1. 
- self.weight) * publichealthCost\n reward = reward / 150.\n self.weekly_rewards.append(reward)\n # Check if episode is over\n done = bool(self.daynum >= self.sim_length)\n\n # saving the states and actions in the memory buffers\n #self.state_trajectory.append(list(self.state))\n #self.action_trajectory.append(action)\n for _ in range(self.time_steps):\n self.rewards.append(reward)\n if not self.noise:\n return self.normalize_state(self.state), reward, done, {}\n else:\n S, E, I, R = self.state[0], self.state[1], self.state[2], self.state[3]\n I = (1 - (self.noise_percent / 100) ) * I\n S = (1 + (self.noise_percent / 100) ) * S\n noisy_state = np.array([S, E, I, R, self.state[4]], dtype =float)\n return self.normalize_state(noisy_state), reward, done, {}\n \n def reset(self):\n\n # reset to initial conditions\n self.daynum = 0\n\n #memory reset\n self.state_trajectory = []\n self.action_trajectory = []\n self.rewards = []\n self.weekly_rewards = []\n self.count = 0\n self.get_state()\n\n return self.normalize_state(self.state)\n \n def render(self, mode='console'):\n if mode != 'console':\n raise NotImplementedError()\n print(\"not implemented\")\n\n def close(self):\n pass\n"
] | [
[
"numpy.random.uniform",
"numpy.array",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
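Working TSeirEnv.step's reward through by hand makes the scaling concrete: with the defaults weight=0.5 and Ts=7, action 1 (social distancing) incurs an economic term of eco_costs[1] * Ts * 0.91 = 0.2 * 7 * 0.91 = 1.274; if susceptibles fell by 620 over the week, the health term is 620/620 = 1.0, giving reward = -(0.5 * 1.274 + 0.5 * 1.0) / 150 ≈ -0.0076. A standalone check of that arithmetic (delta_S is an assumed input):

# Mirrors the reward arithmetic in TSeirEnv.step with assumed inputs.
weight, Ts = 0.5, 7
eco_costs = [1, 0.2, 0.0]
action = 1           # social distancing
delta_S = 620.0      # assumed weekly drop in susceptibles

economic = eco_costs[action] * Ts * 0.91
health = delta_S / 620.0
reward = -(weight * economic + (1 - weight) * health) / 150.0
print(round(reward, 4))  # -0.0076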
YangChuan80/BellfortSequenceParser | [
"87af8e42a70c772cc12e757be3cc18e55f247f0e"
] | [
"Source/BellfortSequenceParser.py"
] | [
"# Bellfort Sequence Parser\n\n## Modules\n\nimport numpy as np\nimport pandas as pd\nimport tkinter as tk\nfrom tkinter import ttk\nimport tkinter.font as tkf\nfrom tkinter import messagebox\nfrom tkinter import filedialog\nimport threading\nimport time\nimport os\nimport shutil\n\n## Helper Functions\n### Reverse Complement\n\ndef reverseComplement(sequence):\n complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}\n rc_sequence=''\n for s in sequence:\n rc_sequence = complement[s] + rc_sequence\n return rc_sequence\n\n### FASTQ File Browse\n\ndef buttonBrowseFASTQ():\n global filenameFASTQ, indicator_preprocess\n \n try:\n filenameFASTQ = filedialog.askopenfilename(filetypes=(('FASTQ files', '*.fastq'), \n ('All files', '*.*')))\n text_fileFASTQ.delete('1.0', tk.END)\n text_fileFASTQ.insert('1.0', filenameFASTQ.split('/')[-1])\n \n # Reset the progress bar///////////////\n progressbar['value'] = 0\n progressbar_loadFASTQ['value'] = 0\n \n # Reset the percentage\n text_percentage.delete('1.0', tk.END)\n text_percentage.insert('1.0', str('0%'))\n \n indicator_preprocess = 0\n except:\n filenameFASTQ = '' \n\n### FASTQ File Load\n\ndef loadFASTQ():\n global reads\n \n start_time = time.time() \n \n f = open(filenameFASTQ)\n\n reads = []\n\n try:\n while 1:\n name = f.readline().rstrip()\n sequence = f.readline().rstrip()\n f.readline()\n quality = f.readline().rstrip()\n\n if len(name) == 0:\n break\n\n union = name, sequence\n\n reads.append(union) \n\n end_time = time.time()\n delta_time = end_time - start_time\n\n text_time.delete('1.0', tk.END)\n text_time.insert('1.0', str(delta_time)) \n\n text_readNum.delete('1.0', tk.END)\n text_readNum.insert('1.0', str(len(reads))) \n\n except:\n messagebox.showwarning(\"File Loading Failed\", \n \"Sorry, file loading failed! Please check the file format.\")\n f.close()\n\ndef start_loadFASTQ_thread(event):\n global loadFASTQ_thread\n \n if filenameFASTQ != '':\n loadFASTQ_thread = threading.Thread(target=loadFASTQ)\n loadFASTQ_thread.daemon = True\n\n progressbar_loadFASTQ.start(10)\n loadFASTQ_thread.start()\n root.after(20, check_loadFASTQ_thread)\n else:\n messagebox.showwarning(\"No File\", \n \"Sorry, no file loaded! 
Please choose FASTQ file first.\")\n\ndef check_loadFASTQ_thread():\n if loadFASTQ_thread.is_alive():\n progressbar_loadFASTQ.start(10)\n root.after(20, check_loadFASTQ_thread)\n else:\n progressbar_loadFASTQ.stop()\n progressbar_loadFASTQ['value']=100\n messagebox.showinfo(\"FASTQ File Loaded\", \"FASTQ file successfully loaded!\")\n\n### Divide FASTQ File\n\ndef divideFASTQ(): \n start_time = time.time() \n\n gotten = text_readNumDivided.get('1.0', tk.END)\n readNumDivided = int(gotten.rstrip())\n\n if os.path.exists(filenameFASTQ+'.folder'): \n # Remove the folder previously made:\n shutil.rmtree(filenameFASTQ+'.folder')\n\n # Make a new one:\n os.makedirs(filenameFASTQ+'.folder') \n\n line_num = 0\n file_no = 1\n\n f_input = open(filenameFASTQ) \n f_output = open(filenameFASTQ+'.folder/' + filenameFASTQ.split('/')[-1] + '__Slice_No_' + str(file_no) + '.fastq', 'w') \n\n while 1:\n # Input ///////////////////////////////////\n name = f_input.readline()\n sequence = f_input.readline()\n f_input.readline()\n quality = f_input.readline()\n\n if len(name) == 0:\n break \n\n # Output ////////////////////////////////////\n\n f_output.write(name)\n f_output.write(sequence)\n f_output.write('+\\n')\n f_output.write(quality) \n\n line_num += 1\n\n if line_num == readNumDivided: \n f_output.close() \n file_no += 1\n f_output = open(filenameFASTQ+'.folder/' + filenameFASTQ.split('/')[-1] + '__Slice_No_' + str(file_no) + '.fastq', 'w')\n line_num = 0 \n\n end_time = time.time()\n delta_time = end_time - start_time\n\n text_time.delete('1.0', tk.END)\n text_time.insert('1.0', str(delta_time)) \n\n f_input.close()\n f_output.close()\n\ndef start_divideFASTQ_thread(event):\n global divideFASTQ_thread\n \n if filenameFASTQ != '':\n divideFASTQ_thread = threading.Thread(target=divideFASTQ)\n divideFASTQ_thread.daemon = True\n\n progressbar_loadFASTQ.start(10)\n divideFASTQ_thread.start()\n root.after(20, check_divideFASTQ_thread)\n else:\n messagebox.showwarning(\"No File\", \n \"Sorry, no file loaded! Please choose FASTQ file first.\")\n\ndef check_divideFASTQ_thread():\n if divideFASTQ_thread.is_alive():\n progressbar_loadFASTQ.start(10)\n root.after(20, check_divideFASTQ_thread)\n else:\n progressbar_loadFASTQ.stop()\n progressbar_loadFASTQ['value']=100\n messagebox.showinfo(\"FASTQ File Divided\", \"FASTQ file has been successfully divided!\")\n\n### Preprocess\n\ndef preprocessFASTQ():\n global reads, indicator_preprocess, kmer_dict_reads\n \n try:\n num = len(reads) \n indicator_preprocess = 0\n gain = 50/num\n\n gotten = text_sequence_len.get('1.0', tk.END)\n k = int(gotten.rstrip())\n \n if k > len(reads[0][1]):\n messagebox.showwarning(\"Target Sequence Length Error\", \n \"Sorry, the target sequence length is more than read length. Please check.\")\n elif k < 3:\n messagebox.showwarning(\"Sequence Too Short\", \n \"Sorry, the target sequence length is too short which will make the program running slowly. Please check.\")\n elif filenameSequences == '':\n messagebox.showwarning(\"No Sequences Loaded\", \n \"Sorry, no sequences loaded! 
Please load sequences first.\")\n else:\n kmer_dict_reads = {}\n\n start_time = time.time()\n\n for read in reads:\n for i in range(len(read[1])-k+1):\n kmer_dict_reads[read[1][i:i+k]] = set()\n indicator_preprocess += gain \n\n for read in reads:\n for i in range(len(read[1])-k+1):\n kmer_dict_reads[read[1][i:i+k]].add(read)\n indicator_preprocess += gain\n \n indicator_progress = 100\n \n # Add MatchAll Here ///////////////////////////////////////////////////////\n matchAll()\n\n end_time = time.time()\n delta_time = end_time - start_time\n\n text_time.delete('1.0', tk.END)\n text_time.insert('1.0', str(delta_time)) \n \n messagebox.showinfo(\"Preprocess FASTQ & Count Matched Sequences Completed\", \n \"Current FASTQ preprocess & matched sequence counts successfully completed!\")\n\n except NameError:\n messagebox.showwarning(\"No FASTQ File Loaded\", \n \"Sorry, no loaded FASTQ file found! Please load FASTQ file first.\")\n\ndef start_preprocess_thread(event):\n global preprocess_thread, indicator_preprocess\n preprocess_thread = threading.Thread(target=preprocessFASTQ)\n preprocess_thread.daemon = True\n \n progressbar['value'] = indicator_preprocess\n text_percentage.delete('1.0', tk.END)\n text_percentage.insert('1.0', str(int(indicator_preprocess))+'%')\n \n preprocess_thread.start()\n root.after(20, check_preprocess_thread)\n\ndef check_preprocess_thread():\n if preprocess_thread.is_alive():\n progressbar['value'] = indicator_preprocess\n text_percentage.delete('1.0', tk.END)\n text_percentage.insert('1.0', str(int(indicator_preprocess))+'%')\n \n root.after(20, check_preprocess_thread)\n\n### Match All\n\ndef matchAll():\n global kmer_dict_reads, indicator_matchAll, df\n \n try:\n len(kmer_dict_reads) \n num = len(df)\n \n if num == 0:\n messagebox.showwarning(\"No Sequences Loaded\", \n \"Sorry, no sequences loaded! Please load sequences first.\")\n else: \n indicator_matchAll = 0\n gain = 1000000/num\n\n start_time = time.time()\n\n arr = np.array(df)\n\n for i in range(len(arr)):\n key1 = arr[i,2]\n key2 = reverseComplement(key1)\n \n try:\n n1 = len(kmer_dict_reads[key1])\n except KeyError:\n n1 = 0\n \n try:\n n2 = len(kmer_dict_reads[key2])\n except KeyError:\n n2 = 0\n \n arr[i, 4] += n1 + n2\n arr[i, 5] += 1\n \n indicator_matchAll += gain\n\n df = pd.DataFrame(arr, columns = ['gene_id', 'UID', 'seq', 'Reserved', 'Count', 'Tag'])\n #df = df.set_index('UID', drop=False) \n\n end_time = time.time()\n delta_time = end_time - start_time\n\n text_time.delete('1.0', tk.END)\n text_time.insert('1.0', str(delta_time)) \n\n except NameError:\n messagebox.showwarning(\"No FASTQ Preprocessed or No Sequences Loaded\", \n \"Sorry, no FASTQ preprocess implemented or no sequences file loaded! 
Please preprocess FASTQ or load sequences first.\") \n\ndef start_matchAll_thread(event):\n global matchAll_thread, indicator_matchAll\n matchAll_thread = threading.Thread(target=matchAll)\n matchAll_thread.daemon = True\n \n progressbar['value'] = indicator_matchAll\n \n matchAll_thread.start()\n root.after(20, check_matchAll_thread)\n\ndef check_matchAll_thread():\n if matchAll_thread.is_alive():\n progressbar['value'] = indicator_matchAll\n \n root.after(20, check_matchAll_thread)\n else:\n messagebox.showinfo(\"Matching Completed\", \n \"Counting of sequences matched successfully completed!\")\n\n### Match Single\n\ndef buttonMatch():\n gotten = text_sequence.get('1.0', tk.END)\n p1 = gotten.rstrip() \n p2 = reverseComplement(p1)\n \n if p1 == '' or p2 == '':\n messagebox.showwarning(\"No Sequence Found\", \n \"Sorry, no sequence found in the text blank above! Please check the sequence.\")\n else:\n try:\n len(kmer_dict_reads)\n try:\n n1 = len(kmer_dict_reads[p1])\n except KeyError:\n n1 = 0\n \n try:\n n2 = len(kmer_dict_reads[p2])\n except KeyError:\n n2 = 0\n \n count = n1 + n2\n \n text_count.delete('1.0', tk.END)\n text_count.insert('1.0', str(count))\n \n except NameError:\n messagebox.showwarning(\"No FASTQ Preprocessed\", \n \"Sorry, no FASTQ preprocess implemented! Please preprocess FASTQ first.\")\n\n### File of Target Sequence Load\n\ndef loadSequences():\n global filenameSequences, df, recordNum\n \n progressbar_loadSequences['value'] = 0\n try:\n filenameSequences = filedialog.askopenfilename(filetypes=(('Comma-Separated (CSV) text file', '*.csv'), ('All files', '*.*')))\n text_fileSequences.delete('1.0', tk.END)\n text_fileSequences.insert('1.0', filenameSequences.split('/')[-1])\n except:\n filenameSequences = '' \n \n if filenameSequences == '':\n messagebox.showwarning(\"No File\", \"Sorry, no file chosen! Please choose file of sequences first.\")\n else: \n try:\n start_time = time.time()\n \n df = pd.read_csv(filenameSequences)\n df['count'] = 0\n df['tag'] = 0\n #df = df.set_index('UID', drop=False) \n \n recordNum = len(df)\n \n progressbar_loadSequences['value'] = 100\n \n end_time = time.time()\n delta_time = end_time - start_time\n \n text_time.delete('1.0', tk.END)\n text_time.insert('1.0', str(delta_time))\n \n text_recordNum.delete('1.0', tk.END)\n text_recordNum.insert('1.0', str(recordNum))\n \n messagebox.showinfo(\"File of Sequences Loaded\", \"File of sequences successfully loaded!\") \n except:\n messagebox.showwarning(\"File Loading Failed\", \"Sorry, file loading failed! Please check the file format.\") \n\n### Load Half-matched Sequences\n\ndef buttonLoadHalfMatchedSequences():\n global df, recordNum, filenameSequences\n \n progressbar_loadSequences['value'] = 0\n try:\n filenameSequences = filedialog.askopenfilename(filetypes=(('Comma-Separated (CSV) text file', '*.csv'), ('All files', '*.*')))\n text_fileSequences.delete('1.0', tk.END)\n text_fileSequences.insert('1.0', filenameSequences.split('/')[-1])\n except:\n filenameSequences = '' \n \n if filenameSequences == '':\n messagebox.showwarning(\"No File\", \"Sorry, no file chosen! 
Please choose file of sequences first.\")\n else: \n try:\n start_time = time.time()\n \n df = pd.read_csv(filenameSequences) \n df = df.set_index('Unnamed: 0', drop=True) \n \n recordNum = len(df)\n \n progressbar_loadSequences['value'] = 100\n \n end_time = time.time()\n delta_time = end_time - start_time\n \n text_time.delete('1.0', tk.END)\n text_time.insert('1.0', str(delta_time))\n \n text_recordNum.delete('1.0', tk.END)\n text_recordNum.insert('1.0', str(recordNum))\n \n messagebox.showinfo(\"File of Half Matched Sequences Loaded\", \"File of half matched sequences successfully loaded!\") \n except:\n messagebox.showwarning(\"File Loading Failed\", \"Sorry, file loading failed! Please check the file format.\") \n\n### Table Events\n\ndef OnDoubleClick(event):\n item = table.selection()[0]\n value = table.item(item, 'values')\n geneID = value[0]\n uid = value[1]\n sequence = value[2]\n rc_sequence = reverseComplement(sequence)\n \n text_geneID.delete('1.0', tk.END)\n text_geneID.insert('1.0', str(geneID))\n \n text_uid.delete('1.0', tk.END)\n text_uid.insert('1.0', str(uid))\n \n text_sequence.delete('1.0', tk.END)\n text_sequence.insert('1.0', str(sequence))\n \n text_rc_sequence.delete('1.0', tk.END)\n text_rc_sequence.insert('1.0', str(rc_sequence))\n \n\ndef sortby(tree, col, descending):\n \"\"\"sort tree contents when a column header is clicked on\"\"\"\n # grab values to sort\n data = [(tree.set(child, col), child) for child in tree.get_children('')]\n # if the data to be sorted is numeric change to float\n #data = change_numeric(data)\n # now sort the data in place\n data.sort(reverse=descending)\n for ix, item in enumerate(data):\n tree.move(item[1], '', ix)\n # switch the heading so it will sort in the opposite direction\n tree.heading(col, command=lambda col=col: sortby(tree, col, int(not descending)))\n\ndef display_in_table():\n try:\n for a in df.index:\n row = df.ix[a]\n table.insert(\"\", \"end\", \"\", values=tuple(row)) \n except NameError:\n messagebox.showwarning(\"No Sequences to be Displayed\", \n \"Sorry, there's no loaded sequences to be displayed! Please load sequence file first.\") \n\n### Other Button Functions\n\ndef clear():\n for i in table.get_children():\n table.delete(i)\n\ndef browse():\n start_time = time.time()\n clear()\n display_in_table()\n delta_time = time.time() - start_time\n \n text_time.delete('1.0', tk.END)\n text_time.insert('1.0', str(delta_time)) \n\ndef buttonExport(): \n if filenameSequences == '' or (filenameFASTQ == '' and len(filenameFASTQs) == 0):\n messagebox.showwarning(\"No File Loaded\", \n \"Sorry, no file loaded! Please choose sequence file and FASTQ file first.\")\n else:\n try:\n len(df)\n len(reads)\n directory = filedialog.askdirectory()\n df.to_csv(directory + '/Counts of ' + filenameSequences.split('/')[-1] + '__matchedWith__' + filenameFASTQs[0].split('/')[-1] + '.csv')\n messagebox.showinfo(\"File Exported\", \"File of counted sequences successfully exported!\") \n except NameError:\n messagebox.showwarning(\"Error: No Counted DataFrame Generated\", \n \"Sorry, no effective counted DataFrame generated! 
Please check the previous workflow.\")\n\ndef buttonAbout():\n about_root=tk.Tk()\n \n w = 380 # width for the Tk root\n h = 310 # height for the Tk root\n\n # get screen width and height\n ws = about_root.winfo_screenwidth() # width of the screen\n hs = about_root.winfo_screenheight() # height of the screen\n\n # calculate x and y coordinates for the Tk root window\n x = (ws/2) - (w/2)\n y = (hs/2) - (h/2)\n\n # set the dimensions of the screen \n # and where it is placed\n about_root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n about_root.title('About Bellfort Sequence Parser') \n about_root.iconbitmap('dna.ico')\n\n label_author=tk.Label(about_root,text='Bellfort Sequence Parser Version 2.0', font=('tahoma', 9))\n label_author.place(x=90,y=30)\n\n label_author=tk.Label(about_root,text='Copyright (C) 2016', font=('tahoma', 9))\n label_author.place(x=125,y=60)\n \n label_author=tk.Label(about_root,text='Chen Lab', font=('tahoma', 9))\n label_author.place(x=150,y=90)\n \n label_author=tk.Label(about_root,text='Human Genome Sequencing Center', font=('tahoma', 9))\n label_author.place(x=80,y=120)\n \n label_author=tk.Label(about_root,text='Department of Molecular and Human Genetics', font=('tahoma', 9))\n label_author.place(x=50,y=150)\n \n label_author=tk.Label(about_root,text='Baylor College of Medicine', font=('tahoma', 9))\n label_author.place(x=110,y=180)\n \n\n button_okay=ttk.Button(about_root, width=15, text='OK', command=about_root.destroy)\n button_okay.place(x=130, y=235)\n\n about_root.mainloop()\n\n### Batch Mode\n#### FASTQ Files Loaded Button\n\ndef buttonBrowseFASTQs():\n global filenameFASTQs\n \n try:\n filenameFASTQs = filedialog.askopenfilenames(filetypes=(('FASTQ files', '*.fastq'), \n ('All files', '*.*')))\n text_fileFASTQs.delete('1.0', tk.END)\n text_fileFASTQs.insert('1.0', filenameFASTQs[0].split('/')[-1].split('__')[0])\n \n text_fileNum.delete('1.0', tk.END)\n text_fileNum.insert('1.0', str(len(filenameFASTQs)))\n \n # Reset the progress bar///////////////\n progressbar['value'] = 0\n progressbar_loadFASTQ['value'] = 0\n \n # Reset the percentage\n text_percentage.delete('1.0', tk.END)\n text_percentage.insert('1.0', str('0%'))\n \n indicator_preprocess = 0\n except:\n filenameFASTQs = ''\n\n#### Batch Process Button Series\n\ndef loadFASTQ_batch(filenameFASTQ):\n global reads\n \n start_time = time.time() \n \n f = open(filenameFASTQ)\n\n reads = []\n\n #try:\n while 1:\n name = f.readline().rstrip()\n sequence = f.readline().rstrip()\n f.readline()\n quality = f.readline().rstrip()\n\n if len(name) == 0:\n break\n\n union = name, sequence, quality\n\n reads.append(union)\n\n f.close()\n\n end_time = time.time()\n delta_time = end_time - start_time\n\n text_time.delete('1.0', tk.END)\n text_time.insert('1.0', str(delta_time)) \n\n text_readNum.delete('1.0', tk.END)\n text_readNum.insert('1.0', str(len(reads))) \n\n '''\n except:\n messagebox.showwarning(\"File Loading Failed\", \n \"Sorry, file loading failed! Please check the file format.\") '''\n\ndef preprocessFASTQ_batch():\n global reads, kmer_dict_reads, indicator_batch, gain_file\n \n try: \n gotten = text_sequence_len.get('1.0', tk.END)\n k = int(gotten.rstrip())\n \n if k > len(reads[0][1]):\n messagebox.showwarning(\"Target Sequence Length Error\", \n \"Sorry, the target sequence length is more than read length. Please check.\")\n elif k < 3:\n messagebox.showwarning(\"Sequence Too Short\", \n \"Sorry, the target sequence length is too short which will make the program running slowly. 
Please check.\")\n elif filenameSequences == '':\n messagebox.showwarning(\"No Sequences Loaded\", \n \"Sorry, no sequences loaded! Please load sequences first.\")\n else:\n kmer_dict_reads = {}\n \n gain = gain_file/(len(reads)*2)\n\n for read in reads:\n for i in range(len(read[1])-k+1):\n kmer_dict_reads[read[1][i:i+k]] = set()\n \n indicator_batch += gain \n \n for read in reads:\n for i in range(len(read[1])-k+1):\n kmer_dict_reads[read[1][i:i+k]].add(read)\n \n indicator_batch += gain \n\n except NameError:\n messagebox.showwarning(\"No FASTQ File Loaded\", \n \"Sorry, no loaded FASTQ file found! Please load FASTQ file first.\")\n\ndef matchAll_batch():\n global kmer_dict_reads, df, indicator_batch, gain, reads_matched\n \n try: \n arr = np.array(df)\n \n reads_matched = set()\n\n for i in range(len(arr)):\n key1 = arr[i,2]\n key2 = reverseComplement(key1)\n\n try:\n n1 = len(kmer_dict_reads[key1])\n except KeyError:\n n1 = 0\n\n try:\n n2 = len(kmer_dict_reads[key2])\n except KeyError:\n n2 = 0\n \n arr[i, 4] += n1 + n2\n arr[i, 5] += 1\n \n if n1 != 0:\n for read in kmer_dict_reads[key1]:\n start_point = read[1].find(key1)\n reads_matched.add((read[0], key1, read[2][start_point : start_point+len(key1)]))\n \n if n2 != 0:\n for read in kmer_dict_reads[key2]:\n start_point = read[1].find(key2)\n reads_matched.add((read[0], key2, read[2][start_point : start_point+len(key2)]))\n \n\n\n df = pd.DataFrame(arr, columns = ['gene_id', 'UID', 'seq', 'Reserved', 'Count', 'Tag'])\n #df = df.set_index('UID', drop=False) \n\n except NameError:\n messagebox.showwarning(\"No FASTQ Preprocessed or No Sequences Loaded\", \n \"Sorry, no FASTQ preprocess implemented or no sequences file loaded! Please preprocess FASTQ or load sequences first.\") \n\ndef autoExport(): \n if filenameSequences == '' or (filenameFASTQ == '' and len(filenameFASTQs) == 0):\n messagebox.showwarning(\"No File Loaded\", \n \"Sorry, no file loaded! Please choose sequence file and FASTQ file first.\")\n else:\n try:\n len(df)\n len(reads)\n \n df.to_csv(filenameSequences + '__matchedWith__' + filenameFASTQs[0].split('/')[-1].split('__')[0] + '.csv')\n \n except NameError:\n messagebox.showwarning(\"Error: No Counted DataFrame Generated\", \n \"Sorry, no effective counted DataFrame generated! Please check the previous workflow.\")\n\ndef batchProcess():\n global filenameFASTQs, indicator_batch, gain_file, kmer_dict_reads, reads_matched\n \n start_time = time.time() \n \n if filenameFASTQs == '':\n messagebox.showwarning('No FASTQ Chosen', \n 'Sorry, no FASTQ file chosen! Please browse and choose FASTQ file first.') \n \n elif len(df) == 0:\n messagebox.showwarning(\"No Sequences Loaded\", \n \"Sorry, no sequences loaded! Please load sequences first.\")\n else:\n indicator_batch = 0\n gain_file = 100/len(filenameFASTQs)\n \n # ///////////////// Main Stream of Batch Match ////////////////////////////////////////////////////\n \n f_trimmed = open(filenameFASTQs[0].split('__')[0] + '__trimmedBy__' + filenameSequences.split('/')[-1] + '.fastq', 'w')\n \n reads_matched = set()\n \n for filenameFASTQ in filenameFASTQs:\n loadFASTQ_batch(filenameFASTQ)\n preprocessFASTQ_batch()\n matchAll_batch()\n \n for read_matched in reads_matched:\n f_trimmed.write(read_matched[0]+'\\n')\n f_trimmed.write(read_matched[1]+'\\n')\n f_trimmed.write('+\\n')\n f_trimmed.write(read_matched[2]+'\\n')\n \n f_trimmed.close()\n \n autoExport() \n \n # Attention! 
Memory Recollected !!!!!!!!!!\n kmer_dict_reads = {}\n \n indicator_batch = 100\n \n messagebox.showinfo('Matching Completed', 'Tada! Counting of sequences matched successfully completed!') \n \n delta_time = time.time() - start_time\n \n \n \n text_time.delete('1.0', tk.END)\n text_time.insert('1.0', str(delta_time)) \n\ndef start_batch_thread(event):\n global batch_thread, indicator_batch\n batch_thread = threading.Thread(target=batchProcess)\n batch_thread.daemon = True\n \n progressbar_batch['value'] = indicator_batch\n text_percentage_batch.delete('1.0', tk.END)\n text_percentage_batch.insert('1.0', str(int(indicator_batch))+'%')\n \n batch_thread.start()\n root.after(20, check_batch_thread)\n\ndef check_batch_thread():\n if batch_thread.is_alive():\n progressbar_batch['value'] = indicator_batch\n text_percentage_batch.delete('1.0', tk.END)\n text_percentage_batch.insert('1.0', str(int(indicator_batch))+'%')\n \n root.after(20, check_batch_thread) \n\n## Main Flow\n\nheaders = ['gene_id', 'UID', 'seq', 'Reserved', 'count', 'tag']\nheader_widths = [280, 150, 350, 100, 80, 100]\n\nroot = tk.Tk()\n\nindicator_preprocess = 0\nindicator_loadSequences = 0\nindicator_matchAll = 0\nindicator_batch = 0\nfilenameSequences = ''\nfilenameFASTQ = ''\nrecordNum = 0\ncount = 0\ndf = pd.DataFrame([])\n\nroot.geometry(\"{0}x{1}+0+0\".format(root.winfo_screenwidth(), root.winfo_screenheight()))\n#root.attributes('-fullscreen', True)\nroot.title('Bellfort Sequence Parser')\nroot.iconbitmap('dna.ico')\n\n\n# Multicolumn Listbox/////////////////////////////////////////////////////////////////////////////\ntable = ttk.Treeview(height=\"20\", columns=headers, selectmode=\"extended\")\ntable.pack(padx=10, pady=20, ipadx=1200, ipady=100)\n\ni = 1\nfor header in headers:\n table.heading('#'+str(i), text=header.title(), anchor=tk.W, command=lambda c=header: sortby(table, c, 0))\n table.column('#'+str(i), stretch=tk.NO, minwidth=0, width=tkf.Font().measure(header.title())+header_widths[i-1]) \n i+=1 \ntable.column('#0', stretch=tk.NO, minwidth=0, width=0)\n\ntable.bind(\"<Double-1>\", OnDoubleClick)\n#///////////////////////////////////////////////////////////////////////////////////////////\n\n# Scrollbar////////////////////////////////////////////////////////////////////////////////////////\nvsb = ttk.Scrollbar(table, orient=\"vertical\", command = table.yview)\nhsb = ttk.Scrollbar(table, orient=\"horizontal\", command = table.xview)\n## Link scrollbars activation to top-level object\ntable.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)\n## Link scrollbar also to every columns\nmap(lambda col: col.configure(yscrollcommand=vsb.set,xscrollcommand=hsb.set), table)\nvsb.pack(side = tk.RIGHT, fill = tk.Y)\nhsb.pack(side = tk.BOTTOM, fill = tk.X) \n\n#//////////////////////////////////////////////////////////////////////////////////////////////\ny0 =310\ny1 = 350\ny2 = 420\ny3 = 460\ny4 = 520\ny5 = 555\ny6 = 595\ny7 = 645\ny8 = 695\n# Text /////////////////////////////////////////////////////////////////////////////////////\ntext_recordNum=tk.Text(root, width=18, height=1, font=('tahoma', 9), bd=2, wrap='none')\ntext_recordNum.place(x=840, y=y0)\nlabel_recordNum=tk.Label(root, text='records', font=('tahoma', 9))\nlabel_recordNum.place(x=1000,y=y0)\n\ntext_fileSequences=tk.Text(root, width=55, height=1, font=('tahoma', 9), bd=2, wrap='none')\ntext_fileSequences.place(x=60, y=y0)\n\ntext_fileFASTQ=tk.Text(root, width=36, height=1, font=('tahoma', 9), bd=2, wrap='none')\ntext_fileFASTQ.place(x=60, 
y=y4)\n\ntext_fileFASTQs=tk.Text(root, width=36, height=1, font=('tahoma', 9), bd=2, wrap='none')\ntext_fileFASTQs.place(x=60, y=y6+10)\n\ntext_count=tk.Text(root, width=16, height=1, font=('tahoma', 9), bd=2)\ntext_count.place(x=1000, y=y3)\nlabel_count=tk.Label(root, text='Count:', font=('tahoma', 9))\nlabel_count.place(x=940,y=y3)\n\ntext_geneID=tk.Text(root, width=20, height=1, font=('tahoma', 9), bd=2)\ntext_geneID.place(x=140, y=y2)\nlabel_geneID=tk.Label(root, text='Gene ID:', font=('tahoma', 9))\nlabel_geneID.place(x=60,y=y2)\n\ntext_uid=tk.Text(root, width=20, height=1, font=('tahoma', 9), bd=2)\ntext_uid.place(x=390, y=y2)\nlabel_uid=tk.Label(root, text='UID:', font=('tahoma', 9))\nlabel_uid.place(x=340,y=y2)\n\ntext_sequence=tk.Text(root, width=38, height=1, font=('tahoma', 9), bd=2)\ntext_sequence.place(x=680, y=y2)\nlabel_sequence=tk.Label(root, text='Sequence:', font=('tahoma', 9))\nlabel_sequence.place(x=600,y=y2)\n\ntext_rc_sequence=tk.Text(root, width=38, height=1, font=('tahoma', 9), bd=2)\ntext_rc_sequence.place(x=1000, y=y2)\n\ntext_sequence_len=tk.Text(root, width=5, height=1, font=('tahoma', 9), bd=2)\ntext_sequence_len.place(x=1260, y=y5)\nlabel_sequence_len=tk.Label(root, text='nts', font=('tahoma', 9))\nlabel_sequence_len.place(x=1315,y=y5)\ntext_sequence_len.delete('1.0', tk.END)\ntext_sequence_len.insert('1.0', str(20))\n\ntext_readNumDivided=tk.Text(root, width=13, height=1, font=('tahoma', 9), bd=2, wrap='none')\ntext_readNumDivided.place(x=335, y=y3+10)\nlabel_readNumDivided1=tk.Label(root, text='by every', font=('tahoma', 9))\nlabel_readNumDivided1.place(x=255,y=y3+10)\nlabel_readNumDivided2=tk.Label(root, text='reads', font=('tahoma', 9))\nlabel_readNumDivided2.place(x=460,y=y3+10)\ntext_readNumDivided.delete('1.0', tk.END)\ntext_readNumDivided.insert('1.0', str(250000))\n\ntext_fileNum=tk.Text(root, width=12, height=1, font=('tahoma', 9), bd=2, wrap='none')\ntext_fileNum.place(x=400, y=y6+10)\nlabel_fileNum=tk.Label(root, text='files', font=('tahoma', 9))\nlabel_fileNum.place(x=520,y=y6+10)\n\ntext_readNum=tk.Text(root, width=22, height=1, font=('tahoma', 9), bd=2, wrap='none')\ntext_readNum.place(x=700, y=y6+10)\nlabel_readNum=tk.Label(root, text='reads', font=('tahoma', 9))\nlabel_readNum.place(x=890,y=y6+10)\n\ntext_time=tk.Text(root, width=15, height=1, font=('tahoma', 9), bd=2)\ntext_time.place(x=115, y=y8)\nlabel_time=tk.Label(root, text='Time:', font=('tahoma', 9))\nlabel_time.place(x=60,y=y8)\nlabel_seconds=tk.Label(root, text='second(s)', font=('tahoma', 9))\nlabel_seconds.place(x=250,y=y8)\n\ntext_percentage=tk.Text(root, width=8, height=1, font=('tahoma', 9), bg='gray95', bd=0)\ntext_percentage.place(x=1260, y=y4)\n\ntext_percentage_batch=tk.Text(root, width=8, height=1, font=('tahoma', 9), bg='gray95', bd=0)\ntext_percentage_batch.place(x=1260, y=y7)\n\n# ProgressBar /////////////////////////////////////////////////////////////////////////////\nprogressbar_loadSequences = ttk.Progressbar(root, length=200, maximum=100, mode='determinate')\nprogressbar_loadSequences.place(x=530,y=y0)\n\nprogressbar_loadFASTQ = ttk.Progressbar(root, length=300, mode='indeterminate')\nprogressbar_loadFASTQ.place(x=400,y=y4)\n\nprogressbar = ttk.Progressbar(root, length=460, maximum=100, mode='determinate')\nprogressbar.place(x=760,y=y4)\n\nprogressbar_batch = ttk.Progressbar(root, length=520, maximum=100, mode='determinate')\nprogressbar_batch.place(x=700,y=y7)\n\n# Button 
/////////////////////////////////////////////////////////////////////////////////\nbutton_loadSequences = ttk.Button(root, text=\"Load sgRNA\", width=20, command=loadSequences)\nbutton_loadSequences.place(x=60, y=y1)\n\nbutton_loadHalfMatchedSequences = ttk.Button(root, text=\"Load Half Matched sgRNA\", width=30, command=buttonLoadHalfMatchedSequences)\nbutton_loadHalfMatchedSequences.place(x=265, y=y1)\n\nbutton_clear = ttk.Button(root, text=\"Clear\", width=20, command=clear)\nbutton_clear.place(x=1180, y=y1)\n\nbutton_refresh = ttk.Button(root, text=\"Browse\", width=20, command=browse)\nbutton_refresh.place(x=1180, y=y0)\n\nbutton_browseFASTQ = ttk.Button(root, text=\"Browse FASTQ...\", width=20, command=buttonBrowseFASTQ)\nbutton_browseFASTQ.place(x=60, y=y5)\n\nbutton_divideFASTQ = ttk.Button(root, text=\"Divide FASTQ\", width=20, command=lambda:start_divideFASTQ_thread(None))\nbutton_divideFASTQ.place(x=60, y=y3+10)\n\nbutton_loadFASTQ = ttk.Button(root, text=\"Load FASTQ\", width=20, command=lambda:start_loadFASTQ_thread(None))\nbutton_loadFASTQ.place(x=400, y=y5)\n\nbutton_preprocessFASTQ = ttk.Button(root, text=\"Preprocess FASTQ & Count All Matched Sequences\", width=55, command=lambda:start_preprocess_thread(None))\nbutton_preprocessFASTQ.place(x=760, y=y5)\n\nbutton_match = ttk.Button(root, text=\"Match\", width=20, command=buttonMatch)\nbutton_match.place(x=680, y=y3)\n\nbutton_matchAll = ttk.Button(root, text=\"Match All\", width=20, command=lambda:start_matchAll_thread(None))\nbutton_matchAll.place(x=1180, y=y3)\n\nbutton_about = ttk.Button(root, text=\"About\", width=20, command=buttonAbout)\nbutton_about.place(x=980, y=y8)\n\nbutton_export = ttk.Button(root, text=\"Export\", width=20, command=buttonExport)\nbutton_export.place(x=720, y=y8)\n\nbutton_exit = ttk.Button(root, text=\"Exit\", width=20, command=root.destroy)\nbutton_exit.place(x=1180, y=y8)\n\nbutton_browseFASTQs = ttk.Button(root, text=\"Browse FASTQs...\", width=25, command=buttonBrowseFASTQs)\nbutton_browseFASTQs.place(x=60, y=y7)\n\nbutton_batchProcess = ttk.Button(root, text=\"Batch Process\", width=30, command=lambda:start_batch_thread(None))\nbutton_batchProcess.place(x=400, y=y7)\n\nroot.bind('<Return>', start_preprocess_thread)\nroot.bind('<Return>', start_loadFASTQ_thread)\nroot.bind('<Return>', start_divideFASTQ_thread)\nroot.bind('<Return>', start_matchAll_thread)\n\nroot.mainloop()"
] | [
[
"numpy.array",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
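The record above counts sgRNA hits by indexing every length-k substring of each read into a dict of sets (`preprocessFASTQ`), then probing with each target and its reverse complement (`matchAll`). A minimal, self-contained sketch of that scheme; the function names and toy data are illustrative, only the algorithm mirrors the record:

```python
# Sketch of the k-mer indexing used by preprocessFASTQ/matchAll above.
# Names and the toy read are illustrative, not part of the tool itself.

def reverse_complement(seq):
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}
    return ''.join(complement[base] for base in reversed(seq))

def build_kmer_index(reads, k=20):
    """Map every k-mer to the set of reads containing it."""
    index = {}
    for name, sequence in reads:
        for i in range(len(sequence) - k + 1):
            index.setdefault(sequence[i:i + k], set()).add((name, sequence))
    return index

def count_matches(index, target):
    """Count reads hit on either strand, as matchAll's n1 + n2 does."""
    forward = index.get(target, set())
    reverse = index.get(reverse_complement(target), set())
    return len(forward) + len(reverse)

reads = [('@read1', 'AACCGGTTAACCGGTTAACCTTT')]
index = build_kmer_index(reads, k=20)
print(count_matches(index, 'AACCGGTTAACCGGTTAACC'))  # 1
```

Note that, as in the record, a target that equals its own reverse complement would be counted on both strands.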
S3DEV/utils4 | [
"6c196c9928914412f0cadcb8b2ce1f0eeb285b25"
] | [
"utils4/cmaps.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n:Purpose: This module provides an easy-access, light-weight wrapper,\n around ``matplotlib``'s colour maps, and can be used for\n retrieving and previewing named colour maps.\n\n:Platform: Linux/Windows | Python 3.6+\n:Developer: J Berendt\n:Email: [email protected]\n\n:Comments: n/a\n\n:Examples:\n\n Retrieve 5 colours from the 'viridis' colour map in hex format\n and preview the colours::\n\n >>> from utils4.cmaps import cmaps\n\n >>> clrs = cmaps.get_cmap('viridis', 15, as_hex=True, preview=True)\n >>> clrs\n\n ['#2d718e', '#297b8e', '#25858e', '#218f8d', '#1f998a',\n '#20a386', '#26ad81', '#34b679', '#46c06f', '#5cc863',\n '#73d056', '#8ed645', '#aadc32', '#c5e021', '#fde725']\n\n .. figure:: _static/img/cmaps_viridis15.png\n :scale: 75%\n :align: center\n\n Preview of the requested 'viridis' colour map of 15 colours\n\n\n List named colours from the matplotlib colour palette::\n\n >>> from utils4.cmaps import cmaps\n\n >>> cmaps.get_named_colours()\n\n {'aliceblue': '#F0F8FF',\n 'antiquewhite': '#FAEBD7',\n 'aqua': '#00FFFF',\n ...,\n 'whitesmoke': '#F5F5F5',\n 'yellow': '#FFFF00',\n 'yellowgreen': '#9ACD32'}\n\n\n List or retrieve colour map names::\n\n >>> from utils4.cmaps import cmaps\n\n >>> cmaps.view_cmaps(view_only=True)\n\n ['magma',\n 'inferno',\n 'plasma',\n ...,\n 'tab20_r',\n 'tab20b_r',\n 'tab20c_r']\n\n\"\"\"\n# pylint: disable=import-error\n# pylint: disable=invalid-name\n# pylint: disable=wrong-import-order\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom typing import Union\n\n\nclass _Preview: # pragma: nocover\n \"\"\"Provide a preview for a given colourmap.\"\"\"\n\n def __init__(self, colours):\n \"\"\"_Preview class initialiser.\n\n Args:\n colours (Union[list, np.array]): Iterable of colours for\n preview.\n\n \"\"\"\n self._c = colours\n self._n = len(colours)\n self._x = None\n self._y = None\n self._build_dataset()\n\n def plot(self):\n \"\"\"Plot to show colours.\"\"\"\n w = 6 if self._n < 50 else 10\n h = w/1.618033\n _, ax = plt.subplots(figsize=[w, h])\n ax.scatter(self._x,\n self._y,\n marker='o',\n s=100,\n c=self._c)\n plt.show()\n\n def _build_dataset(self):\n \"\"\"Create a dataset to be plotted.\"\"\"\n self._x = np.arange(self._n)\n self._y = np.sin(self._x*(np.pi/180))\n\n\nclass CMaps():\n \"\"\"Provides an easy-access layer to ``matplotlib``'s colour maps.\"\"\"\n\n @staticmethod\n def get_cmap(map_name: str,\n n: int=25,\n as_hex: bool=False,\n preview: bool=False) -> Union[list, np.array]:\n \"\"\"Get a list of (n) RGBA or Hex colours from a specified map.\n\n This colour wrapper is specialised to return (n) colours from\n a normalised colour map. Meaning, rather than returning the\n 5 lightest colours, or the 200 lightest to medium colours, the\n lightest colours are removed (as often they are difficult to\n see in a graph) and the darkest colour is added. The intent\n is to provide (n) 'usable' colours for graphing.\n\n Args:\n map_name (str): Name of the matplotlib colourmap.\n n (int, optional): Number of colours to return. Must\n be >= 255. Defaults to 25.\n as_hex (bool, optional): Return the colours as a hex string.\n Defaults to False, which returns colours as RGBA.\n preview (bool, optional): Preview the colour map. 
Defaults\n to False.\n\n Raises:\n ValueError: If the value of ``n`` is not between 1 and 255.\n\n Returns:\n Union[list, np.array]: Iterable of (n) colours.\n\n \"\"\"\n if (n < 1) | (n > 255):\n raise ValueError('The value of n must be: 1 <= n <= 255.')\n norm = matplotlib.colors.Normalize(vmin=-150, vmax=256)\n cmap = matplotlib.cm.get_cmap(map_name)\n clrs = cmap(norm(range(256)))\n N = int(256//n)\n c = clrs[::N]\n # Trim colours until desired length is met.\n while len(c) > n:\n if len(c) - n == 1:\n c = c[:-1]\n else:\n # Shave colours off boths ends until desired length is met.\n c = c[:-1] if len(c) % 2 == 0 else c[1:]\n c[-1] = clrs[-1]\n if as_hex:\n c_ = [matplotlib.colors.rgb2hex(i) for i in c]\n c = c_[:]\n if preview: # pragma: nocover\n _Preview(colours=c).plot()\n return c\n\n @staticmethod\n def get_named_colours() -> dict:\n \"\"\"Return a dictionary of CSS name and hex value.\n\n Returns:\n dict: A dict of named colours as ``{name: hex_code}`` pairs.\n\n \"\"\"\n return matplotlib.colors.cnames\n\n @staticmethod\n def view_cmaps(view_only: bool=True) -> Union[list, None]:\n \"\"\"Show the available colour map names.\n\n Args:\n view_only (bool, optional): If ``True`` the list will be\n printed and ``None`` is returned. If ``False``, the list\n is returned and nothing is printed. Defaults to True.\n\n Returns:\n Union[list, None]: A list of colour maps names if\n ``view-only`` is False, otherwise None.\n\n \"\"\"\n c = plt.colormaps()\n if view_only:\n print(c)\n c = None\n return c\n\n\ncmaps = CMaps()\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.colormaps",
"matplotlib.pyplot.subplots",
"matplotlib.colors.Normalize",
"numpy.sin",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.show",
"matplotlib.colors.rgb2hex"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
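The record's own docstring examples pin down the public surface of `utils4.cmaps`; for quick reference, the calls reduce to the following (module path and signatures are taken from the record, nothing here is invented):

```python
# Usage sketch for the CMaps wrapper in the record above.
from utils4.cmaps import cmaps

# n usable colours (1 <= n <= 255): the lightest shades are trimmed via
# Normalize(vmin=-150, vmax=256) and the darkest map colour is kept.
clrs = cmaps.get_cmap('viridis', n=5, as_hex=True, preview=False)
print(clrs)                                        # e.g. ['#2d718e', ..., '#fde725']

# Named CSS colours as {name: hex} pairs (matplotlib.colors.cnames).
print(cmaps.get_named_colours()['yellowgreen'])    # '#9ACD32'

# Colour-map names: printed when view_only=True, returned when False.
names = cmaps.view_cmaps(view_only=False)
```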
yanglh14/InteractiveGrasping | [
"b5bc1866a1847e7b0c11616fd6cbe949c64a355b"
] | [
"FTSensor/pybind11-master/tests/test_buffers.py"
] | [
"# -*- coding: utf-8 -*-\r\nimport io\r\nimport struct\r\nimport sys\r\n\r\nimport pytest\r\n\r\nfrom pybind11_tests import buffers as m\r\nfrom pybind11_tests import ConstructorStats\r\n\r\nPY3 = sys.version_info[0] >= 3\r\n\r\npytestmark = pytest.requires_numpy\r\n\r\nwith pytest.suppress(ImportError):\r\n import numpy as np\r\n\r\n\r\ndef test_from_python():\r\n with pytest.raises(RuntimeError) as excinfo:\r\n m.Matrix(np.array([1, 2, 3])) # trying to assign a 1D array\r\n assert str(excinfo.value) == \"Incompatible buffer format!\"\r\n\r\n m3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\r\n m4 = m.Matrix(m3)\r\n\r\n for i in range(m4.rows()):\r\n for j in range(m4.cols()):\r\n assert m3[i, j] == m4[i, j]\r\n\r\n cstats = ConstructorStats.get(m.Matrix)\r\n assert cstats.alive() == 1\r\n del m3, m4\r\n assert cstats.alive() == 0\r\n assert cstats.values() == [\"2x3 matrix\"]\r\n assert cstats.copy_constructions == 0\r\n # assert cstats.move_constructions >= 0 # Don't invoke any\r\n assert cstats.copy_assignments == 0\r\n assert cstats.move_assignments == 0\r\n\r\n\r\n# PyPy: Memory leak in the \"np.array(m, copy=False)\" call\r\n# https://bitbucket.org/pypy/pypy/issues/2444\r\[email protected]_on_pypy\r\ndef test_to_python():\r\n mat = m.Matrix(5, 4)\r\n assert memoryview(mat).shape == (5, 4)\r\n\r\n assert mat[2, 3] == 0\r\n mat[2, 3] = 4.0\r\n mat[3, 2] = 7.0\r\n assert mat[2, 3] == 4\r\n assert mat[3, 2] == 7\r\n assert struct.unpack_from('f', mat, (3 * 4 + 2) * 4) == (7, )\r\n assert struct.unpack_from('f', mat, (2 * 4 + 3) * 4) == (4, )\r\n\r\n mat2 = np.array(mat, copy=False)\r\n assert mat2.shape == (5, 4)\r\n assert abs(mat2).sum() == 11\r\n assert mat2[2, 3] == 4 and mat2[3, 2] == 7\r\n mat2[2, 3] = 5\r\n assert mat2[2, 3] == 5\r\n\r\n cstats = ConstructorStats.get(m.Matrix)\r\n assert cstats.alive() == 1\r\n del mat\r\n pytest.gc_collect()\r\n assert cstats.alive() == 1\r\n del mat2 # holds a mat reference\r\n pytest.gc_collect()\r\n assert cstats.alive() == 0\r\n assert cstats.values() == [\"5x4 matrix\"]\r\n assert cstats.copy_constructions == 0\r\n # assert cstats.move_constructions >= 0 # Don't invoke any\r\n assert cstats.copy_assignments == 0\r\n assert cstats.move_assignments == 0\r\n\r\n\r\[email protected]_on_pypy\r\ndef test_inherited_protocol():\r\n \"\"\"SquareMatrix is derived from Matrix and inherits the buffer protocol\"\"\"\r\n\r\n matrix = m.SquareMatrix(5)\r\n assert memoryview(matrix).shape == (5, 5)\r\n assert np.asarray(matrix).shape == (5, 5)\r\n\r\n\r\[email protected]_on_pypy\r\ndef test_pointer_to_member_fn():\r\n for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]:\r\n buf = cls()\r\n buf.value = 0x12345678\r\n value = struct.unpack('i', bytearray(buf))[0]\r\n assert value == 0x12345678\r\n\r\n\r\[email protected]_on_pypy\r\ndef test_readonly_buffer():\r\n buf = m.BufferReadOnly(0x64)\r\n view = memoryview(buf)\r\n assert view[0] == 0x64 if PY3 else b'd'\r\n assert view.readonly\r\n\r\n\r\[email protected]_on_pypy\r\ndef test_selective_readonly_buffer():\r\n buf = m.BufferReadOnlySelect()\r\n\r\n memoryview(buf)[0] = 0x64 if PY3 else b'd'\r\n assert buf.value == 0x64\r\n\r\n io.BytesIO(b'A').readinto(buf)\r\n assert buf.value == ord(b'A')\r\n\r\n buf.readonly = True\r\n with pytest.raises(TypeError):\r\n memoryview(buf)[0] = 0 if PY3 else b'\\0'\r\n with pytest.raises(TypeError):\r\n io.BytesIO(b'1').readinto(buf)\r\n"
] | [
[
"numpy.asarray",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
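The tests above require the compiled `pybind11_tests` extension, but the buffer-protocol behaviour they exercise can be illustrated with a plain NumPy array standing in for `m.Matrix` (the array and offsets below are illustrative, not the real fixture):

```python
# Buffer-protocol round trip, mirroring test_to_python above with NumPy.
import struct
import numpy as np

mat = np.zeros((5, 4), dtype=np.float32)
mat[2, 3] = 4.0

# memoryview exposes shape/strides without copying, like m.Matrix does.
view = memoryview(mat)
assert view.shape == (5, 4)

# Row-major float32: element (2, 3) lives at byte offset (2*4 + 3) * 4.
assert struct.unpack_from('f', view, (2 * 4 + 3) * 4) == (4.0,)

# np.array(..., copy=False) wraps the same memory; writes are shared.
alias = np.array(mat, copy=False)
alias[3, 2] = 7.0
assert mat[3, 2] == 7.0
```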
jie311/miemiedetection | [
"b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6"
] | [
"mmdet/evaluators/voc_evaluator.py"
] | [
"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport sys\nimport tempfile\nimport time\nfrom collections import ChainMap\nfrom loguru import logger\nfrom tqdm import tqdm\n\nimport numpy as np\n\nimport torch\n\nfrom mmdet.utils import gather, is_main_process, postprocess, synchronize, time_synchronized\n\n\nclass VOCEvaluator:\n \"\"\"\n VOC AP Evaluation class.\n \"\"\"\n\n def __init__(\n self,\n dataloader,\n img_size,\n confthre,\n nmsthre,\n num_classes,\n ):\n \"\"\"\n Args:\n dataloader (Dataloader): evaluate dataloader.\n img_size (int): image size after preprocess. images are resized\n to squares whose shape is (img_size, img_size).\n confthre (float): confidence threshold ranging from 0 to 1, which\n is defined in the config file.\n nmsthre (float): IoU threshold of non-max supression ranging from 0 to 1.\n \"\"\"\n self.dataloader = dataloader\n self.img_size = img_size\n self.confthre = confthre\n self.nmsthre = nmsthre\n self.num_classes = num_classes\n self.num_images = len(dataloader.dataset)\n\n def evaluate(\n self,\n model,\n distributed=False,\n half=False,\n trt_file=None,\n decoder=None,\n test_size=None,\n ):\n \"\"\"\n VOC average precision (AP) Evaluation. Iterate inference on the test dataset\n and the results are evaluated by COCO API.\n\n NOTE: This function will change training mode to False, please save states if needed.\n\n Args:\n model : model to evaluate.\n\n Returns:\n ap50_95 (float) : COCO style AP of IoU=50:95\n ap50 (float) : VOC 2007 metric AP of IoU=50\n summary (sr): summary info of evaluation.\n \"\"\"\n # TODO half to amp_test\n tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor\n model = model.eval()\n if half:\n model = model.half()\n ids = []\n data_list = {}\n progress_bar = tqdm if is_main_process() else iter\n\n inference_time = 0\n nms_time = 0\n n_samples = max(len(self.dataloader) - 1, 1)\n\n if trt_file is not None:\n from torch2trt import TRTModule\n\n model_trt = TRTModule()\n model_trt.load_state_dict(torch.load(trt_file))\n\n x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()\n model(x)\n model = model_trt\n\n for cur_iter, (imgs, _, info_imgs, ids) in enumerate(\n progress_bar(self.dataloader)\n ):\n with torch.no_grad():\n imgs = imgs.type(tensor_type)\n\n # skip the the last iters since batchsize might be not enough for batch inference\n is_time_record = cur_iter < len(self.dataloader) - 1\n if is_time_record:\n start = time.time()\n\n outputs = model(imgs)\n if decoder is not None:\n outputs = decoder(outputs, dtype=outputs.type())\n\n if is_time_record:\n infer_end = time_synchronized()\n inference_time += infer_end - start\n\n outputs = postprocess(\n outputs, self.num_classes, self.confthre, self.nmsthre\n )\n if is_time_record:\n nms_end = time_synchronized()\n nms_time += nms_end - infer_end\n\n data_list.update(self.convert_to_voc_format(outputs, info_imgs, ids))\n\n statistics = torch.cuda.FloatTensor([inference_time, nms_time, n_samples])\n if distributed:\n data_list = gather(data_list, dst=0)\n data_list = ChainMap(*data_list)\n torch.distributed.reduce(statistics, dst=0)\n\n eval_results = self.evaluate_prediction(data_list, statistics)\n synchronize()\n return eval_results\n\n def convert_to_voc_format(self, outputs, info_imgs, ids):\n predictions = {}\n for (output, img_h, img_w, img_id) in zip(\n outputs, info_imgs[0], info_imgs[1], ids\n ):\n if output is None:\n predictions[int(img_id)] = (None, None, None)\n continue\n output = 
output.cpu()\n\n bboxes = output[:, 0:4]\n\n # preprocessing: resize\n scale = min(\n self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)\n )\n bboxes /= scale\n\n cls = output[:, 6]\n scores = output[:, 4] * output[:, 5]\n\n predictions[int(img_id)] = (bboxes, cls, scores)\n return predictions\n\n def evaluate_prediction(self, data_dict, statistics):\n if not is_main_process():\n return 0, 0, None\n\n logger.info(\"Evaluate in main process...\")\n\n inference_time = statistics[0].item()\n nms_time = statistics[1].item()\n n_samples = statistics[2].item()\n\n a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)\n a_nms_time = 1000 * nms_time / (n_samples * self.dataloader.batch_size)\n\n time_info = \", \".join(\n [\n \"Average {} time: {:.2f} ms\".format(k, v)\n for k, v in zip(\n [\"forward\", \"NMS\", \"inference\"],\n [a_infer_time, a_nms_time, (a_infer_time + a_nms_time)],\n )\n ]\n )\n\n info = time_info + \"\\n\"\n\n all_boxes = [\n [[] for _ in range(self.num_images)] for _ in range(self.num_classes)\n ]\n for img_num in range(self.num_images):\n bboxes, cls, scores = data_dict[img_num]\n if bboxes is None:\n for j in range(self.num_classes):\n all_boxes[j][img_num] = np.empty([0, 5], dtype=np.float32)\n continue\n for j in range(self.num_classes):\n mask_c = cls == j\n if sum(mask_c) == 0:\n all_boxes[j][img_num] = np.empty([0, 5], dtype=np.float32)\n continue\n\n c_dets = torch.cat((bboxes, scores.unsqueeze(1)), dim=1)\n all_boxes[j][img_num] = c_dets[mask_c].numpy()\n\n sys.stdout.write(\n \"im_eval: {:d}/{:d} \\r\".format(img_num + 1, self.num_images)\n )\n sys.stdout.flush()\n\n with tempfile.TemporaryDirectory() as tempdir:\n mAP50, mAP70 = self.dataloader.dataset.evaluate_detections(\n all_boxes, tempdir\n )\n return mAP50, mAP70, info\n"
] | [
[
"torch.ones",
"torch.load",
"torch.cuda.FloatTensor",
"torch.distributed.reduce",
"torch.no_grad",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
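The key transform in `convert_to_voc_format` above is undoing the letterbox resize: the network predicts on an `img_size` square, so boxes are divided by the resize ratio to return to original-image pixels. A small sketch of that step (the tensor values are made up; the formulae match the record):

```python
# Box rescaling as done in convert_to_voc_format above.
import torch

img_size = (640, 640)           # network input (h, w)
img_h, img_w = 480, 853         # original image dimensions

scale = min(img_size[0] / float(img_h), img_size[1] / float(img_w))

# One detection row: x1, y1, x2, y2, obj_conf, cls_conf, cls_id
output = torch.tensor([[100.0, 50.0, 300.0, 200.0, 0.9, 0.8, 3.0]])
bboxes = output[:, 0:4] / scale          # back to original-image pixels
scores = output[:, 4] * output[:, 5]     # objectness * class confidence
cls = output[:, 6]
print(bboxes, scores, cls)
```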
gogobd/Recycle-GAN | [
"c7fd530be2eebee83fb2cbf056777b30c539bddc"
] | [
"models/cycle_gan_model.py"
] | [
"import numpy as np\nimport torch\nimport os\nfrom collections import OrderedDict\nfrom torch.autograd import Variable\nimport itertools\nimport util.util as util\nfrom util.image_pool import ImagePool\nfrom .base_model import BaseModel\nfrom . import networks\nimport sys\n\n\nclass CycleGANModel(BaseModel):\n def name(self):\n return 'CycleGANModel'\n\n def initialize(self, opt):\n BaseModel.initialize(self, opt)\n\n nb = opt.batchSize\n size = opt.fineSize\n self.input_A = self.Tensor(nb, opt.input_nc, size, size)\n self.input_B = self.Tensor(nb, opt.output_nc, size, size)\n\n # load/define networks\n # The naming conversion is different from those used in the paper\n # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n\n self.netG_A = networks.define_G(opt.input_nc, opt.output_nc,\n opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids)\n self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,\n opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids)\n\n if self.isTrain:\n use_sigmoid = opt.no_lsgan\n self.netD_A = networks.define_D(opt.output_nc, opt.ndf,\n opt.which_model_netD,\n opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)\n self.netD_B = networks.define_D(opt.input_nc, opt.ndf,\n opt.which_model_netD,\n opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)\n if not self.isTrain or opt.continue_train:\n print(f\"Loading model {opt.which_epoch}.\")\n which_epoch = opt.which_epoch\n self.load_network(self.netG_A, 'G_A', which_epoch)\n self.load_network(self.netG_B, 'G_B', which_epoch)\n if self.isTrain:\n self.load_network(self.netD_A, 'D_A', which_epoch)\n self.load_network(self.netD_B, 'D_B', which_epoch)\n\n if self.isTrain:\n self.old_lr = opt.lr\n self.fake_A_pool = ImagePool(opt.pool_size)\n self.fake_B_pool = ImagePool(opt.pool_size)\n # define loss functions\n self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n # initialize optimizers\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers = []\n self.schedulers = []\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D_A)\n self.optimizers.append(self.optimizer_D_B)\n for optimizer in self.optimizers:\n self.schedulers.append(networks.get_scheduler(optimizer, opt))\n\n print('---------- Networks initialized -------------')\n networks.print_network(self.netG_A)\n networks.print_network(self.netG_B)\n if self.isTrain:\n networks.print_network(self.netD_A)\n networks.print_network(self.netD_B)\n print('-----------------------------------------------')\n\n def set_input(self, input):\n AtoB = self.opt.which_direction == 'AtoB'\n input_A = input['A' if AtoB else 'B']\n input_B = input['B' if AtoB else 'A']\n self.input_A.resize_(input_A.size()).copy_(input_A)\n self.input_B.resize_(input_B.size()).copy_(input_B)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n def forward(self):\n self.real_A = Variable(self.input_A)\n self.real_B = Variable(self.input_B)\n\n def test(self):\n real_A = Variable(self.input_A, volatile=True)\n fake_B = 
self.netG_A(real_A)\n self.rec_A = self.netG_B(fake_B).data\n self.fake_B = fake_B.data\n\n real_B = Variable(self.input_B, volatile=True)\n fake_A = self.netG_B(real_B)\n self.rec_B = self.netG_A(fake_A).data\n self.fake_A = fake_A.data\n\n # get image paths\n def get_image_paths(self):\n return self.image_paths\n\n def backward_D_basic(self, netD, real, fake):\n # Real\n pred_real = netD(real)\n loss_D_real = self.criterionGAN(pred_real, True)\n # Fake\n pred_fake = netD(fake.detach())\n loss_D_fake = self.criterionGAN(pred_fake, False)\n # Combined loss\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n # backward\n loss_D.backward()\n return loss_D\n\n def backward_D_A(self):\n fake_B = self.fake_B_pool.query(self.fake_B)\n loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)\n self.loss_D_A = loss_D_A.data[0]\n\n def backward_D_B(self):\n fake_A = self.fake_A_pool.query(self.fake_A)\n loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)\n self.loss_D_B = loss_D_B.data[0]\n\n def backward_G(self):\n lambda_idt = self.opt.identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n # Identity loss\n if lambda_idt > 0:\n # G_A should be identity if real_B is fed.\n idt_A = self.netG_A(self.real_B)\n loss_idt_A = self.criterionIdt(idt_A, self.real_B) * lambda_B * lambda_idt\n # G_B should be identity if real_A is fed.\n idt_B = self.netG_B(self.real_A)\n loss_idt_B = self.criterionIdt(idt_B, self.real_A) * lambda_A * lambda_idt\n\n self.idt_A = idt_A.data\n self.idt_B = idt_B.data\n self.loss_idt_A = loss_idt_A.data[0]\n self.loss_idt_B = loss_idt_B.data[0]\n else:\n loss_idt_A = 0\n loss_idt_B = 0\n self.loss_idt_A = 0\n self.loss_idt_B = 0\n\n # GAN loss D_A(G_A(A))\n fake_B = self.netG_A(self.real_A)\n pred_fake = self.netD_A(fake_B)\n loss_G_A = self.criterionGAN(pred_fake, True)\n\n # GAN loss D_B(G_B(B))\n fake_A = self.netG_B(self.real_B)\n pred_fake = self.netD_B(fake_A)\n loss_G_B = self.criterionGAN(pred_fake, True)\n\n # Forward cycle loss\n rec_A = self.netG_B(fake_B)\n loss_cycle_A = self.criterionCycle(rec_A, self.real_A) * lambda_A\n\n # Backward cycle loss\n rec_B = self.netG_A(fake_A)\n loss_cycle_B = self.criterionCycle(rec_B, self.real_B) * lambda_B\n # combined loss\n loss_G = loss_G_A + loss_G_B + loss_cycle_A + loss_cycle_B + loss_idt_A + loss_idt_B\n loss_G.backward()\n\n self.fake_B = fake_B.data\n self.fake_A = fake_A.data\n self.rec_A = rec_A.data\n self.rec_B = rec_B.data\n\n self.loss_G_A = loss_G_A.data[0]\n self.loss_G_B = loss_G_B.data[0]\n self.loss_cycle_A = loss_cycle_A.data[0]\n self.loss_cycle_B = loss_cycle_B.data[0]\n\n def optimize_parameters(self):\n # forward\n self.forward()\n # G_A and G_B\n self.optimizer_G.zero_grad()\n self.backward_G()\n self.optimizer_G.step()\n # D_A\n self.optimizer_D_A.zero_grad()\n self.backward_D_A()\n self.optimizer_D_A.step()\n # D_B\n self.optimizer_D_B.zero_grad()\n self.backward_D_B()\n self.optimizer_D_B.step()\n\n def get_current_errors(self):\n ret_errors = OrderedDict([('D_A', self.loss_D_A), ('G_A', self.loss_G_A), ('Cyc_A', self.loss_cycle_A),\n ('D_B', self.loss_D_B), ('G_B', self.loss_G_B), ('Cyc_B', self.loss_cycle_B)])\n if self.opt.identity > 0.0:\n ret_errors['idt_A'] = self.loss_idt_A\n ret_errors['idt_B'] = self.loss_idt_B\n return ret_errors\n\n def get_current_visuals(self):\n real_A = util.tensor2im(self.input_A)\n fake_B = util.tensor2im(self.fake_B)\n rec_A = util.tensor2im(self.rec_A)\n real_B = util.tensor2im(self.input_B)\n fake_A = 
util.tensor2im(self.fake_A)\n rec_B = util.tensor2im(self.rec_B)\n ret_visuals = OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('rec_A', rec_A),\n ('real_B', real_B), ('fake_A', fake_A), ('rec_B', rec_B)])\n if self.opt.isTrain and self.opt.identity > 0.0:\n ret_visuals['idt_A'] = util.tensor2im(self.idt_A)\n ret_visuals['idt_B'] = util.tensor2im(self.idt_B)\n return ret_visuals\n\n def save(self, label):\n self.save_network(self.netG_A, 'G_A', label, self.gpu_ids)\n self.save_network(self.netD_A, 'D_A', label, self.gpu_ids)\n self.save_network(self.netG_B, 'G_B', label, self.gpu_ids)\n self.save_network(self.netD_B, 'D_B', label, self.gpu_ids)\n"
] | [
[
"torch.nn.L1Loss",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
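`backward_G` in the record above combines three terms: adversarial losses for both generators, forward/backward cycle-consistency, and optional identity losses. A condensed sketch of that combined objective in current PyTorch (the networks, GAN criterion and lambdas are placeholders; the record's `Variable(...)` and `.data[0]` reads are pre-0.4 idioms whose modern equivalent is plain tensors and `.item()`):

```python
# Condensed sketch of backward_G above: GAN + cycle + identity terms.
import torch
import torch.nn as nn

l1 = nn.L1Loss()

def generator_loss(G_A, G_B, D_A, D_B, real_A, real_B, criterion_gan,
                   lambda_A=10.0, lambda_B=10.0, lambda_idt=0.5):
    fake_B, fake_A = G_A(real_A), G_B(real_B)
    # Adversarial: each generator tries to fool the opposing discriminator.
    loss_gan = criterion_gan(D_A(fake_B), True) + criterion_gan(D_B(fake_A), True)
    # Cycle consistency: A -> B -> A and B -> A -> B reconstructions.
    loss_cycle = (l1(G_B(fake_B), real_A) * lambda_A +
                  l1(G_A(fake_A), real_B) * lambda_B)
    # Identity: feeding a generator its target domain should be a no-op.
    loss_idt = (l1(G_A(real_B), real_B) * lambda_B * lambda_idt +
                l1(G_B(real_A), real_A) * lambda_A * lambda_idt)
    return loss_gan + loss_cycle + loss_idt
```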
nowcastsystem/QUANTAXIS | [
"66b5d002adf69642241a1bdb5edd90dc4111e690"
] | [
"QUANTAXIS/QAFetch/QAQuery_Advance.py"
] | [
"# coding: utf-8\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2018 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport datetime\nimport re\nimport pymongo\nimport pandas as pd\nfrom pandas import DataFrame\n\nfrom QUANTAXIS.QAData import (QA_DataStruct_Index_day, QA_DataStruct_Index_min,\n QA_DataStruct_Future_day, QA_DataStruct_Future_min,\n QA_DataStruct_Stock_block, QA_DataStruct_Financial,\n QA_DataStruct_Stock_day, QA_DataStruct_Stock_min,\n QA_DataStruct_Stock_transaction)\nfrom QUANTAXIS.QAFetch.QAQuery import (QA_fetch_index_day,\n QA_fetch_index_min,\n QA_fetch_stock_day,\n QA_fetch_stock_full,\n QA_fetch_stock_min,\n QA_fetch_future_day,\n QA_fetch_future_min,\n QA_fetch_financial_report,\n QA_fetch_stock_list,\n QA_fetch_index_list,\n QA_fetch_future_list,\n QA_fetch_stock_financial_calendar,\n QA_fetch_stock_divyield\n )\nfrom QUANTAXIS.QAUtil.QADate import month_data\nfrom QUANTAXIS.QAUtil import (DATABASE, QA_Setting, QA_util_date_stamp,\n QA_util_date_valid, QA_util_log_info,\n QA_util_time_stamp, QA_util_getBetweenQuarter,\n QA_util_datetime_to_strdate, QA_util_add_months)\n\n\"\"\"\n按要求从数据库取数据,并转换成numpy结构\n\n总体思路:\n⚙️QA_fetch_***_adv\n📍⚙️QA_fetch_*** 🐌 获取数据collections从mongodb中 🐌 返回DataFrame ,\n📍📍⚙️用返回的 DataFrame 初始化 ️QA_DataStruct_***\n\n类型***有\n_Stock_day\n_Stock_min\n_Index_day\n_Index_min\n\"\"\"\n\n\n\ndef QA_fetch_option_day_adv(\n code,\n start='all', end=None,\n if_drop_index=True,\n # 🛠 todo collections 参数没有用到, 且数据库是固定的, 这个变量后期去掉\n collections=DATABASE.option_day):\n '''\n\n '''\n pass\n\n\ndef QA_fetch_stock_day_adv(\n code,\n start='all', end=None,\n if_drop_index=True,\n # 🛠 todo collections 参数没有用到, 且数据库是固定的, 这个变量后期去掉\n collections=DATABASE.stock_day):\n '''\n\n :param code: 股票代码\n :param start: 开始日期\n :param end: 结束日期\n :param if_drop_index:\n :param collections: 默认数据库\n :return: 如果股票代码不存 或者开始结束日期不存在 在返回 None ,合法返回 QA_DataStruct_Stock_day 数据\n '''\n '获取股票日线'\n end = start if end is None else end\n start = str(start)[0:10]\n end = str(end)[0:10]\n\n if start == 'all':\n start = '1990-01-01'\n end = str(datetime.date.today())\n\n res = QA_fetch_stock_day(code, start, end, format='pd')\n if res is None:\n # 🛠 todo 报告是代码不合法,还是日期不合法\n print(\"QA Error QA_fetch_stock_day_adv parameter code=%s , start=%s, end=%s call QA_fetch_stock_day return None\" % (\n code, start, end))\n return None\n else:\n res_reset_index = res.set_index(['date', 'code'], drop=if_drop_index)\n # if res_reset_index is None:\n # print(\"QA Error 
QA_fetch_stock_day_adv set index 'datetime, code' return None\")\n # return None\n return QA_DataStruct_Stock_day(res_reset_index)\n\n\ndef QA_fetch_stock_min_adv(\n code,\n start, end=None,\n frequence='1min',\n if_drop_index=True,\n # 🛠 todo collections 参数没有用到, 且数据库是固定的, 这个变量后期去掉\n collections=DATABASE.stock_min):\n '''\n '获取股票分钟线'\n :param code: 字符串str eg 600085\n :param start: 字符串str 开始日期 eg 2011-01-01\n :param end: 字符串str 结束日期 eg 2011-05-01\n :param frequence: 字符串str 分钟线的类型 支持 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m 类型\n :param if_drop_index: Ture False , dataframe drop index or not\n :param collections: mongodb 数据库\n :return: QA_DataStruct_Stock_min 类型\n '''\n if frequence in ['1min', '1m']:\n frequence = '1min'\n elif frequence in ['5min', '5m']:\n frequence = '5min'\n elif frequence in ['15min', '15m']:\n frequence = '15min'\n elif frequence in ['30min', '30m']:\n frequence = '30min'\n elif frequence in ['60min', '60m']:\n frequence = '60min'\n else:\n print(\"QA Error QA_fetch_stock_min_adv parameter frequence=%s is none of 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m\" % frequence)\n return None\n\n # __data = [] 未使用\n\n end = start if end is None else end\n if len(start) == 10:\n start = '{} 09:30:00'.format(start)\n\n if len(end) == 10:\n end = '{} 15:00:00'.format(end)\n\n if start == end:\n # 🛠 todo 如果相等,根据 frequence 获取开始时间的 时间段 QA_fetch_stock_min, 不支持start end是相等的\n print(\"QA Error QA_fetch_stock_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! \" % (\n code, start, end))\n return None\n\n # 🛠 todo 报告错误 如果开始时间 在 结束时间之后\n\n res = QA_fetch_stock_min(\n code, start, end, format='pd', frequence=frequence)\n if res is None:\n print(\"QA Error QA_fetch_stock_min_adv parameter code=%s , start=%s, end=%s frequence=%s call QA_fetch_stock_min return None\" % (\n code, start, end, frequence))\n return None\n else:\n res_set_index = res.set_index(['datetime', 'code'], drop=if_drop_index)\n # if res_set_index is None:\n # print(\"QA Error QA_fetch_stock_min_adv set index 'datetime, code' return None\")\n # return None\n return QA_DataStruct_Stock_min(res_set_index)\n\n\ndef QA_fetch_stock_day_full_adv(date):\n '''\n '返回全市场某一天的数据'\n :param date:\n :return: QA_DataStruct_Stock_day类 型数据\n '''\n # 🛠 todo 检查日期data参数\n res = QA_fetch_stock_full(date, 'pd')\n if res is None:\n print(\"QA Error QA_fetch_stock_day_full_adv parameter date=%s call QA_fetch_stock_full return None\" % (date))\n return None\n else:\n res_set_index = res.set_index(['date', 'code'])\n # if res_set_index is None:\n # print(\"QA Error QA_fetch_stock_day_full set index 'date, code' return None\")\n return QA_DataStruct_Stock_day(res_set_index)\n\n\ndef QA_fetch_index_day_adv(\n code,\n start, end=None,\n if_drop_index=True,\n # 🛠 todo collections 参数没有用到, 且数据库是固定的, 这个变量后期去掉\n collections=DATABASE.index_day):\n '''\n :param code: code: 字符串str eg 600085\n :param start: 字符串str 开始日期 eg 2011-01-01\n :param end: 字符串str 结束日期 eg 2011-05-01\n :param if_drop_index: Ture False , dataframe drop index or not\n :param collections: mongodb 数据库\n :return:\n '''\n '获取指数日线'\n end = start if end is None else end\n start = str(start)[0:10]\n end = str(end)[0:10]\n\n # 🛠 todo 报告错误 如果开始时间 在 结束时间之后\n # 🛠 todo 如果相等\n\n res = QA_fetch_index_day(code, start, end, format='pd')\n if res is None:\n print(\"QA Error QA_fetch_index_day_adv parameter code=%s start=%s end=%s call QA_fetch_index_day return None\" % (\n code, start, end))\n return None\n else:\n res_set_index = res.set_index(['date', 'code'], 
drop=if_drop_index)\n # if res_set_index is None:\n # print(\"QA Error QA_fetch_index_day_adv set index 'date, code' return None\")\n # return None\n return QA_DataStruct_Index_day(res_set_index)\n\n\ndef QA_fetch_index_min_adv(\n code,\n start, end=None,\n frequence='1min',\n if_drop_index=True,\n collections=DATABASE.index_min):\n '''\n '获取股票分钟线'\n :param code:\n :param start:\n :param end:\n :param frequence:\n :param if_drop_index:\n :param collections:\n :return:\n '''\n if frequence in ['1min', '1m']:\n frequence = '1min'\n elif frequence in ['5min', '5m']:\n frequence = '5min'\n elif frequence in ['15min', '15m']:\n frequence = '15min'\n elif frequence in ['30min', '30m']:\n frequence = '30min'\n elif frequence in ['60min', '60m']:\n frequence = '60min'\n\n # __data = [] 没有使用\n\n end = start if end is None else end\n if len(start) == 10:\n start = '{} 09:30:00'.format(start)\n if len(end) == 10:\n end = '{} 15:00:00'.format(end)\n\n # 🛠 todo 报告错误 如果开始时间 在 结束时间之后\n\n # if start == end:\n # 🛠 todo 如果相等,根据 frequence 获取开始时间的 时间段 QA_fetch_index_min_adv, 不支持start end是相等的\n #print(\"QA Error QA_fetch_index_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! \" % (code, start, end))\n # return None\n\n res = QA_fetch_index_min(\n code, start, end, format='pd', frequence=frequence)\n if res is None:\n print(\"QA Error QA_fetch_index_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_index_min return None\" % (\n code, start, end, frequence))\n else:\n res_reset_index = res.set_index(\n ['datetime', 'code'], drop=if_drop_index)\n # if res_reset_index is None:\n # print(\"QA Error QA_fetch_index_min_adv set index 'date, code' return None\")\n return QA_DataStruct_Index_min(res_reset_index)\n\n\ndef QA_fetch_stock_transaction_adv(code, start, end=None, if_drop_index=True, collections=DATABASE.stock_transaction):\n '''\n\n :param code:\n :param start:\n :param end:\n :param if_drop_index:\n :param collections:\n :return:\n '''\n end = start if end is None else end\n data = DataFrame([item for item in collections.find({\n 'code': str(code), \"date\": {\n \"$gte\": start,\n \"$lte\": end\n }})])\n\n data['datetime'] = pd.to_datetime(data['datetime'])\n return QA_DataStruct_Stock_transaction(data.set_index('datetime', drop=if_drop_index))\n\n# 没有被使用, 和下面的QA_fetch_stock_list_adv函数是一致的\n# def QA_fetch_security_list_adv(collections=DATABASE.stock_list):\n# '获取股票列表'\n# return pd.DataFrame([item for item in collections.find()]).drop('_id', axis=1, inplace=False)\n\n\ndef QA_fetch_stock_list_adv(collections=DATABASE.stock_list):\n '''\n '获取股票列表'\n :param collections: mongodb 数据库\n :return: DataFrame\n '''\n stock_list_items = QA_fetch_stock_list(collections)\n if len(stock_list_items) == 0:\n print(\"QA Error QA_fetch_stock_list_adv call item for item in collections.find() return 0 item, maybe the DATABASE.stock_list is empty!\")\n return None\n return stock_list_items\n\n\ndef QA_fetch_index_list_adv(collections=DATABASE.index_list):\n '''\n '获取股票列表'\n :param collections: mongodb 数据库\n :return: DataFrame\n '''\n index_list_items = QA_fetch_index_list(collections)\n if len(index_list_items) == 0:\n print(\"QA Error QA_fetch_index_list_adv call item for item in collections.find() return 0 item, maybe the DATABASE.index_list is empty!\")\n return None\n return index_list_items\n\ndef QA_fetch_future_day_adv(\n code,\n start, end=None,\n if_drop_index=True,\n # 🛠 todo collections 参数没有用到, 且数据库是固定的, 这个变量后期去掉\n collections=DATABASE.index_day):\n '''\n :param code: 
code: 字符串str eg 600085\n :param start: 字符串str 开始日期 eg 2011-01-01\n :param end: 字符串str 结束日期 eg 2011-05-01\n :param if_drop_index: Ture False , dataframe drop index or not\n :param collections: mongodb 数据库\n :return:\n '''\n '获取期货日线'\n end = start if end is None else end\n start = str(start)[0:10]\n end = str(end)[0:10]\n\n # 🛠 todo 报告错误 如果开始时间 在 结束时间之后\n # 🛠 todo 如果相等\n\n res = QA_fetch_future_day(code, start, end, format='pd')\n if res is None:\n print(\"QA Error QA_fetch_future_day_adv parameter code=%s start=%s end=%s call QA_fetch_future_day return None\" % (\n code, start, end))\n else:\n res_set_index = res.set_index(['date', 'code'])\n # if res_set_index is None:\n # print(\"QA Error QA_fetch_index_day_adv set index 'date, code' return None\")\n # return None\n return QA_DataStruct_Future_day(res_set_index)\n\n\ndef QA_fetch_future_min_adv(\n code,\n start, end=None,\n frequence='1min',\n if_drop_index=True,\n collections=DATABASE.future_min):\n '''\n '获取股票分钟线'\n :param code:\n :param start:\n :param end:\n :param frequence:\n :param if_drop_index:\n :param collections:\n :return:\n '''\n if frequence in ['1min', '1m']:\n frequence = '1min'\n elif frequence in ['5min', '5m']:\n frequence = '5min'\n elif frequence in ['15min', '15m']:\n frequence = '15min'\n elif frequence in ['30min', '30m']:\n frequence = '30min'\n elif frequence in ['60min', '60m']:\n frequence = '60min'\n\n # __data = [] 没有使用\n\n end = start if end is None else end\n if len(start) == 10:\n start = '{} 00:00:00'.format(start)\n if len(end) == 10:\n end = '{} 15:00:00'.format(end)\n\n # 🛠 todo 报告错误 如果开始时间 在 结束时间之后\n\n # if start == end:\n # 🛠 todo 如果相等,根据 frequence 获取开始时间的 时间段 QA_fetch_index_min_adv, 不支持start end是相等的\n #print(\"QA Error QA_fetch_index_min_adv parameter code=%s , start=%s, end=%s is equal, should have time span! 
\" % (code, start, end))\n # return None\n\n res = QA_fetch_future_min(\n code, start, end, format='pd', frequence=frequence)\n if res is None:\n print(\"QA Error QA_fetch_future_min_adv parameter code=%s start=%s end=%s frequence=%s call QA_fetch_future_min return None\" % (\n code, start, end, frequence))\n else:\n res_reset_index = res.set_index(\n ['datetime', 'code'], drop=if_drop_index)\n # if res_reset_index is None:\n # print(\"QA Error QA_fetch_index_min_adv set index 'date, code' return None\")\n return QA_DataStruct_Future_min(res_reset_index)\n\n\ndef QA_fetch_future_list_adv(collections=DATABASE.future_list):\n '''\n '获取股票列表'\n :param collections: mongodb 数据库\n :return: DataFrame\n '''\n future_list_items = QA_fetch_future_list()\n if len(future_list_items) == 0:\n print(\"QA Error QA_fetch_future_list_adv call item for item in collections.find() return 0 item, maybe the DATABASE.future_list is empty!\")\n return None\n return future_list_items\n\n\ndef QA_fetch_stock_block_adv(code=None, blockname=None, collections=DATABASE.stock_block):\n '''\n 返回板块 ❌\n :param code:\n :param blockname:\n :param collections: 默认数据库 stock_block\n :return: QA_DataStruct_Stock_block\n '''\n if code is not None and blockname is None:\n # 返回这个股票代码所属的板块\n data = pd.DataFrame([item for item in collections.find(\n {'code': {'$in': code}})])\n data = data.drop(['_id'], axis=1)\n\n return QA_DataStruct_Stock_block(data.set_index(['blockname', 'code'], drop=True).drop_duplicates())\n elif blockname is not None and code is None:\n #\n # 🛠 todo fnished 返回 这个板块所有的股票\n # 返回该板块所属的股票\n # print(\"QA Error blockname is Not none code none, return all code from its block name have not implemented yet !\")\n\n items_from_collections = [item for item in collections.find(\n {'blockname': re.compile(blockname)})]\n data = pd.DataFrame(items_from_collections).drop(['_id'], axis=1)\n data_set_index = data.set_index(['blockname', 'code'], drop=True)\n return QA_DataStruct_Stock_block(data_set_index)\n\n else:\n # 🛠 todo 返回 判断 这个股票是否和属于该板块\n data = pd.DataFrame(\n [item for item in collections.find()]).drop(['_id'], axis=1)\n data_set_index = data.set_index(['blockname', 'code'], drop=True)\n return QA_DataStruct_Stock_block(data_set_index)\n\n\ndef QA_fetch_stock_realtime_adv(code=None,\n num=1,\n collections=DATABASE.get_collection('realtime_{}'.format(datetime.date.today()))):\n '''\n 返回当日的上下五档, code可以是股票可以是list, num是每个股票获取的数量\n :param code:\n :param num:\n :param collections: realtime_XXXX-XX-XX 每天实时时间\n :return: DataFrame\n '''\n if code is not None:\n # code 必须转换成list 去查询数据库\n if isinstance(code, str):\n code = [code]\n elif isinstance(code, list):\n pass\n else:\n print(\n \"QA Error QA_fetch_stock_realtime_adv parameter code is not List type or String type\")\n\n items_from_collections = [item for item in collections.find(\n {'code': {'$in': code}}, limit=num*len(code), sort=[('datetime', pymongo.DESCENDING)])]\n if items_from_collections is None:\n print(\"QA Error QA_fetch_stock_realtime_adv find parameter code={} num={} collection={} return NOne\".format(\n code, num, collections))\n return\n\n data = pd.DataFrame(items_from_collections)\n data_set_index = data.set_index(\n ['datetime', 'code'], drop=False).drop(['_id'], axis=1)\n return data_set_index\n else:\n print(\"QA Error QA_fetch_stock_realtime_adv parameter code is None\")\n\n\ndef QA_fetch_financial_report_adv(code, start, end=None, ltype='EN'):\n \"\"\"高级财务查询接口\n Arguments:\n code {[type]} -- [description]\n start {[type]} -- [description]\n Keyword 
Arguments:\n end {[type]} -- [description] (default: {None})\n \"\"\"\n\n if end is None:\n\n return QA_DataStruct_Financial(QA_fetch_financial_report(code, start, ltype=ltype))\n else:\n series = pd.Series(\n data=month_data, index=pd.to_datetime(month_data), name='date')\n timerange = series.loc[start:end].tolist()\n return QA_DataStruct_Financial(QA_fetch_financial_report(code, timerange, ltype=ltype))\n\n\n\n\n# def QA_fetch_financial_report_adv(code, start='all', end=None, type='report'):\n# \"\"\"高级财务查询接口\n\n# Arguments:\n# code {[type]} -- [description]\n# start {[type]} -- [description]\n\n# Keyword Arguments:\n# end {[type]} -- [description] (default: {None})\n# \"\"\"\n# end = start if end is None else end\n# start = str(start)[0:10]\n# end = str(end)[0:10]\n\n# if start == 'all':\n# start = '1990-01-01'\n# end = str(datetime.date.today())\n\n# if end is None:\n# end = str(datetime.date.today())\n# date_list = list(pd.DataFrame.from_dict(QA_util_getBetweenQuarter(\n# start, QA_util_datetime_to_strdate(QA_util_add_months(end, -3)))).T.iloc[:, 1])\n# if type == 'report':\n# return QA_DataStruct_Financial(QA_fetch_financial_report(code, date_list))\n# elif type == 'date':\n# return QA_DataStruct_Financial(QA_fetch_financial_report(code, date_list, type='date'))\n# else:\n# daterange = pd.date_range(start, end)\n# timerange = [item.strftime('%Y-%m-%d') for item in list(daterange)]\n# if type == 'report':\n# return QA_DataStruct_Financial(QA_fetch_financial_report(code, timerange))\n# elif type == 'date':\n# return QA_DataStruct_Financial(QA_fetch_financial_report(code, timerange, type='date'))\n\n\ndef QA_fetch_stock_financial_calendar_adv(code, start=\"all\", end=None, format='pd', collections=DATABASE.report_calendar):\n '获取股票日线'\n #code= [code] if isinstance(code,str) else code\n end = start if end is None else end\n start = str(start)[0:10]\n end = str(end)[0:10]\n\n # code checking\n if start == 'all':\n start = '1990-01-01'\n end = str(datetime.date.today())\n\n if end is None:\n\n return QA_DataStruct_Financial(QA_fetch_stock_financial_calendar(code, start, str(datetime.date.today())))\n else:\n series = pd.Series(\n data=month_data, index=pd.to_datetime(month_data), name='date')\n timerange = series.loc[start:end].tolist()\n return QA_DataStruct_Financial(QA_fetch_stock_financial_calendar(code, start, end))\n\n\ndef QA_fetch_stock_divyield_adv(code, start=\"all\", end=None, format='pd', collections=DATABASE.report_calendar):\n '获取股票日线'\n #code= [code] if isinstance(code,str) else code\n end = start if end is None else end\n start = str(start)[0:10]\n end = str(end)[0:10]\n\n # code checking\n if start == 'all':\n start = '1990-01-01'\n end = str(datetime.date.today())\n\n if end is None:\n\n return QA_DataStruct_Financial(QA_fetch_stock_divyield(code, start, str(datetime.date.today())))\n else:\n series = pd.Series(\n data=month_data, index=pd.to_datetime(month_data), name='date')\n timerange = series.loc[start:end].tolist()\n return QA_DataStruct_Financial(QA_fetch_stock_divyield(code, start, end))\n\n\nif __name__ == '__main__':\n QA_fetch_stock_realtime_adv(['000001', '000002'], num=10)\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
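Note on the QUANTAXIS record above: the `_adv` helpers are thin wrappers that pull bars from MongoDB, set a `(date, code)` or `(datetime, code)` index, and wrap the DataFrame in a `QA_DataStruct_*` object, printing an error and returning None when the query fails. A minimal usage sketch in Python — the import path `QUANTAXIS.QAFetch.QAQuery_Advance`, the sample code `000001`, and the date range are illustrative assumptions, and a populated local MongoDB is assumed:

    from QUANTAXIS.QAFetch.QAQuery_Advance import (
        QA_fetch_stock_day_adv,
        QA_fetch_stock_min_adv,
    )

    # Daily bars; returns a QA_DataStruct_Stock_day indexed by (date, code),
    # or None if the underlying QA_fetch_stock_day query came back empty.
    day = QA_fetch_stock_day_adv('000001', '2018-01-01', '2018-06-30')
    if day is not None:
        print(day.data.head())

    # 15-minute bars; date-only strings are expanded to the trading
    # session bounds (09:30:00 / 15:00:00) by the wrapper itself.
    mins = QA_fetch_stock_min_adv('000001', '2018-01-02', '2018-01-05',
                                  frequence='15min')

Because these wrappers print-and-return-None instead of raising, callers have to None-check every result, as the sketch does.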
jiaowozidaren/graduation_paper | [
"7c2f1a9138ff79fedb12b0250ca4c90bbc40a49c"
] | [
"my_ising_grid.py"
] | [
"import numpy as np\nimport Ising as Is\nimport matplotlib.pyplot as plt\nimport math\nimport matplotlib as mpl\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.animation as animation\n\nfig, ax = plt.subplots()\ndata = []\n\nT = 4\ng = Is.Ising_mat(100,100,T)\ng.random_state()\n\nprint(\"begin\")\n\nfor i in range(150):\n _ = g.cluster_flip()\n #_ = g.single_flip()\n data.append(g.mat.copy())\n \nfor frame in range(0, len(data)):\n ax.cla()\n \n ax.set_title(\"Step {}\".format(frame))\n plt.imshow(data[frame], cmap=mpl.cm.winter)\n plt.savefig('D:\\\\R\\\\ising_grid'+str(frame)+'.png')\n\nprint(\"competed\")\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
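The Ising record above saves 150 per-frame PNGs even though it imports `FuncAnimation` without ever using it. A sketch of how the same snapshots could be rendered as a single animation instead — `Ising.Ising_mat` is the author's own class whose `cluster_flip`/`mat` behavior is assumed from the record, and the Pillow GIF writer is an assumption about the local environment:

    import matplotlib as mpl
    import matplotlib.pyplot as plt
    from matplotlib.animation import FuncAnimation
    import Ising as Is

    g = Is.Ising_mat(100, 100, 4)   # 100x100 lattice at T = 4
    g.random_state()
    data = []
    for _ in range(150):
        g.cluster_flip()            # one cluster update per frame
        data.append(g.mat.copy())

    fig, ax = plt.subplots()
    im = ax.imshow(data[0], cmap=mpl.cm.winter)

    def update(frame):
        # update the existing image artist instead of redrawing the axes
        im.set_data(data[frame])
        ax.set_title("Step {}".format(frame))
        return [im]

    anim = FuncAnimation(fig, update, frames=len(data), interval=100)
    anim.save('ising.gif', writer='pillow')

Updating one `AxesImage` in place avoids the `ax.cla()` + `plt.imshow` cycle of the original script, which re-creates the artist on every frame.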
Anjum48/segmentation_models.pytorch | [
"871c88cc774d4d4416ce26b5c8b7162405248311"
] | [
"segmentation_models_pytorch/encoders/__init__.py"
] | [
"import functools\nimport torch.utils.model_zoo as model_zoo\n\nfrom .resnet import resnet_encoders\nfrom .dpn import dpn_encoders\nfrom .vgg import vgg_encoders\nfrom .senet import senet_encoders\nfrom .densenet import densenet_encoders\nfrom .inceptionresnetv2 import inceptionresnetv2_encoders\nfrom .inceptionv4 import inceptionv4_encoders\nfrom .efficientnet import efficient_net_encoders\nfrom .mobilenet import mobilenet_encoders\nfrom .xception import xception_encoders\nfrom .timm_efficientnet import timm_efficientnet_encoders\nfrom .timm_resnest import timm_resnest_encoders\nfrom .timm_res2net import timm_res2net_encoders\nfrom .timm_resnet import timm_resnet_encoders\nfrom .timm_regnet import timm_regnet_encoders\nfrom .timm_sknet import timm_sknet_encoders\n\n# from .timm_nfnet import timm_nfnet_encoders\nfrom ._preprocessing import preprocess_input\n\nencoders = {}\nencoders.update(resnet_encoders)\nencoders.update(dpn_encoders)\nencoders.update(vgg_encoders)\nencoders.update(senet_encoders)\nencoders.update(densenet_encoders)\nencoders.update(inceptionresnetv2_encoders)\nencoders.update(inceptionv4_encoders)\nencoders.update(efficient_net_encoders)\nencoders.update(mobilenet_encoders)\nencoders.update(xception_encoders)\nencoders.update(timm_efficientnet_encoders)\nencoders.update(timm_resnest_encoders)\nencoders.update(timm_res2net_encoders)\nencoders.update(timm_resnet_encoders)\nencoders.update(timm_regnet_encoders)\nencoders.update(timm_sknet_encoders)\n# encoders.update(timm_nfnet_encoders)\n\n\ndef get_encoder(name, in_channels=3, depth=5, weights=None):\n\n try:\n Encoder = encoders[name][\"encoder\"]\n except KeyError:\n raise KeyError(\n \"Wrong encoder name `{}`, supported encoders: {}\".format(\n name, list(encoders.keys())\n )\n )\n\n params = encoders[name][\"params\"]\n params.update(depth=depth)\n encoder = Encoder(**params)\n\n if weights is not None:\n try:\n settings = encoders[name][\"pretrained_settings\"][weights]\n except KeyError:\n raise KeyError(\n \"Wrong pretrained weights `{}` for encoder `{}`. Available options are: {}\".format(\n weights,\n name,\n list(encoders[name][\"pretrained_settings\"].keys()),\n )\n )\n encoder.load_state_dict(model_zoo.load_url(settings[\"url\"]))\n\n encoder.set_in_channels(in_channels)\n\n return encoder\n\n\ndef get_encoder_names():\n return list(encoders.keys())\n\n\ndef get_preprocessing_params(encoder_name, pretrained=\"imagenet\"):\n settings = encoders[encoder_name][\"pretrained_settings\"]\n\n if pretrained not in settings.keys():\n raise ValueError(\"Available pretrained options {}\".format(settings.keys()))\n\n formatted_settings = {}\n formatted_settings[\"input_space\"] = settings[pretrained].get(\"input_space\")\n formatted_settings[\"input_range\"] = settings[pretrained].get(\"input_range\")\n formatted_settings[\"mean\"] = settings[pretrained].get(\"mean\")\n formatted_settings[\"std\"] = settings[pretrained].get(\"std\")\n return formatted_settings\n\n\ndef get_preprocessing_fn(encoder_name, pretrained=\"imagenet\"):\n params = get_preprocessing_params(encoder_name, pretrained=pretrained)\n return functools.partial(preprocess_input, **params)\n"
] | [
[
"torch.utils.model_zoo.load_url"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
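The encoders module above is a registry pattern: each entry maps an encoder name to its class, constructor params, and per-weights pretrained settings, and `get_encoder` resolves all three. A usage sketch — the installed package path and the `resnet34`/`imagenet` keys are assumptions based on upstream segmentation_models.pytorch conventions rather than anything stated in this record:

    import torch
    from segmentation_models_pytorch.encoders import (
        get_encoder, get_encoder_names, get_preprocessing_fn)

    print(get_encoder_names()[:5])        # peek at the registry keys

    # Build a backbone by name; an unknown name (or weights key) raises
    # a KeyError that lists the supported options.
    encoder = get_encoder('resnet34', in_channels=3, depth=5,
                          weights='imagenet')
    preprocess = get_preprocessing_fn('resnet34', pretrained='imagenet')

    x = torch.randn(1, 3, 224, 224)
    feats = encoder(x)                    # multi-scale feature maps
    print([f.shape for f in feats])

The explicit KeyError branches in `get_encoder` echo back `list(encoders.keys())`, so a typo in the encoder name surfaces the full set of valid options instead of a bare lookup failure.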
zjzh/mne-python | [
"89e163f60be122be2150ee93a9d3fcea5641b8b7"
] | [
"mne/viz/_brain/_brain.py"
] | [
"# Authors: Alexandre Gramfort <[email protected]>\n# Eric Larson <[email protected]>\n# Oleh Kozynets <[email protected]>\n# Guillaume Favelier <[email protected]>\n# jona-sassenhagen <[email protected]>\n# Joan Massich <[email protected]>\n#\n# License: Simplified BSD\n\nimport contextlib\nfrom functools import partial\nfrom io import BytesIO\nimport os\nimport os.path as op\nimport sys\nimport time\nimport copy\nimport traceback\nimport warnings\n\nimport numpy as np\nfrom collections import OrderedDict\nfrom decorator import decorator\n\nfrom .colormap import calculate_lut\nfrom .surface import _Surface\nfrom .view import views_dicts, _lh_views_dict\nfrom .callback import (ShowView, TimeCallBack, SmartCallBack,\n UpdateLUT, UpdateColorbarScale)\n\nfrom ..utils import (_show_help_fig, _get_color_list, concatenate_images,\n _generate_default_filename, _save_ndarray_img)\nfrom .._3d import (_process_clim, _handle_time, _check_views,\n _handle_sensor_types, _plot_sensors)\nfrom ...defaults import _handle_default, DEFAULTS\nfrom ...fixes import _point_data, _cell_data\nfrom ..._freesurfer import (vertex_to_mni, read_talxfm, read_freesurfer_lut,\n _get_head_surface, _get_skull_surface)\nfrom ...io.pick import pick_types\nfrom ...io.meas_info import Info\nfrom ...surface import (mesh_edges, _mesh_borders, _marching_cubes,\n get_meg_helmet_surf)\nfrom ...source_space import SourceSpaces\nfrom ...transforms import (apply_trans, invert_transform, _get_trans,\n _get_transforms_to_coord_frame)\nfrom ...utils import (_check_option, logger, verbose, fill_doc, _validate_type,\n use_log_level, Bunch, _ReuseCycle, warn,\n get_subjects_dir, _check_fname, _to_rgb)\n\n\n_ARROW_MOVE = 10 # degrees per press\n\n\n@decorator\ndef safe_event(fun, *args, **kwargs):\n \"\"\"Protect against PyQt5 exiting on event-handling errors.\"\"\"\n try:\n return fun(*args, **kwargs)\n except Exception:\n traceback.print_exc(file=sys.stderr)\n\n\nclass _Overlay(object):\n def __init__(self, scalars, colormap, rng, opacity, name):\n self._scalars = scalars\n self._colormap = colormap\n assert rng is not None\n self._rng = rng\n self._opacity = opacity\n self._name = name\n\n def to_colors(self):\n from .._3d import _get_cmap\n from matplotlib.colors import Colormap, ListedColormap\n\n if isinstance(self._colormap, str):\n cmap = _get_cmap(self._colormap)\n elif isinstance(self._colormap, Colormap):\n cmap = self._colormap\n else:\n cmap = ListedColormap(\n self._colormap / 255., name=str(type(self._colormap)))\n logger.debug(\n f'Color mapping {repr(self._name)} with {cmap.name} '\n f'colormap and range {self._rng}')\n\n rng = self._rng\n assert rng is not None\n scalars = _norm(self._scalars, rng)\n\n colors = cmap(scalars)\n if self._opacity is not None:\n colors[:, 3] *= self._opacity\n return colors\n\n\ndef _norm(x, rng):\n if rng[0] == rng[1]:\n factor = 1 if rng[0] == 0 else 1e-6 * rng[0]\n else:\n factor = rng[1] - rng[0]\n return (x - rng[0]) / factor\n\n\nclass _LayeredMesh(object):\n def __init__(self, renderer, vertices, triangles, normals):\n self._renderer = renderer\n self._vertices = vertices\n self._triangles = triangles\n self._normals = normals\n\n self._polydata = None\n self._actor = None\n self._is_mapped = False\n\n self._current_colors = None\n self._cached_colors = None\n self._overlays = OrderedDict()\n\n self._default_scalars = np.ones(vertices.shape)\n self._default_scalars_name = 'Data'\n\n def map(self):\n kwargs = {\n \"color\": None,\n \"pickable\": True,\n \"rgba\": True,\n }\n mesh_data = 
self._renderer.mesh(\n x=self._vertices[:, 0],\n y=self._vertices[:, 1],\n z=self._vertices[:, 2],\n triangles=self._triangles,\n normals=self._normals,\n scalars=self._default_scalars,\n **kwargs\n )\n self._actor, self._polydata = mesh_data\n self._is_mapped = True\n\n def _compute_over(self, B, A):\n assert A.ndim == B.ndim == 2\n assert A.shape[1] == B.shape[1] == 4\n A_w = A[:, 3:] # * 1\n B_w = B[:, 3:] * (1 - A_w)\n C = A.copy()\n C[:, :3] *= A_w\n C[:, :3] += B[:, :3] * B_w\n C[:, 3:] += B_w\n C[:, :3] /= C[:, 3:]\n return np.clip(C, 0, 1, out=C)\n\n def _compose_overlays(self):\n B = cache = None\n for overlay in self._overlays.values():\n A = overlay.to_colors()\n if B is None:\n B = A\n else:\n cache = B\n B = self._compute_over(cache, A)\n return B, cache\n\n def add_overlay(self, scalars, colormap, rng, opacity, name):\n overlay = _Overlay(\n scalars=scalars,\n colormap=colormap,\n rng=rng,\n opacity=opacity,\n name=name,\n )\n self._overlays[name] = overlay\n colors = overlay.to_colors()\n if self._current_colors is None:\n self._current_colors = colors\n else:\n # save previous colors to cache\n self._cached_colors = self._current_colors\n self._current_colors = self._compute_over(\n self._cached_colors, colors)\n\n # apply the texture\n self._apply()\n\n def remove_overlay(self, names):\n to_update = False\n if not isinstance(names, list):\n names = [names]\n for name in names:\n if name in self._overlays:\n del self._overlays[name]\n to_update = True\n if to_update:\n self.update()\n\n def _apply(self):\n if self._current_colors is None or self._renderer is None:\n return\n self._renderer._set_mesh_scalars(\n mesh=self._polydata,\n scalars=self._current_colors,\n name=self._default_scalars_name,\n )\n\n def update(self, colors=None):\n if colors is not None and self._cached_colors is not None:\n self._current_colors = self._compute_over(\n self._cached_colors, colors)\n else:\n self._current_colors, self._cached_colors = \\\n self._compose_overlays()\n self._apply()\n\n def _clean(self):\n mapper = self._actor.GetMapper()\n mapper.SetLookupTable(None)\n self._actor.SetMapper(None)\n self._actor = None\n self._polydata = None\n self._renderer = None\n\n def update_overlay(self, name, scalars=None, colormap=None,\n opacity=None, rng=None):\n overlay = self._overlays.get(name, None)\n if overlay is None:\n return\n if scalars is not None:\n overlay._scalars = scalars\n if colormap is not None:\n overlay._colormap = colormap\n if opacity is not None:\n overlay._opacity = opacity\n if rng is not None:\n overlay._rng = rng\n # partial update: use cache if possible\n if name == list(self._overlays.keys())[-1]:\n self.update(colors=overlay.to_colors())\n else: # full update\n self.update()\n\n\n@fill_doc\nclass Brain(object):\n \"\"\"Class for visualizing a brain.\n\n .. warning::\n The API for this class is not currently complete. We suggest using\n :meth:`mne.viz.plot_source_estimates` with the PyVista backend\n enabled to obtain a ``Brain`` instance.\n\n Parameters\n ----------\n subject_id : str\n Subject name in Freesurfer subjects dir.\n hemi : str\n Hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case\n of 'both', both hemispheres are shown in the same window.\n In the case of 'split' hemispheres are displayed side-by-side\n in different viewing panes.\n surf : str\n FreeSurfer surface mesh name (ie 'white', 'inflated', etc.).\n title : str\n Title for the window.\n cortex : str, list, dict\n Specifies how the cortical surface is rendered. Options:\n\n 1. 
The name of one of the preset cortex styles:\n ``'classic'`` (default), ``'high_contrast'``,\n ``'low_contrast'``, or ``'bone'``.\n 2. A single color-like argument to render the cortex as a single\n color, e.g. ``'red'`` or ``(0.1, 0.4, 1.)``.\n 3. A list of two color-like used to render binarized curvature\n values for gyral (first) and sulcal (second). regions, e.g.,\n ``['red', 'blue']`` or ``[(1, 0, 0), (0, 0, 1)]``.\n 4. A dict containing keys ``'vmin', 'vmax', 'colormap'`` with\n values used to render the binarized curvature (where 0 is gyral,\n 1 is sulcal).\n\n .. versionchanged:: 0.24\n Add support for non-string arguments.\n alpha : float in [0, 1]\n Alpha level to control opacity of the cortical surface.\n size : int | array-like, shape (2,)\n The size of the window, in pixels. can be one number to specify\n a square window, or a length-2 sequence to specify (width, height).\n background : tuple(int, int, int)\n The color definition of the background: (red, green, blue).\n foreground : matplotlib color\n Color of the foreground (will be used for colorbars and text).\n None (default) will use black or white depending on the value\n of ``background``.\n figure : list of Figure | None\n If None (default), a new window will be created with the appropriate\n views.\n subjects_dir : str | None\n If not None, this directory will be used as the subjects directory\n instead of the value set using the SUBJECTS_DIR environment\n variable.\n views : list | str\n The views to use.\n offset : bool | str\n If True, shifts the right- or left-most x coordinate of the left and\n right surfaces, respectively, to be at zero. This is useful for viewing\n inflated surface where hemispheres typically overlap. Can be \"auto\"\n (default) use True with inflated surfaces and False otherwise\n (Default: 'auto'). Only used when ``hemi='both'``.\n\n .. versionchanged:: 0.23\n Default changed to \"auto\".\n show_toolbar : bool\n If True, toolbars will be shown for each view.\n offscreen : bool\n If True, rendering will be done offscreen (not shown). Useful\n mostly for generating images or screenshots, but can be buggy.\n Use at your own risk.\n interaction : str\n Can be \"trackball\" (default) or \"terrain\", i.e. a turntable-style\n camera.\n units : str\n Can be 'm' or 'mm' (default).\n %(view_layout)s\n silhouette : dict | bool\n As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity\n and ``decimate`` (level of decimation between 0 and 1 or None) of the\n brain's silhouette to display. If True, the default values are used\n and if False, no silhouette will be displayed. Defaults to False.\n theme : str | path-like\n Can be \"auto\" (default), \"light\", or \"dark\" or a path-like to a\n custom stylesheet. For Dark-Mode and automatic Dark-Mode-Detection,\n :mod:`qdarkstyle` respectively and `darkdetect\n <https://github.com/albertosottile/darkdetect>`__ is required.\n show : bool\n Display the window as soon as it is ready. Defaults to True.\n\n Attributes\n ----------\n geo : dict\n A dictionary of PyVista surface objects for each hemisphere.\n overlays : dict\n The overlays.\n\n Notes\n -----\n This table shows the capabilities of each Brain backend (\"✓\" for full\n support, and \"-\" for partial support):\n\n .. 
table::\n :widths: auto\n\n +---------------------------+--------------+---------------+\n | 3D function: | surfer.Brain | mne.viz.Brain |\n +===========================+==============+===============+\n | add_annotation | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | add_data | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | add_foci | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | add_head | | ✓ |\n +---------------------------+--------------+---------------+\n | add_label | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | add_sensors | | ✓ |\n +---------------------------+--------------+---------------+\n | add_skull | | ✓ |\n +---------------------------+--------------+---------------+\n | add_text | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | add_volume_labels | | ✓ |\n +---------------------------+--------------+---------------+\n | close | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | data | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | foci | ✓ | |\n +---------------------------+--------------+---------------+\n | labels | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | remove_data | | ✓ |\n +---------------------------+--------------+---------------+\n | remove_foci | ✓ | |\n +---------------------------+--------------+---------------+\n | remove_head | | ✓ |\n +---------------------------+--------------+---------------+\n | remove_labels | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | remove_annotations | - | ✓ |\n +---------------------------+--------------+---------------+\n | remove_sensors | | ✓ |\n +---------------------------+--------------+---------------+\n | remove_skull | | ✓ |\n +---------------------------+--------------+---------------+\n | remove_text | | ✓ |\n +---------------------------+--------------+---------------+\n | remove_volume_labels | | ✓ |\n +---------------------------+--------------+---------------+\n | scale_data_colormap | ✓ | |\n +---------------------------+--------------+---------------+\n | save_image | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | save_movie | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | screenshot | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | show_view | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | TimeViewer | ✓ | ✓ |\n +---------------------------+--------------+---------------+\n | enable_depth_peeling | | ✓ |\n +---------------------------+--------------+---------------+\n | get_picked_points | | ✓ |\n +---------------------------+--------------+---------------+\n | add_data(volume) | | ✓ |\n +---------------------------+--------------+---------------+\n | view_layout | | ✓ |\n +---------------------------+--------------+---------------+\n | flatmaps | | ✓ |\n +---------------------------+--------------+---------------+\n | vertex picking | | ✓ |\n +---------------------------+--------------+---------------+\n | label picking | | ✓ |\n +---------------------------+--------------+---------------+\n \"\"\"\n\n def __init__(self, subject_id, hemi='both', surf='pial', title=None,\n cortex=\"classic\", alpha=1.0, size=800, background=\"black\",\n foreground=None, figure=None, subjects_dir=None,\n views='auto', offset='auto', show_toolbar=False,\n 
offscreen=False, interaction='trackball', units='mm',\n view_layout='vertical', silhouette=False, theme='auto',\n show=True):\n from ..backends.renderer import backend, _get_renderer\n\n if hemi is None:\n hemi = 'vol'\n hemi = self._check_hemi(hemi, extras=('both', 'split', 'vol'))\n if hemi in ('both', 'split'):\n self._hemis = ('lh', 'rh')\n else:\n assert hemi in ('lh', 'rh', 'vol')\n self._hemis = (hemi, )\n self._view_layout = _check_option('view_layout', view_layout,\n ('vertical', 'horizontal'))\n\n if figure is not None and not isinstance(figure, int):\n backend._check_3d_figure(figure)\n if title is None:\n self._title = subject_id\n else:\n self._title = title\n self._interaction = 'trackball'\n\n self._bg_color = _to_rgb(background, name='background')\n if foreground is None:\n foreground = 'w' if sum(self._bg_color) < 2 else 'k'\n self._fg_color = _to_rgb(foreground, name='foreground')\n del background, foreground\n views = _check_views(surf, views, hemi)\n col_dict = dict(lh=1, rh=1, both=1, split=2, vol=1)\n shape = (len(views), col_dict[hemi])\n if self._view_layout == 'horizontal':\n shape = shape[::-1]\n self._subplot_shape = shape\n\n size = tuple(np.atleast_1d(size).round(0).astype(int).flat)\n if len(size) not in (1, 2):\n raise ValueError('\"size\" parameter must be an int or length-2 '\n 'sequence of ints.')\n size = size if len(size) == 2 else size * 2 # 1-tuple to 2-tuple\n subjects_dir = get_subjects_dir(subjects_dir)\n\n self.theme = theme\n\n self.time_viewer = False\n self._hemi = hemi\n self._units = units\n self._alpha = float(alpha)\n self._subject_id = subject_id\n self._subjects_dir = subjects_dir\n self._views = views\n self._times = None\n self._vertex_to_label_id = dict()\n self._annotation_labels = dict()\n self._labels = {'lh': list(), 'rh': list()}\n self._unnamed_label_id = 0 # can only grow\n self._annots = {'lh': list(), 'rh': list()}\n self._layered_meshes = dict()\n self._actors = dict()\n self._elevation_rng = [15, 165] # range of motion of camera on theta\n self._lut_locked = None\n self._cleaned = False\n # default values for silhouette\n self._silhouette = {\n 'color': self._bg_color,\n 'line_width': 2,\n 'alpha': alpha,\n 'decimate': 0.9,\n }\n _validate_type(silhouette, (dict, bool), 'silhouette')\n if isinstance(silhouette, dict):\n self._silhouette.update(silhouette)\n self.silhouette = True\n else:\n self.silhouette = silhouette\n self._scalar_bar = None\n # for now only one time label can be added\n # since it is the same for all figures\n self._time_label_added = False\n # array of data used by TimeViewer\n self._data = {}\n self.geo = {}\n self.set_time_interpolation('nearest')\n\n geo_kwargs = self._cortex_colormap(cortex)\n # evaluate at the midpoint of the used colormap\n val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin'])\n self._brain_color = geo_kwargs['colormap'](val)\n\n # load geometry for one or both hemispheres as necessary\n _validate_type(offset, (str, bool), 'offset')\n if isinstance(offset, str):\n _check_option('offset', offset, ('auto',), extra='when str')\n offset = (surf in ('inflated', 'flat'))\n offset = None if (not offset or hemi != 'both') else 0.0\n logger.debug(f'Hemi offset: {offset}')\n\n self._renderer = _get_renderer(name=self._title, size=size,\n bgcolor=self._bg_color,\n shape=shape,\n fig=figure)\n self._renderer._window_close_connect(self._clean)\n self._renderer._window_set_theme(theme)\n self.plotter = self._renderer.plotter\n\n self._setup_canonical_rotation()\n\n # plot hemis\n 
for h in ('lh', 'rh'):\n if h not in self._hemis:\n continue # don't make surface if not chosen\n # Initialize a Surface object as the geometry\n geo = _Surface(self._subject_id, h, surf, self._subjects_dir,\n offset, units=self._units, x_dir=self._rigid[0, :3])\n # Load in the geometry and curvature\n geo.load_geometry()\n geo.load_curvature()\n self.geo[h] = geo\n for _, _, v in self._iter_views(h):\n if self._layered_meshes.get(h) is None:\n mesh = _LayeredMesh(\n renderer=self._renderer,\n vertices=self.geo[h].coords,\n triangles=self.geo[h].faces,\n normals=self.geo[h].nn,\n )\n mesh.map() # send to GPU\n mesh.add_overlay(\n scalars=self.geo[h].bin_curv,\n colormap=geo_kwargs[\"colormap\"],\n rng=[geo_kwargs[\"vmin\"], geo_kwargs[\"vmax\"]],\n opacity=alpha,\n name='curv',\n )\n self._layered_meshes[h] = mesh\n # add metadata to the mesh for picking\n mesh._polydata._hemi = h\n else:\n actor = self._layered_meshes[h]._actor\n self._renderer.plotter.add_actor(actor, render=False)\n if self.silhouette:\n mesh = self._layered_meshes[h]\n self._renderer._silhouette(\n mesh=mesh._polydata,\n color=self._silhouette[\"color\"],\n line_width=self._silhouette[\"line_width\"],\n alpha=self._silhouette[\"alpha\"],\n decimate=self._silhouette[\"decimate\"],\n )\n self._renderer.set_camera(update=False, reset_camera=False,\n **views_dicts[h][v])\n\n self.interaction = interaction\n self._closed = False\n if show:\n self.show()\n # update the views once the geometry is all set\n for h in self._hemis:\n for ri, ci, v in self._iter_views(h):\n self.show_view(v, row=ri, col=ci, hemi=h)\n\n if surf == 'flat':\n self._renderer.set_interaction(\"rubber_band_2d\")\n\n def _setup_canonical_rotation(self):\n from ...coreg import fit_matched_points, _trans_from_params\n self._rigid = np.eye(4)\n try:\n xfm = read_talxfm(self._subject_id, self._subjects_dir)\n except Exception:\n return\n # XYZ+origin + halfway\n pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5])\n pts_subj = apply_trans(invert_transform(xfm), pts_tal)\n # we fit with scaling enabled, but then discard it (we just need\n # the rigid-body components)\n params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params')\n self._rigid[:] = _trans_from_params((True, True, False), params[:6])\n\n def setup_time_viewer(self, time_viewer=True, show_traces=True):\n \"\"\"Configure the time viewer parameters.\n\n Parameters\n ----------\n time_viewer : bool\n If True, enable widgets interaction. Defaults to True.\n\n show_traces : bool\n If True, enable visualization of time traces. Defaults to True.\n\n Notes\n -----\n The keyboard shortcuts are the following:\n\n '?': Display help window\n 'i': Toggle interface\n 's': Apply auto-scaling\n 'r': Restore original clim\n 'c': Clear all traces\n 'n': Shift the time forward by the playback speed\n 'b': Shift the time backward by the playback speed\n 'Space': Start/Pause playback\n 'Up': Decrease camera elevation angle\n 'Down': Increase camera elevation angle\n 'Left': Decrease camera azimuth angle\n 'Right': Increase camera azimuth angle\n \"\"\"\n if self.time_viewer:\n return\n if not self._data:\n raise ValueError(\"No data to visualize. See ``add_data``.\")\n self.time_viewer = time_viewer\n self.orientation = list(_lh_views_dict.keys())\n self.default_smoothing_range = [-1, 15]\n\n # Default configuration\n self.playback = False\n self.visibility = False\n self.refresh_rate_ms = max(int(round(1000. 
/ 60.)), 1)\n self.default_scaling_range = [0.2, 2.0]\n self.default_playback_speed_range = [0.01, 1]\n self.default_playback_speed_value = 0.01\n self.default_status_bar_msg = \"Press ? for help\"\n self.default_label_extract_modes = {\n \"stc\": [\"mean\", \"max\"],\n \"src\": [\"mean_flip\", \"pca_flip\", \"auto\"],\n }\n self.default_trace_modes = ('vertex', 'label')\n self.annot = None\n self.label_extract_mode = None\n all_keys = ('lh', 'rh', 'vol')\n self.act_data_smooth = {key: (None, None) for key in all_keys}\n self.color_list = _get_color_list()\n # remove grey for better contrast on the brain\n self.color_list.remove(\"#7f7f7f\")\n self.color_cycle = _ReuseCycle(self.color_list)\n self.mpl_canvas = None\n self.help_canvas = None\n self.rms = None\n self.picked_patches = {key: list() for key in all_keys}\n self.picked_points = {key: list() for key in all_keys}\n self.pick_table = dict()\n self._spheres = list()\n self._mouse_no_mvt = -1\n self.callbacks = dict()\n self.widgets = dict()\n self.keys = ('fmin', 'fmid', 'fmax')\n\n # Derived parameters:\n self.playback_speed = self.default_playback_speed_value\n _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces')\n self.interactor_fraction = 0.25\n if isinstance(show_traces, str):\n self.show_traces = True\n self.separate_canvas = False\n self.traces_mode = 'vertex'\n if show_traces == 'separate':\n self.separate_canvas = True\n elif show_traces == 'label':\n self.traces_mode = 'label'\n else:\n assert show_traces == 'vertex' # guaranteed above\n else:\n if isinstance(show_traces, bool):\n self.show_traces = show_traces\n else:\n show_traces = float(show_traces)\n if not 0 < show_traces < 1:\n raise ValueError(\n 'show traces, if numeric, must be between 0 and 1, '\n f'got {show_traces}')\n self.show_traces = True\n self.interactor_fraction = show_traces\n self.traces_mode = 'vertex'\n self.separate_canvas = False\n del show_traces\n\n self._configure_time_label()\n self._configure_scalar_bar()\n self._configure_shortcuts()\n self._configure_picking()\n self._configure_tool_bar()\n self._configure_dock()\n self._configure_menu()\n self._configure_status_bar()\n self._configure_playback()\n self._configure_help()\n # show everything at the end\n self.toggle_interface()\n self._renderer.show()\n\n # sizes could change, update views\n for hemi in ('lh', 'rh'):\n for ri, ci, v in self._iter_views(hemi):\n self.show_view(view=v, row=ri, col=ci)\n self._renderer._process_events()\n\n self._renderer._update()\n # finally, show the MplCanvas\n if self.show_traces:\n self.mpl_canvas.show()\n\n @safe_event\n def _clean(self):\n # resolve the reference cycle\n self.clear_glyphs()\n self.remove_annotations()\n # clear init actors\n for hemi in self._hemis:\n self._layered_meshes[hemi]._clean()\n self._clear_callbacks()\n self._clear_widgets()\n if getattr(self, 'mpl_canvas', None) is not None:\n self.mpl_canvas.clear()\n if getattr(self, 'act_data_smooth', None) is not None:\n for key in list(self.act_data_smooth.keys()):\n self.act_data_smooth[key] = None\n # XXX this should be done in PyVista\n for renderer in self._renderer._all_renderers:\n renderer.RemoveAllLights()\n # app_window cannot be set to None because it is used in __del__\n for key in ('lighting', 'interactor', '_RenderWindow'):\n setattr(self.plotter, key, None)\n # Qt LeaveEvent requires _Iren so we use _FakeIren instead of None\n # to resolve the ref to vtkGenericRenderWindowInteractor\n self.plotter._Iren = _FakeIren()\n if getattr(self.plotter, 'picker', None) 
is not None:\n self.plotter.picker = None\n # XXX end PyVista\n for key in ('plotter', 'window', 'dock', 'tool_bar', 'menu_bar',\n 'interactor', 'mpl_canvas', 'time_actor',\n 'picked_renderer', 'act_data_smooth', '_scalar_bar',\n 'actions', 'widgets', 'geo', '_data'):\n setattr(self, key, None)\n self._cleaned = True\n\n def toggle_interface(self, value=None):\n \"\"\"Toggle the interface.\n\n Parameters\n ----------\n value : bool | None\n If True, the widgets are shown and if False, they\n are hidden. If None, the state of the widgets is\n toggled. Defaults to None.\n \"\"\"\n if value is None:\n self.visibility = not self.visibility\n else:\n self.visibility = value\n\n # update tool bar and dock\n with self._renderer._window_ensure_minimum_sizes():\n if self.visibility:\n self._renderer._dock_show()\n self._renderer._tool_bar_update_button_icon(\n name=\"visibility\", icon_name=\"visibility_on\")\n else:\n self._renderer._dock_hide()\n self._renderer._tool_bar_update_button_icon(\n name=\"visibility\", icon_name=\"visibility_off\")\n\n self._renderer._update()\n\n def apply_auto_scaling(self):\n \"\"\"Detect automatically fitting scaling parameters.\"\"\"\n self._update_auto_scaling()\n\n def restore_user_scaling(self):\n \"\"\"Restore original scaling parameters.\"\"\"\n self._update_auto_scaling(restore=True)\n\n def toggle_playback(self, value=None):\n \"\"\"Toggle time playback.\n\n Parameters\n ----------\n value : bool | None\n If True, automatic time playback is enabled and if False,\n it's disabled. If None, the state of time playback is toggled.\n Defaults to None.\n \"\"\"\n if value is None:\n self.playback = not self.playback\n else:\n self.playback = value\n\n # update tool bar icon\n if self.playback:\n self._renderer._tool_bar_update_button_icon(\n name=\"play\", icon_name=\"pause\")\n else:\n self._renderer._tool_bar_update_button_icon(\n name=\"play\", icon_name=\"play\")\n\n if self.playback:\n time_data = self._data['time']\n max_time = np.max(time_data)\n if self._current_time == max_time: # start over\n self.set_time_point(0) # first index\n self._last_tick = time.time()\n\n def reset(self):\n \"\"\"Reset view and time step.\"\"\"\n self.reset_view()\n max_time = len(self._data['time']) - 1\n if max_time > 0:\n self.callbacks[\"time\"](\n self._data[\"initial_time_idx\"],\n update_widget=True,\n )\n self._renderer._update()\n\n def set_playback_speed(self, speed):\n \"\"\"Set the time playback speed.\n\n Parameters\n ----------\n speed : float\n The speed of the playback.\n \"\"\"\n self.playback_speed = speed\n\n @safe_event\n def _play(self):\n if self.playback:\n try:\n self._advance()\n except Exception:\n self.toggle_playback(value=False)\n raise\n\n def _advance(self):\n this_time = time.time()\n delta = this_time - self._last_tick\n self._last_tick = time.time()\n time_data = self._data['time']\n times = np.arange(self._n_times)\n time_shift = delta * self.playback_speed\n max_time = np.max(time_data)\n time_point = min(self._current_time + time_shift, max_time)\n # always use linear here -- this does not determine the data\n # interpolation mode, it just finds where we are (in time) in\n # terms of the time indices\n idx = np.interp(time_point, time_data, times)\n self.callbacks[\"time\"](idx, update_widget=True)\n if time_point == max_time:\n self.toggle_playback(value=False)\n\n def _configure_time_label(self):\n self.time_actor = self._data.get('time_actor')\n if self.time_actor is not None:\n self.time_actor.SetPosition(0.5, 0.03)\n 
self.time_actor.GetTextProperty().SetJustificationToCentered()\n self.time_actor.GetTextProperty().BoldOn()\n\n def _configure_scalar_bar(self):\n if self._scalar_bar is not None:\n self._scalar_bar.SetOrientationToVertical()\n self._scalar_bar.SetHeight(0.6)\n self._scalar_bar.SetWidth(0.05)\n self._scalar_bar.SetPosition(0.02, 0.2)\n\n def _configure_dock_time_widget(self, layout=None):\n len_time = len(self._data['time']) - 1\n if len_time < 1:\n return\n layout = self._renderer.dock_layout if layout is None else layout\n hlayout = self._renderer._dock_add_layout(vertical=False)\n self.widgets[\"min_time\"] = self._renderer._dock_add_label(\n value=\"-\", layout=hlayout)\n self._renderer._dock_add_stretch(hlayout)\n self.widgets[\"current_time\"] = self._renderer._dock_add_label(\n value=\"x\", layout=hlayout)\n self._renderer._dock_add_stretch(hlayout)\n self.widgets[\"max_time\"] = self._renderer._dock_add_label(\n value=\"+\", layout=hlayout)\n self._renderer._layout_add_widget(layout, hlayout)\n min_time = float(self._data['time'][0])\n max_time = float(self._data['time'][-1])\n self.widgets[\"min_time\"].set_value(f\"{min_time: .3f}\")\n self.widgets[\"max_time\"].set_value(f\"{max_time: .3f}\")\n self.widgets[\"current_time\"].set_value(f\"{self._current_time: .3f}\")\n\n def _configure_dock_playback_widget(self, name):\n layout = self._renderer._dock_add_group_box(name)\n len_time = len(self._data['time']) - 1\n\n # Time widget\n if len_time < 1:\n self.callbacks[\"time\"] = None\n self.widgets[\"time\"] = None\n else:\n self.callbacks[\"time\"] = TimeCallBack(\n brain=self,\n callback=self.plot_time_line,\n )\n self.widgets[\"time\"] = self._renderer._dock_add_slider(\n name=\"Time (s)\",\n value=self._data['time_idx'],\n rng=[0, len_time],\n double=True,\n callback=self.callbacks[\"time\"],\n compact=False,\n layout=layout,\n )\n self.callbacks[\"time\"].widget = self.widgets[\"time\"]\n\n # Time labels\n if len_time < 1:\n self.widgets[\"min_time\"] = None\n self.widgets[\"max_time\"] = None\n self.widgets[\"current_time\"] = None\n else:\n self._configure_dock_time_widget(layout)\n self.callbacks[\"time\"].label = self.widgets[\"current_time\"]\n\n # Playback speed widget\n if len_time < 1:\n self.callbacks[\"playback_speed\"] = None\n self.widgets[\"playback_speed\"] = None\n else:\n self.callbacks[\"playback_speed\"] = SmartCallBack(\n callback=self.set_playback_speed,\n )\n self.widgets[\"playback_speed\"] = self._renderer._dock_add_spin_box(\n name=\"Speed\",\n value=self.default_playback_speed_value,\n rng=self.default_playback_speed_range,\n callback=self.callbacks[\"playback_speed\"],\n layout=layout,\n )\n self.callbacks[\"playback_speed\"].widget = \\\n self.widgets[\"playback_speed\"]\n\n # Time label\n current_time = self._current_time\n assert current_time is not None # should never be the case, float\n time_label = self._data['time_label']\n if callable(time_label):\n current_time = time_label(current_time)\n else:\n current_time = time_label\n if self.time_actor is not None:\n self.time_actor.SetInput(current_time)\n del current_time\n\n def _configure_dock_orientation_widget(self, name):\n layout = self._renderer._dock_add_group_box(name)\n # Renderer widget\n rends = [str(i) for i in range(len(self._renderer._all_renderers))]\n if len(rends) > 1:\n def select_renderer(idx):\n idx = int(idx)\n loc = self._renderer._index_to_loc(idx)\n self.plotter.subplot(*loc)\n\n self.callbacks[\"renderer\"] = SmartCallBack(\n callback=select_renderer,\n )\n 
self.widgets[\"renderer\"] = self._renderer._dock_add_combo_box(\n name=\"Renderer\",\n value=\"0\",\n rng=rends,\n callback=self.callbacks[\"renderer\"],\n layout=layout,\n )\n self.callbacks[\"renderer\"].widget = \\\n self.widgets[\"renderer\"]\n\n # Use 'lh' as a reference for orientation for 'both'\n if self._hemi == 'both':\n hemis_ref = ['lh']\n else:\n hemis_ref = self._hemis\n orientation_data = [None] * len(rends)\n for hemi in hemis_ref:\n for ri, ci, v in self._iter_views(hemi):\n idx = self._renderer._loc_to_index((ri, ci))\n if v == 'flat':\n _data = None\n else:\n _data = dict(default=v, hemi=hemi, row=ri, col=ci)\n orientation_data[idx] = _data\n self.callbacks[\"orientation\"] = ShowView(\n brain=self,\n data=orientation_data,\n )\n self.widgets[\"orientation\"] = self._renderer._dock_add_combo_box(\n name=None,\n value=self.orientation[0],\n rng=self.orientation,\n callback=self.callbacks[\"orientation\"],\n layout=layout,\n )\n\n def _configure_dock_colormap_widget(self, name):\n layout = self._renderer._dock_add_group_box(name)\n self._renderer._dock_add_label(\n value=\"min / mid / max\",\n align=True,\n layout=layout,\n )\n up = UpdateLUT(brain=self)\n for key in self.keys:\n hlayout = self._renderer._dock_add_layout(vertical=False)\n rng = _get_range(self)\n self.callbacks[key] = lambda value, key=key: up(**{key: value})\n self.widgets[key] = self._renderer._dock_add_slider(\n name=None,\n value=self._data[key],\n rng=rng,\n callback=self.callbacks[key],\n double=True,\n layout=hlayout,\n )\n self.widgets[f\"entry_{key}\"] = self._renderer._dock_add_spin_box(\n name=None,\n value=self._data[key],\n callback=self.callbacks[key],\n rng=rng,\n layout=hlayout,\n )\n up.widgets[key] = [self.widgets[key], self.widgets[f\"entry_{key}\"]]\n self._renderer._layout_add_widget(layout, hlayout)\n\n # reset / minus / plus\n hlayout = self._renderer._dock_add_layout(vertical=False)\n self._renderer._dock_add_label(\n value=\"Rescale\",\n align=True,\n layout=hlayout,\n )\n self.widgets[\"reset\"] = self._renderer._dock_add_button(\n name=\"↺\",\n callback=self.restore_user_scaling,\n layout=hlayout,\n style='toolbutton',\n )\n for key, char, val in ((\"fminus\", \"➖\", 1.2 ** -0.25),\n (\"fplus\", \"➕\", 1.2 ** 0.25)):\n self.callbacks[key] = UpdateColorbarScale(\n brain=self,\n factor=val,\n )\n self.widgets[key] = self._renderer._dock_add_button(\n name=char,\n callback=self.callbacks[key],\n layout=hlayout,\n style='toolbutton',\n )\n self._renderer._layout_add_widget(layout, hlayout)\n\n # register colorbar slider representations\n widgets = {key: self.widgets[key] for key in self.keys}\n for name in (\"fmin\", \"fmid\", \"fmax\", \"fminus\", \"fplus\"):\n self.callbacks[name].widgets = widgets\n\n def _configure_dock_trace_widget(self, name):\n if not self.show_traces:\n return\n # do not show trace mode for volumes\n if (self._data.get('src', None) is not None and\n self._data['src'].kind == 'volume'):\n self._configure_vertex_time_course()\n return\n\n layout = self._renderer._dock_add_group_box(name)\n\n # setup candidate annots\n def _set_annot(annot):\n self.clear_glyphs()\n self.remove_labels()\n self.remove_annotations()\n self.annot = annot\n\n if annot == 'None':\n self.traces_mode = 'vertex'\n self._configure_vertex_time_course()\n else:\n self.traces_mode = 'label'\n self._configure_label_time_course()\n self._renderer._update()\n\n # setup label extraction parameters\n def _set_label_mode(mode):\n if self.traces_mode != 'label':\n return\n glyphs = 
copy.deepcopy(self.picked_patches)\n self.label_extract_mode = mode\n self.clear_glyphs()\n for hemi in self._hemis:\n for label_id in glyphs[hemi]:\n label = self._annotation_labels[hemi][label_id]\n vertex_id = label.vertices[0]\n self._add_label_glyph(hemi, None, vertex_id)\n self.mpl_canvas.axes.relim()\n self.mpl_canvas.axes.autoscale_view()\n self.mpl_canvas.update_plot()\n self._renderer._update()\n\n from ...source_estimate import _get_allowed_label_modes\n from ...label import _read_annot_cands\n dir_name = op.join(self._subjects_dir, self._subject_id, 'label')\n cands = _read_annot_cands(dir_name, raise_error=False)\n cands = cands + ['None']\n self.annot = cands[0]\n stc = self._data[\"stc\"]\n modes = _get_allowed_label_modes(stc)\n if self._data[\"src\"] is None:\n modes = [m for m in modes if m not in\n self.default_label_extract_modes[\"src\"]]\n self.label_extract_mode = modes[-1]\n if self.traces_mode == 'vertex':\n _set_annot('None')\n else:\n _set_annot(self.annot)\n self.widgets[\"annotation\"] = self._renderer._dock_add_combo_box(\n name=\"Annotation\",\n value=self.annot,\n rng=cands,\n callback=_set_annot,\n layout=layout,\n )\n self.widgets[\"extract_mode\"] = self._renderer._dock_add_combo_box(\n name=\"Extract mode\",\n value=self.label_extract_mode,\n rng=modes,\n callback=_set_label_mode,\n layout=layout,\n )\n\n def _configure_dock(self):\n self._renderer._dock_initialize()\n self._configure_dock_playback_widget(name=\"Playback\")\n self._configure_dock_orientation_widget(name=\"Orientation\")\n self._configure_dock_colormap_widget(name=\"Color Limits\")\n self._configure_dock_trace_widget(name=\"Trace\")\n\n # Smoothing widget\n self.callbacks[\"smoothing\"] = SmartCallBack(\n callback=self.set_data_smoothing,\n )\n self.widgets[\"smoothing\"] = self._renderer._dock_add_spin_box(\n name=\"Smoothing\",\n value=self._data['smoothing_steps'],\n rng=self.default_smoothing_range,\n callback=self.callbacks[\"smoothing\"],\n double=False\n )\n self.callbacks[\"smoothing\"].widget = \\\n self.widgets[\"smoothing\"]\n\n self._renderer._dock_finalize()\n\n def _configure_playback(self):\n self._renderer._playback_initialize(\n func=self._play,\n timeout=self.refresh_rate_ms,\n value=self._data['time_idx'],\n rng=[0, len(self._data['time']) - 1],\n time_widget=self.widgets[\"time\"],\n play_widget=self.widgets[\"play\"],\n )\n\n def _configure_mplcanvas(self):\n # Get the fractional components for the brain and mpl\n self.mpl_canvas = self._renderer._window_get_mplcanvas(\n brain=self,\n interactor_fraction=self.interactor_fraction,\n show_traces=self.show_traces,\n separate_canvas=self.separate_canvas\n )\n xlim = [np.min(self._data['time']),\n np.max(self._data['time'])]\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n self.mpl_canvas.axes.set(xlim=xlim)\n if not self.separate_canvas:\n self._renderer._window_adjust_mplcanvas_layout()\n self.mpl_canvas.set_color(\n bg_color=self._bg_color,\n fg_color=self._fg_color,\n )\n\n def _configure_vertex_time_course(self):\n if not self.show_traces:\n return\n if self.mpl_canvas is None:\n self._configure_mplcanvas()\n else:\n self.clear_glyphs()\n\n # plot RMS of the activation\n y = np.concatenate(list(v[0] for v in self.act_data_smooth.values()\n if v[0] is not None))\n rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y))\n del y\n\n self.rms, = self.mpl_canvas.axes.plot(\n self._data['time'], rms,\n lw=3, label='RMS', zorder=3, color=self._fg_color,\n alpha=0.5, ls=':')\n\n # now 
plot the time line\n self.plot_time_line(update=False)\n\n # then the picked points\n for idx, hemi in enumerate(['lh', 'rh', 'vol']):\n act_data = self.act_data_smooth.get(hemi, [None])[0]\n if act_data is None:\n continue\n hemi_data = self._data[hemi]\n vertices = hemi_data['vertices']\n\n # simulate a picked renderer\n if self._hemi in ('both', 'rh') or hemi == 'vol':\n idx = 0\n self.picked_renderer = self._renderer._all_renderers[idx]\n\n # initialize the default point\n if self._data['initial_time'] is not None:\n # pick at that time\n use_data = act_data[\n :, [np.round(self._data['time_idx']).astype(int)]]\n else:\n use_data = act_data\n ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None),\n use_data.shape)\n if hemi == 'vol':\n mesh = hemi_data['grid']\n else:\n mesh = self._layered_meshes[hemi]._polydata\n vertex_id = vertices[ind[0]]\n self._add_vertex_glyph(hemi, mesh, vertex_id, update=False)\n\n def _configure_picking(self):\n # get data for each hemi\n from scipy import sparse\n for idx, hemi in enumerate(['vol', 'lh', 'rh']):\n hemi_data = self._data.get(hemi)\n if hemi_data is not None:\n act_data = hemi_data['array']\n if act_data.ndim == 3:\n act_data = np.linalg.norm(act_data, axis=1)\n smooth_mat = hemi_data.get('smooth_mat')\n vertices = hemi_data['vertices']\n if hemi == 'vol':\n assert smooth_mat is None\n smooth_mat = sparse.csr_matrix(\n (np.ones(len(vertices)),\n (vertices, np.arange(len(vertices)))))\n self.act_data_smooth[hemi] = (act_data, smooth_mat)\n\n self._renderer._update_picking_callback(\n self._on_mouse_move,\n self._on_button_press,\n self._on_button_release,\n self._on_pick\n )\n\n def _configure_tool_bar(self):\n self._renderer._tool_bar_load_icons()\n self._renderer._tool_bar_set_theme(self.theme)\n self._renderer._tool_bar_initialize(name=\"Toolbar\")\n self._renderer._tool_bar_add_file_button(\n name=\"screenshot\",\n desc=\"Take a screenshot\",\n func=self.save_image,\n )\n self._renderer._tool_bar_add_file_button(\n name=\"movie\",\n desc=\"Save movie...\",\n func=lambda filename: self.save_movie(\n filename=filename,\n time_dilation=(1. 
/ self.playback_speed)),\n shortcut=\"ctrl+shift+s\",\n )\n self._renderer._tool_bar_add_button(\n name=\"visibility\",\n desc=\"Toggle Controls\",\n func=self.toggle_interface,\n icon_name=\"visibility_on\"\n )\n self.widgets[\"play\"] = self._renderer._tool_bar_add_play_button(\n name=\"play\",\n desc=\"Play/Pause\",\n func=self.toggle_playback,\n shortcut=\" \",\n )\n self._renderer._tool_bar_add_button(\n name=\"reset\",\n desc=\"Reset\",\n func=self.reset,\n )\n self._renderer._tool_bar_add_button(\n name=\"scale\",\n desc=\"Auto-Scale\",\n func=self.apply_auto_scaling,\n )\n self._renderer._tool_bar_add_button(\n name=\"clear\",\n desc=\"Clear traces\",\n func=self.clear_glyphs,\n )\n self._renderer._tool_bar_add_spacer()\n self._renderer._tool_bar_add_button(\n name=\"help\",\n desc=\"Help\",\n func=self.help,\n shortcut=\"?\",\n )\n\n def _shift_time(self, op):\n self.callbacks[\"time\"](\n value=(op(self._current_time, self.playback_speed)),\n time_as_index=False,\n update_widget=True,\n )\n\n def _rotate_azimuth(self, value):\n azimuth = (self._renderer.figure._azimuth + value) % 360\n self._renderer.set_camera(azimuth=azimuth, reset_camera=False)\n\n def _rotate_elevation(self, value):\n elevation = np.clip(\n self._renderer.figure._elevation + value,\n self._elevation_rng[0],\n self._elevation_rng[1],\n )\n self._renderer.set_camera(elevation=elevation, reset_camera=False)\n\n def _configure_shortcuts(self):\n # First, we remove the default bindings:\n self._clear_callbacks()\n # Then, we add our own:\n self.plotter.add_key_event(\"i\", self.toggle_interface)\n self.plotter.add_key_event(\"s\", self.apply_auto_scaling)\n self.plotter.add_key_event(\"r\", self.restore_user_scaling)\n self.plotter.add_key_event(\"c\", self.clear_glyphs)\n self.plotter.add_key_event(\"n\", partial(self._shift_time,\n op=lambda x, y: x + y))\n self.plotter.add_key_event(\"b\", partial(self._shift_time,\n op=lambda x, y: x - y))\n for key, func, sign in ((\"Left\", self._rotate_azimuth, 1),\n (\"Right\", self._rotate_azimuth, -1),\n (\"Up\", self._rotate_elevation, 1),\n (\"Down\", self._rotate_elevation, -1)):\n self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE))\n\n def _configure_menu(self):\n self._renderer._menu_initialize()\n self._renderer._menu_add_submenu(\n name=\"help\",\n desc=\"Help\",\n )\n self._renderer._menu_add_button(\n menu_name=\"help\",\n name=\"help\",\n desc=\"Show MNE key bindings\\t?\",\n func=self.help,\n )\n\n def _configure_status_bar(self):\n self._renderer._status_bar_initialize()\n self.status_msg = self._renderer._status_bar_add_label(\n self.default_status_bar_msg, stretch=1)\n self.status_progress = self._renderer._status_bar_add_progress_bar()\n if self.status_progress is not None:\n self.status_progress.hide()\n\n def _on_mouse_move(self, vtk_picker, event):\n if self._mouse_no_mvt:\n self._mouse_no_mvt -= 1\n\n def _on_button_press(self, vtk_picker, event):\n self._mouse_no_mvt = 2\n\n def _on_button_release(self, vtk_picker, event):\n if self._mouse_no_mvt > 0:\n x, y = vtk_picker.GetEventPosition()\n # programmatically detect the picked renderer\n try:\n # pyvista<0.30.0\n self.picked_renderer = \\\n self.plotter.iren.FindPokedRenderer(x, y)\n except AttributeError:\n # pyvista>=0.30.0\n self.picked_renderer = \\\n self.plotter.iren.interactor.FindPokedRenderer(x, y)\n # trigger the pick\n self.plotter.picker.Pick(x, y, 0, self.picked_renderer)\n self._mouse_no_mvt = 0\n\n def _on_pick(self, vtk_picker, event):\n if not self.show_traces:\n 
return\n\n # vtk_picker is a vtkCellPicker\n cell_id = vtk_picker.GetCellId()\n mesh = vtk_picker.GetDataSet()\n\n if mesh is None or cell_id == -1 or not self._mouse_no_mvt:\n return # don't pick\n\n # 1) Check to see if there are any spheres along the ray\n if len(self._spheres):\n collection = vtk_picker.GetProp3Ds()\n found_sphere = None\n for ii in range(collection.GetNumberOfItems()):\n actor = collection.GetItemAsObject(ii)\n for sphere in self._spheres:\n if any(a is actor for a in sphere._actors):\n found_sphere = sphere\n break\n if found_sphere is not None:\n break\n if found_sphere is not None:\n assert found_sphere._is_glyph\n mesh = found_sphere\n\n # 2) Remove sphere if it's what we have\n if hasattr(mesh, \"_is_glyph\"):\n self._remove_vertex_glyph(mesh)\n return\n\n # 3) Otherwise, pick the objects in the scene\n try:\n hemi = mesh._hemi\n except AttributeError: # volume\n hemi = 'vol'\n else:\n assert hemi in ('lh', 'rh')\n if self.act_data_smooth[hemi][0] is None: # no data to add for hemi\n return\n pos = np.array(vtk_picker.GetPickPosition())\n if hemi == 'vol':\n # VTK will give us the point closest to the viewer in the vol.\n # We want to pick the point with the maximum value along the\n # camera-to-click array, which fortunately we can get \"just\"\n # by inspecting the points that are sufficiently close to the\n # ray.\n grid = mesh = self._data[hemi]['grid']\n vertices = self._data[hemi]['vertices']\n coords = self._data[hemi]['grid_coords'][vertices]\n scalars = _cell_data(grid)['values'][vertices]\n spacing = np.array(grid.GetSpacing())\n max_dist = np.linalg.norm(spacing) / 2.\n origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()\n ori = pos - origin\n ori /= np.linalg.norm(ori)\n # the magic formula: distance from a ray to a given point\n dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)\n assert dists.shape == (len(coords),)\n mask = dists <= max_dist\n idx = np.where(mask)[0]\n if len(idx) == 0:\n return # weird point on edge of volume?\n # useful for debugging the ray by mapping it into the volume:\n # dists = dists - dists.min()\n # dists = (1. 
- dists / dists.max()) * self._cmap_range[1]\n # _cell_data(grid)['values'][vertices] = dists * mask\n idx = idx[np.argmax(np.abs(scalars[idx]))]\n vertex_id = vertices[idx]\n # Naive way: convert pos directly to idx; i.e., apply mri_src_t\n # shape = self._data[hemi]['grid_shape']\n # taking into account the cell vs point difference (spacing/2)\n # shift = np.array(grid.GetOrigin()) + spacing / 2.\n # ijk = np.round((pos - shift) / spacing).astype(int)\n # vertex_id = np.ravel_multi_index(ijk, shape, order='F')\n else:\n vtk_cell = mesh.GetCell(cell_id)\n cell = [vtk_cell.GetPointId(point_id) for point_id\n in range(vtk_cell.GetNumberOfPoints())]\n vertices = mesh.points[cell]\n idx = np.argmin(abs(vertices - pos), axis=0)\n vertex_id = cell[idx[0]]\n\n if self.traces_mode == 'label':\n self._add_label_glyph(hemi, mesh, vertex_id)\n else:\n self._add_vertex_glyph(hemi, mesh, vertex_id)\n\n def _add_label_glyph(self, hemi, mesh, vertex_id):\n if hemi == 'vol':\n return\n label_id = self._vertex_to_label_id[hemi][vertex_id]\n label = self._annotation_labels[hemi][label_id]\n\n # remove the patch if already picked\n if label_id in self.picked_patches[hemi]:\n self._remove_label_glyph(hemi, label_id)\n return\n\n if hemi == label.hemi:\n self.add_label(label, borders=True, reset_camera=False)\n self.picked_patches[hemi].append(label_id)\n\n def _remove_label_glyph(self, hemi, label_id):\n label = self._annotation_labels[hemi][label_id]\n label._line.remove()\n self.color_cycle.restore(label._color)\n self.mpl_canvas.update_plot()\n self._layered_meshes[hemi].remove_overlay(label.name)\n self.picked_patches[hemi].remove(label_id)\n\n def _add_vertex_glyph(self, hemi, mesh, vertex_id, update=True):\n if vertex_id in self.picked_points[hemi]:\n return\n\n # skip if the wrong hemi is selected\n if self.act_data_smooth[hemi][0] is None:\n return\n color = next(self.color_cycle)\n line = self.plot_time_course(hemi, vertex_id, color, update=update)\n if hemi == 'vol':\n ijk = np.unravel_index(\n vertex_id, np.array(mesh.GetDimensions()) - 1, order='F')\n # should just be GetCentroid(center), but apparently it's VTK9+:\n # center = np.empty(3)\n # voxel.GetCentroid(center)\n voxel = mesh.GetCell(*ijk)\n pts = voxel.GetPoints()\n n_pts = pts.GetNumberOfPoints()\n center = np.empty((n_pts, 3))\n for ii in range(pts.GetNumberOfPoints()):\n pts.GetPoint(ii, center[ii])\n center = np.mean(center, axis=0)\n else:\n center = mesh.GetPoints().GetPoint(vertex_id)\n del mesh\n\n # from the picked renderer to the subplot coords\n try:\n lst = self._renderer._all_renderers._renderers\n except AttributeError:\n lst = self._renderer._all_renderers\n rindex = lst.index(self.picked_renderer)\n row, col = self._renderer._index_to_loc(rindex)\n\n actors = list()\n spheres = list()\n for _ in self._iter_views(hemi):\n # Using _sphere() instead of renderer.sphere() for 2 reasons:\n # 1) renderer.sphere() fails on Windows in a scenario where a lot\n # of picking requests are done in a short span of time (could be\n # mitigated with synchronization/delay?)\n # 2) the glyph filter is used in renderer.sphere() but only one\n # sphere is required in this function.\n actor, sphere = self._renderer._sphere(\n center=np.array(center),\n color=color,\n radius=4.0,\n )\n actors.append(actor)\n spheres.append(sphere)\n\n # add metadata for picking\n for sphere in spheres:\n sphere._is_glyph = True\n sphere._hemi = hemi\n sphere._line = line\n sphere._actors = actors\n sphere._color = color\n sphere._vertex_id = vertex_id\n\n 
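# register the glyph in the pick table so a later click on the same\n        # sphere can locate and remove it (see _remove_vertex_glyph)\n        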
self.picked_points[hemi].append(vertex_id)\n self._spheres.extend(spheres)\n self.pick_table[vertex_id] = spheres\n return sphere\n\n def _remove_vertex_glyph(self, mesh, render=True):\n vertex_id = mesh._vertex_id\n if vertex_id not in self.pick_table:\n return\n\n hemi = mesh._hemi\n color = mesh._color\n spheres = self.pick_table[vertex_id]\n spheres[0]._line.remove()\n self.mpl_canvas.update_plot()\n self.picked_points[hemi].remove(vertex_id)\n\n with warnings.catch_warnings(record=True):\n # We intentionally ignore these in case we have traversed the\n # entire color cycle\n warnings.simplefilter('ignore')\n self.color_cycle.restore(color)\n for sphere in spheres:\n # remove all actors\n self.plotter.remove_actor(sphere._actors, render=render)\n sphere._actors = None\n self._spheres.pop(self._spheres.index(sphere))\n self.pick_table.pop(vertex_id)\n\n def clear_glyphs(self):\n \"\"\"Clear the picking glyphs.\"\"\"\n if not self.time_viewer:\n return\n for sphere in list(self._spheres): # will remove itself, so copy\n self._remove_vertex_glyph(sphere, render=False)\n assert sum(len(v) for v in self.picked_points.values()) == 0\n assert len(self.pick_table) == 0\n assert len(self._spheres) == 0\n for hemi in self._hemis:\n for label_id in list(self.picked_patches[hemi]):\n self._remove_label_glyph(hemi, label_id)\n assert sum(len(v) for v in self.picked_patches.values()) == 0\n if self.rms is not None:\n self.rms.remove()\n self.rms = None\n self._renderer._update()\n\n def plot_time_course(self, hemi, vertex_id, color, update=True):\n \"\"\"Plot the vertex time course.\n\n Parameters\n ----------\n hemi : str\n The hemisphere id of the vertex.\n vertex_id : int\n The vertex identifier in the mesh.\n color : matplotlib color\n The color of the time course.\n update : bool\n Force an update of the plot. Defaults to True.\n\n Returns\n -------\n line : matplotlib object\n The time line object.\n \"\"\"\n if self.mpl_canvas is None:\n return\n time = self._data['time'].copy() # avoid circular ref\n mni = None\n if hemi == 'vol':\n hemi_str = 'V'\n xfm = read_talxfm(\n self._subject_id, self._subjects_dir)\n if self._units == 'mm':\n xfm['trans'][:3, 3] *= 1000.\n ijk = np.unravel_index(\n vertex_id, self._data[hemi]['grid_shape'], order='F')\n src_mri_t = self._data[hemi]['grid_src_mri_t']\n mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk)\n else:\n hemi_str = 'L' if hemi == 'lh' else 'R'\n try:\n mni = vertex_to_mni(\n vertices=vertex_id,\n hemis=0 if hemi == 'lh' else 1,\n subject=self._subject_id,\n subjects_dir=self._subjects_dir\n )\n except Exception:\n mni = None\n if mni is not None:\n mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni)\n else:\n mni = ''\n label = \"{}:{}{}\".format(hemi_str, str(vertex_id).ljust(6), mni)\n act_data, smooth = self.act_data_smooth[hemi]\n if smooth is not None:\n act_data = smooth[vertex_id].dot(act_data)[0]\n else:\n act_data = act_data[vertex_id].copy()\n line = self.mpl_canvas.plot(\n time,\n act_data,\n label=label,\n lw=1.,\n color=color,\n zorder=4,\n update=update,\n )\n return line\n\n def plot_time_line(self, update=True):\n \"\"\"Add the time line to the MPL widget.\n\n Parameters\n ----------\n update : bool\n Force an update of the plot. 
Defaults to True.\n \"\"\"\n if self.mpl_canvas is None:\n return\n if isinstance(self.show_traces, bool) and self.show_traces:\n # add time information\n current_time = self._current_time\n if not hasattr(self, \"time_line\"):\n self.time_line = self.mpl_canvas.plot_time_line(\n x=current_time,\n label='time',\n color=self._fg_color,\n lw=1,\n update=update,\n )\n self.time_line.set_xdata(current_time)\n if update:\n self.mpl_canvas.update_plot()\n\n def _configure_help(self):\n pairs = [\n ('?', 'Display help window'),\n ('i', 'Toggle interface'),\n ('s', 'Apply auto-scaling'),\n ('r', 'Restore original clim'),\n ('c', 'Clear all traces'),\n ('n', 'Shift the time forward by the playback speed'),\n ('b', 'Shift the time backward by the playback speed'),\n ('Space', 'Start/Pause playback'),\n ('Up', 'Decrease camera elevation angle'),\n ('Down', 'Increase camera elevation angle'),\n ('Left', 'Decrease camera azimuth angle'),\n ('Right', 'Increase camera azimuth angle'),\n ]\n text1, text2 = zip(*pairs)\n text1 = '\\n'.join(text1)\n text2 = '\\n'.join(text2)\n self.help_canvas = self._renderer._window_get_simple_canvas(\n width=5, height=2, dpi=80)\n _show_help_fig(\n col1=text1,\n col2=text2,\n fig_help=self.help_canvas.fig,\n ax=self.help_canvas.axes,\n show=False,\n )\n\n def help(self):\n \"\"\"Display the help window.\"\"\"\n self.help_canvas.show()\n\n def _clear_callbacks(self):\n if not hasattr(self, 'callbacks'):\n return\n for callback in self.callbacks.values():\n if callback is not None:\n for key in ('plotter', 'brain', 'callback',\n 'widget', 'widgets'):\n setattr(callback, key, None)\n self.callbacks.clear()\n # Remove the default key binding\n if getattr(self, \"iren\", None) is not None:\n self.plotter.iren.clear_key_event_callbacks()\n\n def _clear_widgets(self):\n if not hasattr(self, 'widgets'):\n return\n for widget in self.widgets.values():\n if widget is not None:\n for key in ('triggered', 'valueChanged'):\n setattr(widget, key, None)\n self.widgets.clear()\n\n @property\n def interaction(self):\n \"\"\"The interaction style.\"\"\"\n return self._interaction\n\n @interaction.setter\n def interaction(self, interaction):\n \"\"\"Set the interaction style.\"\"\"\n _validate_type(interaction, str, 'interaction')\n _check_option('interaction', interaction, ('trackball', 'terrain'))\n for _ in self._iter_views('vol'): # will traverse all\n self._renderer.set_interaction(interaction)\n\n def _cortex_colormap(self, cortex):\n \"\"\"Return the colormap corresponding to the cortex.\"\"\"\n from .._3d import _get_cmap\n from matplotlib.colors import ListedColormap\n colormap_map = dict(classic=dict(colormap=\"Greys\",\n vmin=-1, vmax=2),\n high_contrast=dict(colormap=\"Greys\",\n vmin=-.1, vmax=1.3),\n low_contrast=dict(colormap=\"Greys\",\n vmin=-5, vmax=5),\n bone=dict(colormap=\"bone_r\",\n vmin=-.2, vmax=2),\n )\n _validate_type(cortex, (str, dict, list, tuple), 'cortex')\n if isinstance(cortex, str):\n if cortex in colormap_map:\n cortex = colormap_map[cortex]\n else:\n cortex = [cortex] * 2\n if isinstance(cortex, (list, tuple)):\n _check_option('len(cortex)', len(cortex), (2, 3),\n extra='when cortex is a list or tuple')\n if len(cortex) == 3:\n cortex = [cortex] * 2\n cortex = list(cortex)\n for ci, c in enumerate(cortex):\n cortex[ci] = _to_rgb(c, name='cortex')\n cortex = dict(\n colormap=ListedColormap(cortex, name='custom binary'),\n vmin=0, vmax=1)\n cortex = dict(\n vmin=float(cortex['vmin']),\n vmax=float(cortex['vmax']),\n 
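# resolve the user-supplied colormap name/object via matplotlib\n            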
colormap=_get_cmap(cortex['colormap']),\n        )\n        return cortex\n\n    def _remove(self, item, render=False):\n        \"\"\"Remove actors from the rendered scene.\"\"\"\n        if item in self._actors:\n            logger.debug(\n                f'Removing {len(self._actors[item])} {item} actor(s)')\n            for actor in self._actors[item]:\n                self._renderer.plotter.remove_actor(actor)\n            self._actors.pop(item)  # remove actor list\n        if render:\n            self._renderer._update()\n\n    def _add_actor(self, item, actor):\n        \"\"\"Add an actor to the internal register.\"\"\"\n        if item in self._actors:  # allows adding more than one\n            self._actors[item].append(actor)\n        else:\n            self._actors[item] = [actor]\n\n    @verbose\n    def add_data(self, array, fmin=None, fmid=None, fmax=None,\n                 thresh=None, center=None, transparent=False, colormap=\"auto\",\n                 alpha=1, vertices=None, smoothing_steps=None, time=None,\n                 time_label=\"auto\", colorbar=True,\n                 hemi=None, remove_existing=None, time_label_size=None,\n                 initial_time=None, scale_factor=None, vector_alpha=None,\n                 clim=None, src=None, volume_options=0.4, colorbar_kwargs=None,\n                 verbose=None):\n        \"\"\"Display data from a numpy array on the surface or volume.\n\n        This provides a similar interface to\n        :meth:`surfer.Brain.add_overlay`, but it displays\n        it with a single colormap. It offers more flexibility over the\n        colormap, and provides a way to display four-dimensional data\n        (i.e., a timecourse) or five-dimensional data (i.e., a\n        vector-valued timecourse).\n\n        .. note:: ``fmin`` sets the low end of the colormap, and is separate\n                  from thresh (this is a different convention from\n                  :meth:`surfer.Brain.add_overlay`).\n\n        Parameters\n        ----------\n        array : numpy array, shape (n_vertices[, 3][, n_times])\n            Data array. For the data to be understood as vector-valued\n            (3 values per vertex corresponding to X/Y/Z surface RAS),\n            then ``array`` must have all 3 dimensions.\n            If vectors with no time dimension are desired, consider using a\n            singleton (e.g., ``np.newaxis``) to create a \"time\" dimension\n            and pass ``time_label=None`` (vector values are not supported).\n        %(fmin_fmid_fmax)s\n        %(thresh)s\n        %(center)s\n        %(transparent)s\n        colormap : str, list of color, or array\n            Name of matplotlib colormap to use, a list of matplotlib colors,\n            or a custom look up table (an n x 4 array coded with RGBA values\n            between 0 and 255), the default \"auto\" chooses a default divergent\n            colormap, if \"center\" is given (currently \"mne\"), otherwise a\n            default sequential colormap (currently \"hot\").\n        alpha : float in [0, 1]\n            Alpha level to control opacity of the overlay.\n        vertices : numpy array\n            Vertices for which the data is defined (needed if\n            ``len(data) < nvtx``).\n        smoothing_steps : int or None\n            Number of smoothing steps (smoothing is used if len(data) < nvtx)\n            The value 'nearest' can be used too. None (default) will use as\n            many as necessary to fill the surface.\n        time : numpy array\n            Time points in the data array (if data is 2D or 3D).\n        %(time_label)s\n        colorbar : bool\n            Whether to add a colorbar to the figure. Can also be a tuple\n            to give the (row, col) index of where to put the colorbar.\n        hemi : str | None\n            If None, it is assumed to belong to the hemisphere being\n            shown. If two hemispheres are being shown, an error will\n            be thrown.\n        remove_existing : bool\n            Not supported yet.\n            Remove surface added by previous \"add_data\" call. 
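(Only ``None`` is currently accepted; see the ``_check_option`` call below.) 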
Useful for\n conserving memory when displaying different data in a loop.\n time_label_size : int\n Font size of the time label (default 14).\n initial_time : float | None\n Time initially shown in the plot. ``None`` to use the first time\n sample (default).\n scale_factor : float | None (default)\n The scale factor to use when displaying glyphs for vector-valued\n data.\n vector_alpha : float | None\n Alpha level to control opacity of the arrows. Only used for\n vector-valued data. If None (default), ``alpha`` is used.\n clim : dict\n Original clim arguments.\n %(src_volume_options)s\n colorbar_kwargs : dict | None\n Options to pass to :meth:`pyvista.Plotter.add_scalar_bar`\n (e.g., ``dict(title_font_size=10)``).\n %(verbose)s\n\n Notes\n -----\n If the data is defined for a subset of vertices (specified\n by the \"vertices\" parameter), a smoothing method is used to interpolate\n the data onto the high resolution surface. If the data is defined for\n subsampled version of the surface, smoothing_steps can be set to None,\n in which case only as many smoothing steps are applied until the whole\n surface is filled with non-zeros.\n\n Due to a VTK alpha rendering bug, ``vector_alpha`` is\n clamped to be strictly < 1.\n \"\"\"\n _validate_type(transparent, bool, 'transparent')\n _validate_type(vector_alpha, ('numeric', None), 'vector_alpha')\n _validate_type(scale_factor, ('numeric', None), 'scale_factor')\n\n # those parameters are not supported yet, only None is allowed\n _check_option('thresh', thresh, [None])\n _check_option('remove_existing', remove_existing, [None])\n _validate_type(time_label_size, (None, 'numeric'), 'time_label_size')\n if time_label_size is not None:\n time_label_size = float(time_label_size)\n if time_label_size < 0:\n raise ValueError('time_label_size must be positive, got '\n f'{time_label_size}')\n\n hemi = self._check_hemi(hemi, extras=['vol'])\n stc, array, vertices = self._check_stc(hemi, array, vertices)\n array = np.asarray(array)\n vector_alpha = alpha if vector_alpha is None else vector_alpha\n self._data['vector_alpha'] = vector_alpha\n self._data['scale_factor'] = scale_factor\n\n # Create time array and add label if > 1D\n if array.ndim <= 1:\n time_idx = 0\n else:\n # check time array\n if time is None:\n time = np.arange(array.shape[-1])\n else:\n time = np.asarray(time)\n if time.shape != (array.shape[-1],):\n raise ValueError('time has shape %s, but need shape %s '\n '(array.shape[-1])' %\n (time.shape, (array.shape[-1],)))\n self._data[\"time\"] = time\n\n if self._n_times is None:\n self._times = time\n elif len(time) != self._n_times:\n raise ValueError(\"New n_times is different from previous \"\n \"n_times\")\n elif not np.array_equal(time, self._times):\n raise ValueError(\"Not all time values are consistent with \"\n \"previously set times.\")\n\n # initial time\n if initial_time is None:\n time_idx = 0\n else:\n time_idx = self._to_time_index(initial_time)\n\n # time label\n time_label, _ = _handle_time(time_label, 's', time)\n y_txt = 0.05 + 0.1 * bool(colorbar)\n\n if array.ndim == 3:\n if array.shape[1] != 3:\n raise ValueError('If array has 3 dimensions, array.shape[1] '\n 'must equal 3, got %s' % (array.shape[1],))\n fmin, fmid, fmax = _update_limits(\n fmin, fmid, fmax, center, array\n )\n if colormap == 'auto':\n colormap = 'mne' if center is not None else 'hot'\n\n if smoothing_steps is None:\n smoothing_steps = 7\n elif smoothing_steps == 'nearest':\n smoothing_steps = -1\n elif isinstance(smoothing_steps, int):\n if smoothing_steps 
< 0:\n                raise ValueError('Expected value of `smoothing_steps` is'\n                                 ' positive but {} was given.'.format(\n                                     smoothing_steps))\n        else:\n            raise TypeError('Expected type of `smoothing_steps` is int or'\n                            ' NoneType but {} was given.'.format(\n                                type(smoothing_steps)))\n\n        self._data['stc'] = stc\n        self._data['src'] = src\n        self._data['smoothing_steps'] = smoothing_steps\n        self._data['clim'] = clim\n        self._data['time'] = time\n        self._data['initial_time'] = initial_time\n        self._data['time_label'] = time_label\n        self._data['initial_time_idx'] = time_idx\n        self._data['time_idx'] = time_idx\n        self._data['transparent'] = transparent\n        # data specific for a hemi\n        self._data[hemi] = dict()\n        self._data[hemi]['glyph_dataset'] = None\n        self._data[hemi]['glyph_mapper'] = None\n        self._data[hemi]['glyph_actor'] = None\n        self._data[hemi]['array'] = array\n        self._data[hemi]['vertices'] = vertices\n        self._data['alpha'] = alpha\n        self._data['colormap'] = colormap\n        self._data['center'] = center\n        self._data['fmin'] = fmin\n        self._data['fmid'] = fmid\n        self._data['fmax'] = fmax\n        self.update_lut()\n\n        # 1) add the surfaces first\n        actor = None\n        for _ in self._iter_views(hemi):\n            if hemi in ('lh', 'rh'):\n                actor = self._layered_meshes[hemi]._actor\n            else:\n                src_vol = src[2:] if src.kind == 'mixed' else src\n                actor, _ = self._add_volume_data(hemi, src_vol, volume_options)\n        assert actor is not None  # should have added one\n        self._add_actor('data', actor)\n\n        # 2) update time and smoothing properties\n        # set_data_smoothing calls \"set_time_point\" for us, which will set\n        # _current_time\n        self.set_time_interpolation(self.time_interpolation)\n        self.set_data_smoothing(self._data['smoothing_steps'])\n\n        # 3) add the other actors\n        if colorbar is True:\n            # bottom left by default\n            colorbar = (self._subplot_shape[0] - 1, 0)\n        for ri, ci, v in self._iter_views(hemi):\n            # Add the time label to the bottommost view\n            do = (ri, ci) == colorbar\n            if not self._time_label_added and time_label is not None and do:\n                time_actor = self._renderer.text2d(\n                    x_window=0.95, y_window=y_txt,\n                    color=self._fg_color,\n                    size=time_label_size,\n                    text=time_label(self._current_time),\n                    justification='right'\n                )\n                self._data['time_actor'] = time_actor\n                self._time_label_added = True\n            if colorbar and self._scalar_bar is None and do:\n                kwargs = dict(source=actor, n_labels=8, color=self._fg_color,\n                              bgcolor=self._brain_color[:3])\n                kwargs.update(colorbar_kwargs or {})\n                self._scalar_bar = self._renderer.scalarbar(**kwargs)\n            self._renderer.set_camera(\n                update=False, reset_camera=False, **views_dicts[hemi][v])\n\n        # 4) update the scalar bar and opacity\n        self.update_lut(alpha=alpha)\n\n    def remove_data(self):\n        \"\"\"Remove rendered data from the mesh.\"\"\"\n        self._remove('data', render=True)\n\n    def _iter_views(self, hemi):\n        \"\"\"Iterate over rows and columns that need to be added to.\"\"\"\n        hemi_dict = dict(lh=[0], rh=[0], vol=[0])\n        if self._hemi == 'split':\n            hemi_dict.update(rh=[1], vol=[0, 1])\n        for vi, view in enumerate(self._views):\n            view_dict = dict(lh=[vi], rh=[vi], vol=[vi])\n            if self._hemi == 'split':\n                view_dict.update(vol=[vi, vi])\n            if self._view_layout == 'vertical':\n                rows, cols = view_dict, hemi_dict  # views are rows, hemis cols\n            else:\n                rows, cols = hemi_dict, view_dict  # hemis are rows, views cols\n            for ri, ci in zip(rows[hemi], cols[hemi]):\n                self._renderer.subplot(ri, ci)\n                yield ri, ci, view\n\n    def remove_labels(self):\n        \"\"\"Remove all the ROI labels from the image.\"\"\"\n        for hemi in self._hemis:\n            mesh = 
self._layered_meshes[hemi]\n for label in self._labels[hemi]:\n mesh.remove_overlay(label.name)\n self._labels[hemi].clear()\n self._renderer._update()\n\n def remove_annotations(self):\n \"\"\"Remove all annotations from the image.\"\"\"\n for hemi in self._hemis:\n mesh = self._layered_meshes[hemi]\n mesh.remove_overlay(self._annots[hemi])\n self._annots[hemi].clear()\n self._renderer._update()\n\n def _add_volume_data(self, hemi, src, volume_options):\n from ..backends._pyvista import _hide_testing_actor\n _validate_type(src, SourceSpaces, 'src')\n _check_option('src.kind', src.kind, ('volume',))\n _validate_type(\n volume_options, (dict, 'numeric', None), 'volume_options')\n assert hemi == 'vol'\n if not isinstance(volume_options, dict):\n volume_options = dict(\n resolution=float(volume_options) if volume_options is not None\n else None)\n volume_options = _handle_default('volume_options', volume_options)\n allowed_types = (\n ['resolution', (None, 'numeric')],\n ['blending', (str,)],\n ['alpha', ('numeric', None)],\n ['surface_alpha', (None, 'numeric')],\n ['silhouette_alpha', (None, 'numeric')],\n ['silhouette_linewidth', ('numeric',)],\n )\n for key, types in allowed_types:\n _validate_type(volume_options[key], types,\n f'volume_options[{repr(key)}]')\n extra_keys = set(volume_options) - set(a[0] for a in allowed_types)\n if len(extra_keys):\n raise ValueError(\n f'volume_options got unknown keys {sorted(extra_keys)}')\n blending = _check_option('volume_options[\"blending\"]',\n volume_options['blending'],\n ('composite', 'mip'))\n alpha = volume_options['alpha']\n if alpha is None:\n alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1.\n alpha = np.clip(float(alpha), 0., 1.)\n resolution = volume_options['resolution']\n surface_alpha = volume_options['surface_alpha']\n if surface_alpha is None:\n surface_alpha = min(alpha / 2., 0.1)\n silhouette_alpha = volume_options['silhouette_alpha']\n if silhouette_alpha is None:\n silhouette_alpha = surface_alpha / 4.\n silhouette_linewidth = volume_options['silhouette_linewidth']\n del volume_options\n volume_pos = self._data[hemi].get('grid_volume_pos')\n volume_neg = self._data[hemi].get('grid_volume_neg')\n center = self._data['center']\n if volume_pos is None:\n xyz = np.meshgrid(\n *[np.arange(s) for s in src[0]['shape']], indexing='ij')\n dimensions = np.array(src[0]['shape'], int)\n mult = 1000 if self._units == 'mm' else 1\n src_mri_t = src[0]['src_mri_t']['trans'].copy()\n src_mri_t[:3] *= mult\n if resolution is not None:\n resolution = resolution * mult / 1000. # to mm\n del src, mult\n coords = np.array([c.ravel(order='F') for c in xyz]).T\n coords = apply_trans(src_mri_t, coords)\n self.geo[hemi] = Bunch(coords=coords)\n vertices = self._data[hemi]['vertices']\n assert self._data[hemi]['array'].shape[0] == len(vertices)\n # MNE constructs the source space on a uniform grid in MRI space,\n # but mne coreg can change it to be non-uniform, so we need to\n # use all three elements here\n assert np.allclose(\n src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3]))\n spacing = np.diag(src_mri_t)[:3]\n origin = src_mri_t[:3, 3] - spacing / 2.\n scalars = np.zeros(np.prod(dimensions))\n scalars[vertices] = 1. 
# for the outer mesh\n            grid, grid_mesh, volume_pos, volume_neg = \\\n                self._renderer._volume(dimensions, origin, spacing, scalars,\n                                       surface_alpha, resolution, blending,\n                                       center)\n            self._data[hemi]['alpha'] = alpha  # incorrectly set earlier\n            self._data[hemi]['grid'] = grid\n            self._data[hemi]['grid_mesh'] = grid_mesh\n            self._data[hemi]['grid_coords'] = coords\n            self._data[hemi]['grid_src_mri_t'] = src_mri_t\n            self._data[hemi]['grid_shape'] = dimensions\n            self._data[hemi]['grid_volume_pos'] = volume_pos\n            self._data[hemi]['grid_volume_neg'] = volume_neg\n        actor_pos, _ = self._renderer.plotter.add_actor(\n            volume_pos, reset_camera=False, name=None, culling=False,\n            render=False)\n        actor_neg = actor_mesh = None\n        if volume_neg is not None:\n            actor_neg, _ = self._renderer.plotter.add_actor(\n                volume_neg, reset_camera=False, name=None, culling=False,\n                render=False)\n        grid_mesh = self._data[hemi]['grid_mesh']\n        if grid_mesh is not None:\n            actor_mesh, prop = self._renderer.plotter.add_actor(\n                grid_mesh, reset_camera=False, name=None, culling=False,\n                pickable=False, render=False)\n            prop.SetColor(*self._brain_color[:3])\n            prop.SetOpacity(surface_alpha)\n            if silhouette_alpha > 0 and silhouette_linewidth > 0:\n                for _ in self._iter_views('vol'):\n                    self._renderer._silhouette(\n                        mesh=grid_mesh.GetInput(),\n                        color=self._brain_color[:3],\n                        line_width=silhouette_linewidth,\n                        alpha=silhouette_alpha,\n                    )\n        for actor in (actor_pos, actor_neg, actor_mesh):\n            if actor is not None:\n                _hide_testing_actor(actor)\n\n        return actor_pos, actor_neg\n\n    def add_label(self, label, color=None, alpha=1, scalar_thresh=None,\n                  borders=False, hemi=None, subdir=None,\n                  reset_camera=True):\n        \"\"\"Add an ROI label to the image.\n\n        Parameters\n        ----------\n        label : str | instance of Label\n            Label filepath or name. Can also be an instance of\n            an object with attributes \"hemi\", \"vertices\", \"name\", and\n            optionally \"color\" and \"values\" (if scalar_thresh is not None).\n        color : matplotlib-style color | None\n            Anything matplotlib accepts: string, RGB, hex, etc. (default\n            \"crimson\").\n        alpha : float in [0, 1]\n            Alpha level to control opacity.\n        scalar_thresh : None | float\n            Threshold the label ids using this value in the label\n            file's scalar field (i.e. label only vertices with\n            scalar >= thresh).\n        borders : bool | int\n            Show only label borders. If int, specify the number of steps\n            (away from the true border) along the cortical mesh to include\n            as part of the border definition.\n        hemi : str | None\n            If None, it is assumed to belong to the hemisphere being\n            shown.\n        subdir : None | str\n            If a label is specified as name, subdir can be used to indicate\n            that the label file is in a sub-directory of the subject's\n            label directory rather than in the label directory itself (e.g.\n            for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label``\n            ``brain.add_label('cuneus', subdir='aparc')``).\n        reset_camera : bool\n            If True, reset the camera view after adding the label. 
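(Use ``reset_camera=False`` to keep the current view, e.g. when adding many labels in a loop.) 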
Defaults\n to True.\n\n Notes\n -----\n To remove previously added labels, run Brain.remove_labels().\n \"\"\"\n from ...label import read_label\n if isinstance(label, str):\n if color is None:\n color = \"crimson\"\n\n if os.path.isfile(label):\n filepath = label\n label = read_label(filepath)\n hemi = label.hemi\n label_name = os.path.basename(filepath).split('.')[1]\n else:\n hemi = self._check_hemi(hemi)\n label_name = label\n label_fname = \".\".join([hemi, label_name, 'label'])\n if subdir is None:\n filepath = op.join(self._subjects_dir, self._subject_id,\n 'label', label_fname)\n else:\n filepath = op.join(self._subjects_dir, self._subject_id,\n 'label', subdir, label_fname)\n if not os.path.exists(filepath):\n raise ValueError('Label file %s does not exist'\n % filepath)\n label = read_label(filepath)\n ids = label.vertices\n scalars = label.values\n else:\n # try to extract parameters from label instance\n try:\n hemi = label.hemi\n ids = label.vertices\n if label.name is None:\n label.name = 'unnamed' + str(self._unnamed_label_id)\n self._unnamed_label_id += 1\n label_name = str(label.name)\n\n if color is None:\n if hasattr(label, 'color') and label.color is not None:\n color = label.color\n else:\n color = \"crimson\"\n\n if scalar_thresh is not None:\n scalars = label.values\n except Exception:\n raise ValueError('Label was not a filename (str), and could '\n 'not be understood as a class. The class '\n 'must have attributes \"hemi\", \"vertices\", '\n '\"name\", and (if scalar_thresh is not None)'\n '\"values\"')\n hemi = self._check_hemi(hemi)\n\n if scalar_thresh is not None:\n ids = ids[scalars >= scalar_thresh]\n\n if self.time_viewer and self.show_traces \\\n and self.traces_mode == 'label':\n stc = self._data[\"stc\"]\n src = self._data[\"src\"]\n tc = stc.extract_label_time_course(label, src=src,\n mode=self.label_extract_mode)\n tc = tc[0] if tc.ndim == 2 else tc[0, 0, :]\n color = next(self.color_cycle)\n line = self.mpl_canvas.plot(\n self._data['time'], tc, label=label_name,\n color=color)\n else:\n line = None\n\n orig_color = color\n color = _to_rgb(color, alpha, alpha=True)\n cmap = np.array([(0, 0, 0, 0,), color])\n ctable = np.round(cmap * 255).astype(np.uint8)\n\n scalars = np.zeros(self.geo[hemi].coords.shape[0])\n scalars[ids] = 1\n if borders:\n keep_idx = _mesh_borders(self.geo[hemi].faces, scalars)\n show = np.zeros(scalars.size, dtype=np.int64)\n if isinstance(borders, int):\n for _ in range(borders):\n keep_idx = np.in1d(\n self.geo[hemi].faces.ravel(), keep_idx)\n keep_idx.shape = self.geo[hemi].faces.shape\n keep_idx = self.geo[hemi].faces[np.any(\n keep_idx, axis=1)]\n keep_idx = np.unique(keep_idx)\n show[keep_idx] = 1\n scalars *= show\n for _, _, v in self._iter_views(hemi):\n mesh = self._layered_meshes[hemi]\n mesh.add_overlay(\n scalars=scalars,\n colormap=ctable,\n rng=[np.min(scalars), np.max(scalars)],\n opacity=alpha,\n name=label_name,\n )\n if reset_camera:\n self._renderer.set_camera(update=False, **views_dicts[hemi][v])\n if self.time_viewer and self.show_traces \\\n and self.traces_mode == 'label':\n label._color = orig_color\n label._line = line\n self._labels[hemi].append(label)\n self._renderer._update()\n\n @fill_doc\n def add_head(self, dense=True, color='gray', alpha=0.5):\n \"\"\"Add a mesh to render the outer head surface.\n\n Parameters\n ----------\n dense : bool\n Whether to plot the dense head (``seghead``) or the less dense head\n (``head``).\n color : color\n A list of anything matplotlib accepts: string, RGB, hex, 
etc.\n alpha : float in [0, 1]\n Alpha level to control opacity.\n\n Notes\n -----\n .. versionadded:: 0.24\n \"\"\"\n # load head\n surf = _get_head_surface('seghead' if dense else 'head',\n self._subject_id, self._subjects_dir)\n verts, triangles = surf['rr'], surf['tris']\n verts *= 1e3 if self._units == 'mm' else 1\n color = _to_rgb(color)\n\n for _ in self._iter_views('vol'):\n actor, _ = self._renderer.mesh(\n *verts.T, triangles=triangles, color=color,\n opacity=alpha, reset_camera=False, render=False)\n self._add_actor('head', actor)\n\n self._renderer._update()\n\n def remove_head(self):\n \"\"\"Remove head objects from the rendered scene.\"\"\"\n self._remove('head', render=True)\n\n @fill_doc\n def add_skull(self, outer=True, color='gray', alpha=0.5):\n \"\"\"Add a mesh to render the skull surface.\n\n Parameters\n ----------\n outer : bool\n Adds the outer skull if ``True``, otherwise adds the inner skull.\n color : color\n A list of anything matplotlib accepts: string, RGB, hex, etc.\n alpha : float in [0, 1]\n Alpha level to control opacity.\n\n Notes\n -----\n .. versionadded:: 0.24\n \"\"\"\n surf = _get_skull_surface('outer' if outer else 'inner',\n self._subject_id, self._subjects_dir)\n verts, triangles = surf['rr'], surf['tris']\n verts *= 1e3 if self._units == 'mm' else 1\n color = _to_rgb(color)\n\n for _ in self._iter_views('vol'):\n actor, _ = self._renderer.mesh(\n *verts.T, triangles=triangles, color=color,\n opacity=alpha, reset_camera=False, render=False)\n self._add_actor('skull', actor)\n\n self._renderer._update()\n\n def remove_skull(self):\n \"\"\"Remove skull objects from the rendered scene.\"\"\"\n self._remove('skull', render=True)\n\n @fill_doc\n def add_volume_labels(self, aseg='aparc+aseg', labels=None, colors=None,\n alpha=0.5, smooth=0.9, fill_hole_size=None,\n legend=None):\n \"\"\"Add labels to the rendering from an anatomical segmentation.\n\n Parameters\n ----------\n %(aseg)s\n labels : list\n Labeled regions of interest to plot. See\n :func:`mne.get_montage_volume_labels`\n for one way to determine regions of interest. Regions can also be\n chosen from the :term:`FreeSurfer LUT`.\n colors : list | matplotlib-style color | None\n A list of anything matplotlib accepts: string, RGB, hex, etc.\n (default :term:`FreeSurfer LUT` colors).\n alpha : float in [0, 1]\n Alpha level to control opacity.\n %(smooth)s\n fill_hole_size : int | None\n The size of holes to remove in the mesh in voxels. Default is None,\n no holes are removed. Warning, this dilates the boundaries of the\n surface by ``fill_hole_size`` number of voxels so use the minimal\n size.\n legend : bool | None | dict\n Add a legend displaying the names of the ``labels``. Default (None)\n is ``True`` if the number of ``labels`` is 10 or fewer.\n Can also be a dict of ``kwargs`` to pass to\n :meth:`pyvista.Plotter.add_legend`.\n\n Notes\n -----\n .. 
versionadded:: 0.24\n        \"\"\"\n        import nibabel as nib\n\n        # load anatomical segmentation image\n        if not aseg.endswith('aseg'):\n            raise RuntimeError(\n                f'`aseg` file path must end with \"aseg\", got {aseg}')\n        aseg = _check_fname(op.join(self._subjects_dir, self._subject_id,\n                                    'mri', aseg + '.mgz'),\n                            overwrite='read', must_exist=True)\n        aseg_fname = aseg\n        aseg = nib.load(aseg_fname)\n        aseg_data = np.asarray(aseg.dataobj)\n        vox_mri_t = aseg.header.get_vox2ras_tkr()\n        mult = 1e-3 if self._units == 'm' else 1\n        vox_mri_t[:3] *= mult\n        del aseg\n\n        # read freesurfer lookup table\n        lut, fs_colors = read_freesurfer_lut()\n        if labels is None:  # assign default ROI labels based on indices\n            lut_r = {v: k for k, v in lut.items()}\n            labels = [lut_r[idx] for idx in DEFAULTS['volume_label_indices']]\n\n        _validate_type(fill_hole_size, (int, None), 'fill_hole_size')\n        _validate_type(legend, (bool, None), 'legend')\n        if legend is None:\n            legend = len(labels) < 11\n\n        if colors is None:\n            colors = [fs_colors[label] / 255 for label in labels]\n        elif not isinstance(colors, (list, tuple)):\n            colors = [colors] * len(labels)  # make into list\n        colors = [_to_rgb(color, name=f'colors[{ci}]')\n                  for ci, color in enumerate(colors)]\n        surfs = _marching_cubes(\n            aseg_data, [lut[label] for label in labels], smooth=smooth,\n            fill_hole_size=fill_hole_size)\n        for label, color, (verts, triangles) in zip(labels, colors, surfs):\n            if len(verts) == 0:  # not in aseg vals\n                warn(f'Value {lut[label]} not found for label '\n                     f'{repr(label)} in: {aseg_fname}')\n                continue\n            verts = apply_trans(vox_mri_t, verts)\n            for _ in self._iter_views('vol'):\n                actor, _ = self._renderer.mesh(\n                    *verts.T, triangles=triangles, color=color,\n                    opacity=alpha, reset_camera=False, render=False)\n                self._add_actor('volume_labels', actor)\n\n        if legend or isinstance(legend, dict):\n            # use empty kwargs for legend = True\n            legend = legend if isinstance(legend, dict) else dict()\n            self._renderer.plotter.add_legend(\n                list(zip(labels, colors)), **legend)\n\n        self._renderer._update()\n\n    def remove_volume_labels(self):\n        \"\"\"Remove the volume labels from the rendered scene.\"\"\"\n        self._remove('volume_labels', render=True)\n        self._renderer.plotter.remove_legend()\n\n    def add_foci(self, coords, coords_as_verts=False, map_surface=None,\n                 scale_factor=1, color=\"white\", alpha=1, name=None,\n                 hemi=None, resolution=50):\n        \"\"\"Add spherical foci, possibly mapping to displayed surf.\n\n        The foci spheres can be displayed at the coordinates given, or\n        mapped through a surface geometry. In other words, coordinates\n        from a volume-based analysis in MNI space can be displayed on an\n        inflated average surface by finding the closest vertex on the\n        white surface and mapping to that vertex on the inflated mesh.\n\n        Parameters\n        ----------\n        coords : ndarray, shape (n_coords, 3)\n            Coordinates in stereotaxic space (default) or array of\n            vertex ids (with ``coords_as_verts=True``).\n        coords_as_verts : bool\n            Whether the coords parameter should be interpreted as vertex ids.\n        map_surface : None\n            Surface to map coordinates through, or None to use raw coords.\n        scale_factor : float\n            Controls the size of the foci spheres (relative to 1cm).\n        color : matplotlib color code\n            HTML name, RGB tuple, or hex code.\n        alpha : float in [0, 1]\n            Opacity of focus glyphs.\n        name : str\n            Internal name to use.\n        hemi : str | None\n            If None, it is assumed to belong to the hemisphere being\n            shown. 
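(When both hemispheres are displayed, pass e.g. ``hemi='lh'`` explicitly.) 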
If two hemispheres are being shown, an error will\n            be thrown.\n        resolution : int\n            The resolution of the spheres.\n        \"\"\"\n        hemi = self._check_hemi(hemi, extras=['vol'])\n\n        # those parameters are not supported yet, only None is allowed\n        _check_option('map_surface', map_surface, [None])\n\n        # Figure out how to interpret the first parameter\n        if coords_as_verts:\n            coords = self.geo[hemi].coords[coords]\n\n        # Convert the color code\n        color = _to_rgb(color)\n\n        if self._units == 'm':\n            scale_factor = scale_factor / 1000.\n        for _, _, v in self._iter_views(hemi):\n            self._renderer.sphere(center=coords, color=color,\n                                  scale=(10. * scale_factor),\n                                  opacity=alpha, resolution=resolution)\n            self._renderer.set_camera(**views_dicts[hemi][v])\n\n    @verbose\n    def add_sensors(self, info, trans, meg=None, eeg='original', fnirs=True,\n                    ecog=True, seeg=True, dbs=True, verbose=None):\n        \"\"\"Add mesh objects to represent sensor positions.\n\n        Parameters\n        ----------\n        %(info_not_none)s\n        %(trans_not_none)s\n        %(meg)s\n        %(eeg)s\n        %(fnirs)s\n        %(ecog)s\n        %(seeg)s\n        %(dbs)s\n        %(verbose)s\n\n        Notes\n        -----\n        .. versionadded:: 0.24\n        \"\"\"\n        _validate_type(info, Info, 'info')\n        meg, eeg, fnirs, warn_meg = _handle_sensor_types(meg, eeg, fnirs)\n        picks = pick_types(info, meg=('sensors' in meg),\n                           ref_meg=('ref' in meg), eeg=(len(eeg) > 0),\n                           ecog=ecog, seeg=seeg, dbs=dbs,\n                           fnirs=(len(fnirs) > 0))\n        head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0]\n        del trans\n        # get transforms to the \"mri\" coordinate frame\n        to_cf_t = _get_transforms_to_coord_frame(\n            info, head_mri_t, coord_frame='mri')\n        if pick_types(info, eeg=True, exclude=()).size > 0 and \\\n                'projected' in eeg:\n            head_surf = _get_head_surface(\n                'seghead', self._subject_id, self._subjects_dir)\n        else:\n            head_surf = None\n        # Do the main plotting\n        for _ in self._iter_views('vol'):\n            if picks.size > 0:\n                sensors_actors = _plot_sensors(\n                    self._renderer, info, to_cf_t, picks, meg, eeg,\n                    fnirs, warn_meg, head_surf, self._units)\n                for item, actors in sensors_actors.items():\n                    for actor in actors:\n                        self._add_actor(item, actor)\n\n            if 'helmet' in meg and pick_types(info, meg=True).size > 0:\n                surf = get_meg_helmet_surf(info, head_mri_t)\n                verts = surf['rr'] * (1 if self._units == 'm' else 1e3)\n                actor, _ = self._renderer.mesh(\n                    *verts.T, surf['tris'],\n                    color=DEFAULTS['coreg']['helmet_color'],\n                    opacity=0.25, reset_camera=False, render=False)\n                self._add_actor('helmet', actor)\n\n        self._renderer._update()\n\n    def remove_sensors(self, kind=None):\n        \"\"\"Remove sensors from the rendered scene.\n\n        Parameters\n        ----------\n        kind : str | list | None\n            If None, removes all sensor-related data including the helmet.\n            Can be \"meg\", \"eeg\", \"fnirs\", \"ecog\", \"seeg\", \"dbs\" or \"helmet\"\n            to remove that item.\n        \"\"\"\n        all_kinds = ('meg', 'eeg', 'fnirs', 'ecog', 'seeg', 'dbs', 'helmet')\n        if kind is None:\n            for item in all_kinds:\n                self._remove(item, render=False)\n        else:\n            if isinstance(kind, str):\n                kind = [kind]\n            for this_kind in kind:\n                _check_option('kind', this_kind, all_kinds)\n                self._remove(this_kind, render=False)\n        self._renderer._update()\n\n    def add_text(self, x, y, text, name=None, color=None, opacity=1.0,\n                 row=0, col=0, font_size=None, justification=None):\n        \"\"\"Add a text to the visualization.\n\n        Parameters\n        ----------\n        x : float\n            X coordinate.\n        y : float\n            Y coordinate.\n        text : str\n            Text to add.\n        name : str\n            Name of the text (text label can be updated using update_text()).\n        color : tuple\n            Color of the text. 
Default is the foreground color set during\n            initialization (default is black or white depending on the\n            background color).\n        opacity : float\n            Opacity of the text (default 1.0).\n        row : int | None\n            Row index of which brain to use. Default is the top row.\n        col : int | None\n            Column index of which brain to use. Default is the left-most\n            column.\n        font_size : float | None\n            The font size to use.\n        justification : str | None\n            The text justification.\n        \"\"\"\n        _validate_type(name, (str, None), 'name')\n        name = text if name is None else name\n        if 'text' in self._actors and name in self._actors['text']:\n            raise ValueError(f'Text with the name {name} already exists')\n        for ri, ci, _ in self._iter_views('vol'):\n            if (row is None or row == ri) and (col is None or col == ci):\n                actor = self._renderer.text2d(\n                    x_window=x, y_window=y, text=text, color=color,\n                    size=font_size, justification=justification)\n                if 'text' not in self._actors:\n                    self._actors['text'] = dict()\n                self._actors['text'][name] = actor\n\n    def remove_text(self, name=None):\n        \"\"\"Remove text from the rendered scene.\n\n        Parameters\n        ----------\n        name : str | None\n            Remove specific text by name. If None, all text will be removed.\n        \"\"\"\n        _validate_type(name, (str, None), 'name')\n        if name is None:\n            for actor in self._actors['text'].values():\n                self._renderer.plotter.remove_actor(actor)\n            self._actors.pop('text')\n        else:\n            names = [None]\n            if 'text' in self._actors:\n                names += list(self._actors['text'].keys())\n            _check_option('name', name, names)\n            self._renderer.plotter.remove_actor(\n                self._actors['text'][name])\n            self._actors['text'].pop(name)\n        self._renderer._update()\n\n    def _configure_label_time_course(self):\n        from ...label import read_labels_from_annot\n        if not self.show_traces:\n            return\n        if self.mpl_canvas is None:\n            self._configure_mplcanvas()\n        else:\n            self.clear_glyphs()\n        self.traces_mode = 'label'\n        self.add_annotation(self.annot, color=\"w\", alpha=0.75)\n\n        # now plot the time line\n        self.plot_time_line(update=False)\n        self.mpl_canvas.update_plot()\n\n        for hemi in self._hemis:\n            labels = read_labels_from_annot(\n                subject=self._subject_id,\n                parc=self.annot,\n                hemi=hemi,\n                subjects_dir=self._subjects_dir\n            )\n            self._vertex_to_label_id[hemi] = np.full(\n                self.geo[hemi].coords.shape[0], -1)\n            self._annotation_labels[hemi] = labels\n            for idx, label in enumerate(labels):\n                self._vertex_to_label_id[hemi][label.vertices] = idx\n\n    def add_annotation(self, annot, borders=True, alpha=1, hemi=None,\n                       remove_existing=True, color=None):\n        \"\"\"Add an annotation file.\n\n        Parameters\n        ----------\n        annot : str | tuple\n            Either path to annotation file or annotation name. Alternatively,\n            the annotation can be specified as a ``(labels, ctab)`` tuple per\n            hemisphere, i.e. ``annot=(labels, ctab)`` for a single hemisphere\n            or ``annot=((lh_labels, lh_ctab), (rh_labels, rh_ctab))`` for both\n            hemispheres. ``labels`` and ``ctab`` should be arrays as returned\n            by :func:`nibabel.freesurfer.io.read_annot`.\n        borders : bool | int\n            Show only label borders. If int, specify the number of steps\n            (away from the true border) along the cortical mesh to include\n            as part of the border definition.\n        alpha : float in [0, 1]\n            Alpha level to control opacity of the annotation. Default is 1.\n        hemi : str | None\n            If None, it is assumed to belong to the hemisphere being\n            shown. 
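(Pass e.g. ``hemi='lh'`` explicitly to disambiguate.) 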
If two hemispheres are being shown, data must exist\n            for both hemispheres.\n        remove_existing : bool\n            If True (default), remove old annotations.\n        color : matplotlib-style color code\n            If used, show all annotations in the same (specified) color.\n            Probably useful only when showing annotation borders.\n        \"\"\"\n        from ...label import _read_annot\n        hemis = self._check_hemis(hemi)\n\n        # Figure out where the data is coming from\n        if isinstance(annot, str):\n            if os.path.isfile(annot):\n                filepath = annot\n                path = os.path.split(filepath)[0]\n                file_hemi, annot = os.path.basename(filepath).split('.')[:2]\n                if len(hemis) > 1:\n                    if file_hemi == 'lh':\n                        filepaths = [filepath,\n                                     op.join(path, 'rh.' + annot + '.annot')]\n                    elif file_hemi == 'rh':\n                        filepaths = [op.join(path, 'lh.' + annot + '.annot'),\n                                     filepath]\n                    else:\n                        raise RuntimeError('To add both hemispheres '\n                                           'simultaneously, filename must '\n                                           'begin with \"lh.\" or \"rh.\"')\n                else:\n                    filepaths = [filepath]\n            else:\n                filepaths = []\n                for hemi in hemis:\n                    filepath = op.join(self._subjects_dir,\n                                       self._subject_id,\n                                       'label',\n                                       \".\".join([hemi, annot, 'annot']))\n                    if not os.path.exists(filepath):\n                        raise ValueError('Annotation file %s does not exist'\n                                         % filepath)\n                    filepaths += [filepath]\n            annots = []\n            for hemi, filepath in zip(hemis, filepaths):\n                # Read in the data\n                labels, cmap, _ = _read_annot(filepath)\n                annots.append((labels, cmap))\n        else:\n            annots = [annot] if len(hemis) == 1 else annot\n            annot = 'annotation'\n\n        for hemi, (labels, cmap) in zip(hemis, annots):\n            # Maybe zero-out the non-border vertices\n            self._to_borders(labels, hemi, borders)\n\n            # Handle null labels properly\n            cmap[:, 3] = 255\n            bgcolor = np.round(np.array(self._brain_color) * 255).astype(int)\n            bgcolor[-1] = 0\n            cmap[cmap[:, 4] < 0, 4] += 2 ** 24  # wrap to positive\n            cmap[cmap[:, 4] <= 0, :4] = bgcolor\n            if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0):\n                cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]])))\n\n            # Set label ids sensibly\n            order = np.argsort(cmap[:, -1])\n            cmap = cmap[order]\n            ids = np.searchsorted(cmap[:, -1], labels)\n            cmap = cmap[:, :4]\n\n            # Set the alpha level\n            alpha_vec = cmap[:, 3]\n            alpha_vec[alpha_vec > 0] = alpha * 255\n\n            # Override the cmap when a single color is used\n            if color is not None:\n                rgb = np.round(np.multiply(_to_rgb(color), 255))\n                cmap[:, :3] = rgb.astype(cmap.dtype)\n\n            ctable = cmap.astype(np.float64)\n            for _ in self._iter_views(hemi):\n                mesh = self._layered_meshes[hemi]\n                mesh.add_overlay(\n                    scalars=ids,\n                    colormap=ctable,\n                    rng=[np.min(ids), np.max(ids)],\n                    opacity=alpha,\n                    name=annot,\n                )\n                self._annots[hemi].append(annot)\n                if not self.time_viewer or self.traces_mode == 'vertex':\n                    self._renderer._set_colormap_range(\n                        mesh._actor, cmap.astype(np.uint8), None)\n\n        self._renderer._update()\n\n    def close(self):\n        \"\"\"Close all figures and cleanup data structure.\"\"\"\n        self._closed = True\n        self._renderer.close()\n\n    def show(self):\n        \"\"\"Display the window.\"\"\"\n        self._renderer.show()\n\n    @fill_doc\n    def show_view(self, view=None, roll=None, distance=None, *,\n                  row=None, col=None, hemi=None, align=True,\n                  azimuth=None, elevation=None, focalpoint=None):\n        \"\"\"Orient camera to display view.\n\n        Parameters\n        ----------\n        %(view)s\n        %(roll)s\n        %(distance)s\n        row : int | None\n            The row to set. Default all rows.\n        col : int | None\n            The column to set. 
Default all columns.\n hemi : str | None\n Which hemi to use for view lookup (when in \"both\" mode).\n align : bool\n If True, consider view arguments relative to canonical MRI\n directions (closest to MNI for the subject) rather than native MRI\n space. This helps when MRIs are not in standard orientation (e.g.,\n have large rotations).\n %(azimuth)s\n %(elevation)s\n %(focalpoint)s\n \"\"\"\n _validate_type(row, ('int-like', None), 'row')\n _validate_type(col, ('int-like', None), 'col')\n hemi = self._hemi if hemi is None else hemi\n if hemi == 'split':\n if (self._view_layout == 'vertical' and col == 1 or\n self._view_layout == 'horizontal' and row == 1):\n hemi = 'rh'\n else:\n hemi = 'lh'\n _validate_type(view, (str, None), 'view')\n view_params = dict(azimuth=azimuth, elevation=elevation, roll=roll,\n distance=distance, focalpoint=focalpoint)\n if view is not None: # view_params take precedence\n view_params = {param: val for param, val in view_params.items()\n if val is not None} # no overwriting with None\n view_params = dict(views_dicts[hemi].get(view), **view_params)\n xfm = self._rigid if align else None\n for h in self._hemis:\n for ri, ci, _ in self._iter_views(h):\n if (row is None or row == ri) and (col is None or col == ci):\n self._renderer.set_camera(\n **view_params, reset_camera=False, rigid=xfm)\n self._renderer._update()\n\n def reset_view(self):\n \"\"\"Reset the camera.\"\"\"\n for h in self._hemis:\n for _, _, v in self._iter_views(h):\n self._renderer.set_camera(**views_dicts[h][v],\n reset_camera=False)\n\n def save_image(self, filename=None, mode='rgb'):\n \"\"\"Save view from all panels to disk.\n\n Parameters\n ----------\n filename : str\n Path to new image file.\n mode : str\n Either 'rgb' or 'rgba' for values to return.\n \"\"\"\n if filename is None:\n filename = _generate_default_filename(\".png\")\n _save_ndarray_img(\n filename, self.screenshot(mode=mode, time_viewer=True))\n\n @fill_doc\n def screenshot(self, mode='rgb', time_viewer=False):\n \"\"\"Generate a screenshot of current view.\n\n Parameters\n ----------\n mode : str\n Either 'rgb' or 'rgba' for values to return.\n %(brain_screenshot_time_viewer)s\n\n Returns\n -------\n screenshot : array\n Image pixel values.\n \"\"\"\n n_channels = 3 if mode == 'rgb' else 4\n img = self._renderer.screenshot(mode)\n logger.debug(f'Got screenshot of size {img.shape}')\n if time_viewer and self.time_viewer and \\\n self.show_traces and \\\n not self.separate_canvas:\n from matplotlib.image import imread\n canvas = self.mpl_canvas.fig.canvas\n canvas.draw_idle()\n fig = self.mpl_canvas.fig\n with BytesIO() as output:\n # Need to pass dpi here so it uses the physical (HiDPI) DPI\n # rather than logical DPI when saving in most cases.\n # But when matplotlib uses HiDPI and VTK doesn't\n # (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work,\n # so let's just calculate the DPI we need to get\n # the correct size output based on the widths being equal\n size_in = fig.get_size_inches()\n dpi = fig.get_dpi()\n want_size = tuple(x * dpi for x in size_in)\n n_pix = want_size[0] * want_size[1]\n logger.debug(\n f'Saving figure of size {size_in} @ {dpi} DPI '\n f'({want_size} = {n_pix} pixels)')\n # Sometimes there can be off-by-one errors here (e.g.,\n # if in mpl int() rather than int(round()) is used to\n # compute the number of pixels) so rather than use \"raw\"\n # format and try to reshape ourselves, just write to PNG\n # and read it, which has the dimensions encoded for us.\n fig.savefig(output, dpi=dpi, 
format='png',\n                            facecolor=self._bg_color, edgecolor='none')\n                output.seek(0)\n                trace_img = imread(output, format='png')[:, :, :n_channels]\n                trace_img = np.clip(\n                    np.round(trace_img * 255), 0, 255).astype(np.uint8)\n            bgcolor = np.array(self._brain_color[:n_channels]) / 255\n            img = concatenate_images([img, trace_img], bgcolor=bgcolor,\n                                     n_channels=n_channels)\n        return img\n\n    @contextlib.contextmanager\n    def _no_lut_update(self, why):\n        orig = self._lut_locked\n        self._lut_locked = why\n        try:\n            yield\n        finally:\n            self._lut_locked = orig\n\n    @fill_doc\n    def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None):\n        \"\"\"Update color map.\n\n        Parameters\n        ----------\n        %(fmin_fmid_fmax)s\n        alpha : float | None\n            Alpha to use in the update.\n        \"\"\"\n        args = f'{fmin}, {fmid}, {fmax}, {alpha}'\n        if self._lut_locked is not None:\n            logger.debug(f'LUT update postponed with {args}')\n            return\n        logger.debug(f'Updating LUT with {args}')\n        center = self._data['center']\n        colormap = self._data['colormap']\n        transparent = self._data['transparent']\n        lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')}\n        _update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax)\n        assert all(val is not None for val in lims.values())\n\n        self._data.update(lims)\n        self._data['ctable'] = np.round(\n            calculate_lut(colormap, alpha=1., center=center,\n                          transparent=transparent, **lims) *\n            255).astype(np.uint8)\n        # update our values\n        rng = self._cmap_range\n        ctable = self._data['ctable']\n        for hemi in ['lh', 'rh', 'vol']:\n            hemi_data = self._data.get(hemi)\n            if hemi_data is not None:\n                if hemi in self._layered_meshes:\n                    mesh = self._layered_meshes[hemi]\n                    mesh.update_overlay(name='data',\n                                        colormap=self._data['ctable'],\n                                        opacity=alpha,\n                                        rng=rng)\n                    self._renderer._set_colormap_range(\n                        mesh._actor, ctable, self._scalar_bar, rng,\n                        self._brain_color)\n\n                grid_volume_pos = hemi_data.get('grid_volume_pos')\n                grid_volume_neg = hemi_data.get('grid_volume_neg')\n                for grid_volume in (grid_volume_pos, grid_volume_neg):\n                    if grid_volume is not None:\n                        self._renderer._set_volume_range(\n                            grid_volume, ctable, hemi_data['alpha'],\n                            self._scalar_bar, rng)\n\n                glyph_actor = hemi_data.get('glyph_actor')\n                if glyph_actor is not None:\n                    for glyph_actor_ in glyph_actor:\n                        self._renderer._set_colormap_range(\n                            glyph_actor_, ctable, self._scalar_bar, rng)\n        if self.time_viewer:\n            with self._no_lut_update(f'update_lut {args}'):\n                for key in ('fmin', 'fmid', 'fmax'):\n                    self.callbacks[key](lims[key])\n        self._renderer._update()\n\n    def set_data_smoothing(self, n_steps):\n        \"\"\"Set the number of smoothing steps.\n\n        Parameters\n        ----------\n        n_steps : int\n            Number of smoothing steps.\n        \"\"\"\n        from ...morph import _hemi_morph\n        for hemi in ['lh', 'rh']:\n            hemi_data = self._data.get(hemi)\n            if hemi_data is not None:\n                if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]:\n                    continue\n                vertices = hemi_data['vertices']\n                if vertices is None:\n                    raise ValueError(\n                        'len(data) < nvtx (%s < %s): the vertices '\n                        'parameter must not be None'\n                        % (len(hemi_data['array']),\n                           self.geo[hemi].x.shape[0]))\n                morph_n_steps = 'nearest' if n_steps == -1 else n_steps\n                with use_log_level(False):\n                    smooth_mat = _hemi_morph(\n                        self.geo[hemi].orig_faces,\n                        np.arange(len(self.geo[hemi].coords)),\n                        vertices, morph_n_steps, maps=None, warn=False)\n                self._data[hemi]['smooth_mat'] = smooth_mat\n        self.set_time_point(self._data['time_idx'])\n        self._data['smoothing_steps'] = n_steps\n\n    @property\n    def _n_times(self):\n        return len(self._times) if self._times is not 
None else None\n\n @property\n def time_interpolation(self):\n \"\"\"The interpolation mode.\"\"\"\n return self._time_interpolation\n\n @fill_doc\n def set_time_interpolation(self, interpolation):\n \"\"\"Set the interpolation mode.\n\n Parameters\n ----------\n %(brain_time_interpolation)s\n \"\"\"\n self._time_interpolation = _check_option(\n 'interpolation',\n interpolation,\n ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic')\n )\n self._time_interp_funcs = dict()\n self._time_interp_inv = None\n if self._times is not None:\n idx = np.arange(self._n_times)\n for hemi in ['lh', 'rh', 'vol']:\n hemi_data = self._data.get(hemi)\n if hemi_data is not None:\n array = hemi_data['array']\n self._time_interp_funcs[hemi] = _safe_interp1d(\n idx, array, self._time_interpolation, axis=-1,\n assume_sorted=True)\n self._time_interp_inv = _safe_interp1d(idx, self._times)\n\n def set_time_point(self, time_idx):\n \"\"\"Set the time point shown (can be a float to interpolate).\n\n Parameters\n ----------\n time_idx : int | float\n The time index to use. Can be a float to use interpolation\n between indices.\n \"\"\"\n self._current_act_data = dict()\n time_actor = self._data.get('time_actor', None)\n time_label = self._data.get('time_label', None)\n for hemi in ['lh', 'rh', 'vol']:\n hemi_data = self._data.get(hemi)\n if hemi_data is not None:\n array = hemi_data['array']\n # interpolate in time\n vectors = None\n if array.ndim == 1:\n act_data = array\n self._current_time = 0\n else:\n act_data = self._time_interp_funcs[hemi](time_idx)\n self._current_time = self._time_interp_inv(time_idx)\n if array.ndim == 3:\n vectors = act_data\n act_data = np.linalg.norm(act_data, axis=1)\n self._current_time = self._time_interp_inv(time_idx)\n self._current_act_data[hemi] = act_data\n if time_actor is not None and time_label is not None:\n time_actor.SetInput(time_label(self._current_time))\n\n # update the volume interpolation\n grid = hemi_data.get('grid')\n if grid is not None:\n vertices = self._data['vol']['vertices']\n values = self._current_act_data['vol']\n rng = self._cmap_range\n fill = 0 if self._data['center'] is not None else rng[0]\n _cell_data(grid)['values'].fill(fill)\n # XXX for sided data, we probably actually need two\n # volumes as composite/MIP needs to look at two\n # extremes... for now just use abs. 
Eventually we can add\n # two volumes if we want.\n _cell_data(grid)['values'][vertices] = values\n\n # interpolate in space\n smooth_mat = hemi_data.get('smooth_mat')\n if smooth_mat is not None:\n act_data = smooth_mat.dot(act_data)\n\n # update the mesh scalar values\n if hemi in self._layered_meshes:\n mesh = self._layered_meshes[hemi]\n if 'data' in mesh._overlays:\n mesh.update_overlay(name='data', scalars=act_data)\n else:\n mesh.add_overlay(\n scalars=act_data,\n colormap=self._data['ctable'],\n rng=self._cmap_range,\n opacity=None,\n name='data',\n )\n\n # update the glyphs\n if vectors is not None:\n self._update_glyphs(hemi, vectors)\n\n self._data['time_idx'] = time_idx\n self._renderer._update()\n\n def set_time(self, time):\n \"\"\"Set the time to display (in seconds).\n\n Parameters\n ----------\n time : float\n The time to show, in seconds.\n \"\"\"\n if self._times is None:\n raise ValueError(\n 'Cannot set time when brain has no defined times.')\n elif min(self._times) <= time <= max(self._times):\n self.set_time_point(np.interp(float(time), self._times,\n np.arange(self._n_times)))\n else:\n raise ValueError(\n f'Requested time ({time} s) is outside the range of '\n f'available times ({min(self._times)}-{max(self._times)} s).')\n\n def _update_glyphs(self, hemi, vectors):\n hemi_data = self._data.get(hemi)\n assert hemi_data is not None\n vertices = hemi_data['vertices']\n vector_alpha = self._data['vector_alpha']\n scale_factor = self._data['scale_factor']\n vertices = slice(None) if vertices is None else vertices\n x, y, z = np.array(self.geo[hemi].coords)[vertices].T\n\n if hemi_data['glyph_actor'] is None:\n add = True\n hemi_data['glyph_actor'] = list()\n else:\n add = False\n count = 0\n for _ in self._iter_views(hemi):\n if hemi_data['glyph_dataset'] is None:\n glyph_mapper, glyph_dataset = self._renderer.quiver3d(\n x, y, z,\n vectors[:, 0], vectors[:, 1], vectors[:, 2],\n color=None,\n mode='2darrow',\n scale_mode='vector',\n scale=scale_factor,\n opacity=vector_alpha,\n name=str(hemi) + \"_glyph\"\n )\n hemi_data['glyph_dataset'] = glyph_dataset\n hemi_data['glyph_mapper'] = glyph_mapper\n else:\n glyph_dataset = hemi_data['glyph_dataset']\n _point_data(glyph_dataset)['vec'] = vectors\n glyph_mapper = hemi_data['glyph_mapper']\n if add:\n glyph_actor = self._renderer._actor(glyph_mapper)\n prop = glyph_actor.GetProperty()\n prop.SetLineWidth(2.)\n prop.SetOpacity(vector_alpha)\n self._renderer.plotter.add_actor(glyph_actor, render=False)\n hemi_data['glyph_actor'].append(glyph_actor)\n else:\n glyph_actor = hemi_data['glyph_actor'][count]\n count += 1\n self._renderer._set_colormap_range(\n actor=glyph_actor,\n ctable=self._data['ctable'],\n scalar_bar=None,\n rng=self._cmap_range,\n )\n\n @property\n def _cmap_range(self):\n dt_max = self._data['fmax']\n if self._data['center'] is None:\n dt_min = self._data['fmin']\n else:\n dt_min = -1 * dt_max\n rng = [dt_min, dt_max]\n return rng\n\n def _update_fscale(self, fscale):\n \"\"\"Scale the colorbar points.\"\"\"\n fmin = self._data['fmin'] * fscale\n fmid = self._data['fmid'] * fscale\n fmax = self._data['fmax'] * fscale\n self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)\n\n def _update_auto_scaling(self, restore=False):\n user_clim = self._data['clim']\n if user_clim is not None and 'lims' in user_clim:\n allow_pos_lims = False\n else:\n allow_pos_lims = True\n if user_clim is not None and restore:\n clim = user_clim\n else:\n clim = 'auto'\n colormap = self._data['colormap']\n transparent = 
self._data['transparent']\n mapdata = _process_clim(\n clim, colormap, transparent,\n np.concatenate(list(self._current_act_data.values())),\n allow_pos_lims)\n diverging = 'pos_lims' in mapdata['clim']\n colormap = mapdata['colormap']\n scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']\n transparent = mapdata['transparent']\n del mapdata\n fmin, fmid, fmax = scale_pts\n center = 0. if diverging else None\n self._data['center'] = center\n self._data['colormap'] = colormap\n self._data['transparent'] = transparent\n self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)\n\n def _to_time_index(self, value):\n \"\"\"Return the interpolated time index of the given time value.\"\"\"\n time = self._data['time']\n value = np.interp(value, time, np.arange(len(time)))\n return value\n\n @property\n def data(self):\n \"\"\"Data used by time viewer and color bar widgets.\"\"\"\n return self._data\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def views(self):\n return self._views\n\n @property\n def hemis(self):\n return self._hemis\n\n def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,\n framerate=24, interpolation=None, codec=None,\n bitrate=None, callback=None, time_viewer=False, **kwargs):\n import imageio\n with self._renderer._disabled_interaction():\n images = self._make_movie_frames(\n time_dilation, tmin, tmax, framerate, interpolation, callback,\n time_viewer)\n # find imageio FFMPEG parameters\n if 'fps' not in kwargs:\n kwargs['fps'] = framerate\n if codec is not None:\n kwargs['codec'] = codec\n if bitrate is not None:\n kwargs['bitrate'] = bitrate\n imageio.mimwrite(filename, images, **kwargs)\n\n def _save_movie_tv(self, filename, time_dilation=4., tmin=None, tmax=None,\n framerate=24, interpolation=None, codec=None,\n bitrate=None, callback=None, time_viewer=False,\n **kwargs):\n def frame_callback(frame, n_frames):\n if frame == n_frames:\n # On the ImageIO step\n self.status_msg.set_value(\n \"Saving with ImageIO: %s\"\n % filename\n )\n self.status_msg.show()\n self.status_progress.hide()\n self._renderer._status_bar_update()\n else:\n self.status_msg.set_value(\n \"Rendering images (frame %d / %d) ...\"\n % (frame + 1, n_frames)\n )\n self.status_msg.show()\n self.status_progress.show()\n self.status_progress.set_range([0, n_frames - 1])\n self.status_progress.set_value(frame)\n self.status_progress.update()\n self.status_msg.update()\n self._renderer._status_bar_update()\n\n # set cursor to busy\n default_cursor = self._renderer._window_get_cursor()\n self._renderer._window_set_cursor(\n self._renderer._window_new_cursor(\"WaitCursor\"))\n\n try:\n self._save_movie(filename, time_dilation, tmin, tmax,\n framerate, interpolation, codec,\n bitrate, frame_callback, time_viewer, **kwargs)\n except (Exception, KeyboardInterrupt):\n warn('Movie saving aborted:\\n' + traceback.format_exc())\n finally:\n self._renderer._window_set_cursor(default_cursor)\n\n @fill_doc\n def save_movie(self, filename=None, time_dilation=4., tmin=None, tmax=None,\n framerate=24, interpolation=None, codec=None,\n bitrate=None, callback=None, time_viewer=False, **kwargs):\n \"\"\"Save a movie (for data with a time axis).\n\n The movie is created through the :mod:`imageio` module. The format is\n determined by the extension, and additional options can be specified\n through keyword arguments that depend on the format, see\n :doc:`imageio's format page <imageio:formats/index>`.\n\n .. 
Warning::\n This method assumes that time is specified in seconds when adding\n data. If time is specified in milliseconds this will result in\n movies 1000 times longer than expected.\n\n Parameters\n ----------\n filename : str\n Path at which to save the movie. The extension determines the\n format (e.g., ``'*.mov'``, ``'*.gif'``, ...; see the :mod:`imageio`\n documentation for available formats).\n time_dilation : float\n Factor by which to stretch time (default 4). For example, an epoch\n from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this\n would result in a 2.8 s long movie.\n tmin : float\n First time point to include (default: all data).\n tmax : float\n Last time point to include (default: all data).\n framerate : float\n Framerate of the movie (frames per second, default 24).\n %(brain_time_interpolation)s\n If None, it uses the current ``brain.interpolation``,\n which defaults to ``'nearest'``. Defaults to None.\n codec : str | None\n The codec to use.\n bitrate : float | None\n The bitrate to use.\n callback : callable | None\n A function to call on each iteration. Useful for status message\n updates. It will be passed keyword arguments ``frame`` and\n ``n_frames``.\n %(brain_screenshot_time_viewer)s\n **kwargs : dict\n Specify additional options for :mod:`imageio`.\n \"\"\"\n if filename is None:\n filename = _generate_default_filename(\".mp4\")\n func = self._save_movie_tv if self.time_viewer else self._save_movie\n func(filename, time_dilation, tmin, tmax,\n framerate, interpolation, codec,\n bitrate, callback, time_viewer, **kwargs)\n\n def _make_movie_frames(self, time_dilation, tmin, tmax, framerate,\n interpolation, callback, time_viewer):\n from math import floor\n\n # find tmin\n if tmin is None:\n tmin = self._times[0]\n elif tmin < self._times[0]:\n raise ValueError(\"tmin=%r is smaller than the first time point \"\n \"(%r)\" % (tmin, self._times[0]))\n\n # find indexes at which to create frames\n if tmax is None:\n tmax = self._times[-1]\n elif tmax > self._times[-1]:\n raise ValueError(\"tmax=%r is greater than the latest time point \"\n \"(%r)\" % (tmax, self._times[-1]))\n n_frames = floor((tmax - tmin) * time_dilation * framerate)\n times = np.arange(n_frames, dtype=float)\n times /= framerate * time_dilation\n times += tmin\n time_idx = np.interp(times, self._times, np.arange(self._n_times))\n\n n_times = len(time_idx)\n if n_times == 0:\n raise ValueError(\"No time points selected\")\n\n logger.debug(\"Save movie for time points/samples\\n%s\\n%s\"\n % (times, time_idx))\n # Sometimes the first screenshot is rendered with a different\n # resolution on OS X\n self.screenshot(time_viewer=time_viewer)\n old_mode = self.time_interpolation\n if interpolation is not None:\n self.set_time_interpolation(interpolation)\n try:\n images = [\n self.screenshot(time_viewer=time_viewer)\n for _ in self._iter_time(time_idx, callback)]\n finally:\n self.set_time_interpolation(old_mode)\n if callback is not None:\n callback(frame=len(time_idx), n_frames=len(time_idx))\n return images\n\n def _iter_time(self, time_idx, callback):\n \"\"\"Iterate through time points, then reset to current time.\n\n Parameters\n ----------\n time_idx : array_like\n Time point indexes through which to iterate.\n callback : callable | None\n Callback to call before yielding each frame.\n\n Yields\n ------\n idx : int | float\n Current index.\n\n Notes\n -----\n Used by movie and image sequence saving functions.\n \"\"\"\n if self.time_viewer:\n func = partial(self.callbacks[\"time\"],\n 
update_widget=True)\n else:\n func = self.set_time_point\n current_time_idx = self._data[\"time_idx\"]\n for ii, idx in enumerate(time_idx):\n func(idx)\n if callback is not None:\n callback(frame=ii, n_frames=len(time_idx))\n yield idx\n\n # Restore original time index\n func(current_time_idx)\n\n def _check_stc(self, hemi, array, vertices):\n from ...source_estimate import (\n _BaseSourceEstimate, _BaseSurfaceSourceEstimate,\n _BaseMixedSourceEstimate, _BaseVolSourceEstimate\n )\n if isinstance(array, _BaseSourceEstimate):\n stc = array\n stc_surf = stc_vol = None\n if isinstance(stc, _BaseSurfaceSourceEstimate):\n stc_surf = stc\n elif isinstance(stc, _BaseMixedSourceEstimate):\n stc_surf = stc.surface() if hemi != 'vol' else None\n stc_vol = stc.volume() if hemi == 'vol' else None\n elif isinstance(stc, _BaseVolSourceEstimate):\n stc_vol = stc if hemi == 'vol' else None\n else:\n raise TypeError(\"stc not supported\")\n\n if stc_surf is None and stc_vol is None:\n raise ValueError(\"No data to be added\")\n if stc_surf is not None:\n array = getattr(stc_surf, hemi + '_data')\n vertices = stc_surf.vertices[0 if hemi == 'lh' else 1]\n if stc_vol is not None:\n array = stc_vol.data\n vertices = np.concatenate(stc_vol.vertices)\n else:\n stc = None\n return stc, array, vertices\n\n def _check_hemi(self, hemi, extras=()):\n \"\"\"Check for safe single-hemi input, returns str.\"\"\"\n _validate_type(hemi, (None, str), 'hemi')\n if hemi is None:\n if self._hemi not in ['lh', 'rh']:\n raise ValueError('hemi must not be None when both '\n 'hemispheres are displayed')\n hemi = self._hemi\n _check_option('hemi', hemi, ('lh', 'rh') + tuple(extras))\n return hemi\n\n def _check_hemis(self, hemi):\n \"\"\"Check for safe dual or single-hemi input, returns list.\"\"\"\n if hemi is None:\n if self._hemi not in ['lh', 'rh']:\n hemi = ['lh', 'rh']\n else:\n hemi = [self._hemi]\n elif hemi not in ['lh', 'rh']:\n extra = ' or None' if self._hemi in ['lh', 'rh'] else ''\n raise ValueError('hemi must be either \"lh\" or \"rh\"' + extra)\n else:\n hemi = [hemi]\n return hemi\n\n def _to_borders(self, label, hemi, borders, restrict_idx=None):\n \"\"\"Convert a label/parc to borders.\"\"\"\n if not isinstance(borders, (bool, int)) or borders < 0:\n raise ValueError('borders must be a bool or positive integer')\n if borders:\n n_vertices = label.size\n edges = mesh_edges(self.geo[hemi].orig_faces)\n edges = edges.tocoo()\n border_edges = label[edges.row] != label[edges.col]\n show = np.zeros(n_vertices, dtype=np.int64)\n keep_idx = np.unique(edges.row[border_edges])\n if isinstance(borders, int):\n for _ in range(borders):\n keep_idx = np.in1d(\n self.geo[hemi].orig_faces.ravel(), keep_idx)\n keep_idx.shape = self.geo[hemi].orig_faces.shape\n keep_idx = self.geo[hemi].orig_faces[\n np.any(keep_idx, axis=1)]\n keep_idx = np.unique(keep_idx)\n if restrict_idx is not None:\n keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)]\n show[keep_idx] = 1\n label *= show\n\n def enable_depth_peeling(self):\n \"\"\"Enable depth peeling.\"\"\"\n self._renderer.enable_depth_peeling()\n\n def get_picked_points(self):\n \"\"\"Return the vertices of the picked points.\n\n Returns\n -------\n points : list of int | None\n The vertices picked by the time viewer.\n \"\"\"\n if hasattr(self, \"time_viewer\"):\n return self.picked_points\n\n def __hash__(self):\n \"\"\"Hash the object.\"\"\"\n raise NotImplementedError\n\n\ndef _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False):\n \"\"\"Work around interp1d not liking 
singleton dimensions.\"\"\"\n from scipy.interpolate import interp1d\n if y.shape[axis] == 1:\n def func(x):\n return np.take(y, np.zeros(np.asarray(x).shape, int), axis=axis)\n return func\n else:\n return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted)\n\n\ndef _update_limits(fmin, fmid, fmax, center, array):\n if center is None:\n if fmin is None:\n fmin = array.min() if array.size > 0 else 0\n if fmax is None:\n fmax = array.max() if array.size > 0 else 1\n else:\n if fmin is None:\n fmin = 0\n if fmax is None:\n fmax = np.abs(center - array).max() if array.size > 0 else 1\n if fmid is None:\n fmid = (fmin + fmax) / 2.\n\n if fmin >= fmid:\n raise RuntimeError('min must be < mid, got %0.4g >= %0.4g'\n % (fmin, fmid))\n if fmid >= fmax:\n raise RuntimeError('mid must be < max, got %0.4g >= %0.4g'\n % (fmid, fmax))\n\n return fmin, fmid, fmax\n\n\ndef _update_monotonic(lims, fmin, fmid, fmax):\n if fmin is not None:\n lims['fmin'] = fmin\n if lims['fmax'] < fmin:\n logger.debug(f' Bumping fmax = {lims[\"fmax\"]} to {fmin}')\n lims['fmax'] = fmin\n if lims['fmid'] < fmin:\n logger.debug(f' Bumping fmid = {lims[\"fmid\"]} to {fmin}')\n lims['fmid'] = fmin\n assert lims['fmin'] <= lims['fmid'] <= lims['fmax']\n if fmid is not None:\n lims['fmid'] = fmid\n if lims['fmin'] > fmid:\n logger.debug(f' Bumping fmin = {lims[\"fmin\"]} to {fmid}')\n lims['fmin'] = fmid\n if lims['fmax'] < fmid:\n logger.debug(f' Bumping fmax = {lims[\"fmax\"]} to {fmid}')\n lims['fmax'] = fmid\n assert lims['fmin'] <= lims['fmid'] <= lims['fmax']\n if fmax is not None:\n lims['fmax'] = fmax\n if lims['fmin'] > fmax:\n logger.debug(f' Bumping fmin = {lims[\"fmin\"]} to {fmax}')\n lims['fmin'] = fmax\n if lims['fmid'] > fmax:\n logger.debug(f' Bumping fmid = {lims[\"fmid\"]} to {fmax}')\n lims['fmid'] = fmax\n assert lims['fmin'] <= lims['fmid'] <= lims['fmax']\n\n\ndef _get_range(brain):\n val = np.abs(np.concatenate(list(brain._current_act_data.values())))\n return [np.min(val), np.max(val)]\n\n\nclass _FakeIren():\n def EnterEvent(self):\n pass\n\n def MouseMoveEvent(self):\n pass\n\n def LeaveEvent(self):\n pass\n\n def SetEventInformation(self, *args, **kwargs):\n pass\n\n def CharEvent(self):\n pass\n\n def KeyPressEvent(self, *args, **kwargs):\n pass\n\n def KeyReleaseEvent(self, *args, **kwargs):\n pass\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.asarray",
"numpy.in1d",
"numpy.round",
"numpy.max",
"numpy.concatenate",
"matplotlib.image.imread",
"numpy.mean",
"numpy.any",
"numpy.searchsorted",
"numpy.cross",
"numpy.where",
"numpy.clip",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"numpy.full",
"numpy.atleast_1d",
"scipy.interpolate.interp1d",
"numpy.interp",
"numpy.unravel_index",
"numpy.zeros",
"numpy.min",
"matplotlib.colors.ListedColormap",
"numpy.argsort",
"numpy.array",
"numpy.abs",
"numpy.array_equal",
"numpy.linalg.norm",
"numpy.ones",
"numpy.prod",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
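The `_update_monotonic` helper in the entry above keeps the colormap limits ordered by bumping whichever neighbouring limit falls out of range when one of them changes. A minimal standalone sketch of that clamping rule, assuming only the `lims` dict layout from the source (the function name `clamp_monotonic` and the demo values are illustrative, not MNE's API):

```python
def clamp_monotonic(lims, fmin=None, fmid=None, fmax=None):
    """Update lims in place so that fmin <= fmid <= fmax always holds."""
    if fmin is not None:
        lims['fmin'] = fmin
        lims['fmid'] = max(lims['fmid'], fmin)  # bump fmid up if needed
        lims['fmax'] = max(lims['fmax'], fmin)  # bump fmax up if needed
    if fmid is not None:
        lims['fmid'] = fmid
        lims['fmin'] = min(lims['fmin'], fmid)  # bump fmin down if needed
        lims['fmax'] = max(lims['fmax'], fmid)
    if fmax is not None:
        lims['fmax'] = fmax
        lims['fmin'] = min(lims['fmin'], fmax)
        lims['fmid'] = min(lims['fmid'], fmax)
    assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
    return lims

lims = {'fmin': 1.0, 'fmid': 5.0, 'fmax': 10.0}
print(clamp_monotonic(lims, fmax=3.0))  # {'fmin': 1.0, 'fmid': 3.0, 'fmax': 3.0}
```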
data301-2021-summer2/project-group5 | [
"f33e165cca2f383f0f8d43da175d4a1fcd36cf17"
] | [
"analysis/scripts/aliproject_functions.py"
] | [
"import seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\n\ndef load_and_process(url_or_path_to_csv_file):\n\n # Method Chain 1 (Load data and deal with missing data)\n \n\n df = ( pd.read_csv (url_or_path_to_csv_file,na_values=['No Reasonable offer refused - We Finance', 'NO ACCIDENT, LOCAL, CERTIFIED','U WORK U DRIVE - CALL NOW !!','You work you drive']).dropna() )\n \n\n df1 = (\n df.assign(cylinders=df.cylinders.str.split(' ').str[1]).assign(location=np.where(df.location.str.lower().str.startswith('vancouver'), 'vancouver', df.location))\n .assign(location=df.location.str.lower()).drop(['body text','Unnamed: 0'],axis=1).reset_index(drop=True)\n\n )\n\n df = df1\n\n return df\n\n\n\n\n\n\n\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
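The `load_and_process` function above leans on pandas method chaining plus `np.where` to collapse every "vancouver*" spelling into one label. A small self-contained sketch of that pattern; the column values below are invented stand-ins for the scraped listings:

```python
import numpy as np
import pandas as pd

# Toy frame standing in for the raw scrape; values are made up.
df = pd.DataFrame({
    'location': ['Vancouver East', 'vancouver west', 'Burnaby'],
    'cylinders': ['4 cylinders', '6 cylinders', '8 cylinders'],
})

clean = (
    df.assign(cylinders=df['cylinders'].str.split(' ').str[0])  # keep numeric token
      .assign(location=lambda d: np.where(
          d['location'].str.lower().str.startswith('vancouver'),
          'vancouver',                       # normalise all Vancouver variants
          d['location'].str.lower()))        # lowercase everything else
      .reset_index(drop=True)
)
print(clean)
```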
Eshan-Agarwal/addons | [
"0b186d6fd94768bffedc258ee531c96ebe5ce62f"
] | [
"tensorflow_addons/metrics/r_square.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements R^2 scores.\"\"\"\n\nimport warnings\n\nimport tensorflow as tf\nfrom tensorflow.keras.metrics import Metric\n\nfrom typeguard import typechecked\nfrom tensorflow_addons.utils.types import AcceptableDTypes\n\n\nclass RSquare(Metric):\n \"\"\"Compute R^2 score.\n\n This is also called as coefficient of determination.\n It tells how close are data to the fitted regression line.\n\n - Highest score can be 1.0 and it indicates that the predictors\n perfectly accounts for variation in the target.\n - Score 0.0 indicates that the predictors do not\n account for variation in the target.\n - It can also be negative if the model is worse.\n\n Usage:\n ```python\n actuals = tf.constant([1, 4, 3], dtype=tf.float32)\n preds = tf.constant([2, 4, 4], dtype=tf.float32)\n result = tf.keras.metrics.RSquare()\n result.update_state(actuals, preds)\n print('R^2 score is: ', r1.result().numpy()) # 0.57142866\n ```\n \"\"\"\n\n @typechecked\n def __init__(\n self, name: str = \"r_square\", dtype: AcceptableDTypes = None, **kwargs\n ):\n super().__init__(name=name, dtype=dtype)\n self.squared_sum = self.add_weight(\"squared_sum\", initializer=\"zeros\")\n self.sum = self.add_weight(\"sum\", initializer=\"zeros\")\n self.res = self.add_weight(\"residual\", initializer=\"zeros\")\n self.count = self.add_weight(\"count\", initializer=\"zeros\")\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n if sample_weight is not None:\n warnings.warn(\n \"`sample_weight` is not None. Be aware that RSquare \"\n \"does not take `sample_weight` into account when computing the metric \"\n \"value.\"\n )\n y_true = tf.convert_to_tensor(y_true, tf.float32)\n y_pred = tf.convert_to_tensor(y_pred, tf.float32)\n self.squared_sum.assign_add(tf.reduce_sum(y_true ** 2))\n self.sum.assign_add(tf.reduce_sum(y_true))\n self.res.assign_add(tf.reduce_sum(tf.square(tf.subtract(y_true, y_pred))))\n self.count.assign_add(tf.cast(tf.shape(y_true)[0], tf.float32))\n\n def result(self):\n mean = self.sum / self.count\n total = self.squared_sum - 2 * self.sum * mean + self.count * mean ** 2\n return 1 - (self.res / total)\n\n def reset_states(self):\n # The state of the metric will be reset at the start of each epoch.\n self.squared_sum.assign(0.0)\n self.sum.assign(0.0)\n self.res.assign(0.0)\n self.count.assign(0.0)\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.reduce_sum",
"tensorflow.shape",
"tensorflow.subtract"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
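The `result` method above never stores the raw targets; it recovers the total sum of squares from running sums via the identity sum((y - mean)^2) = sum(y^2) - 2*mean*sum(y) + n*mean^2. A quick NumPy check of that algebra, using the values from the docstring example (the variable names mirror the metric's weights; the check itself is illustrative):

```python
import numpy as np

y_true = np.array([1.0, 4.0, 3.0])
y_pred = np.array([2.0, 4.0, 4.0])

# Running sums, as accumulated by update_state.
squared_sum = np.sum(y_true ** 2)        # sum of y^2
total_sum = np.sum(y_true)               # sum of y
res = np.sum((y_true - y_pred) ** 2)     # residual sum of squares
count = len(y_true)

# Streaming form used in result().
mean = total_sum / count
total = squared_sum - 2 * total_sum * mean + count * mean ** 2
r2_streaming = 1 - res / total

# Direct textbook form for comparison.
r2_direct = 1 - res / np.sum((y_true - np.mean(y_true)) ** 2)

assert np.isclose(r2_streaming, r2_direct)
print(r2_streaming)  # 0.5714285714..., matching the docstring's 0.57142866
```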
tusharnankani/Scrapera | [
"d4b0ed33df200fadfe45638410cdb510382004ae"
] | [
"scrapera/text/instagram.py"
] | [
"import os\n\nimport pandas as pd\nimport json\nimport urllib.request\nimport urllib\n\n\nclass InstagramCommentsScraper:\n def _extract_get_comments_data(self, json_response):\n comments_list, usernames_list, timestamps_list = [], [], []\n for node in json_response['graphql']['shortcode_media']['edge_media_to_parent_comment']['edges']:\n comments_list.append(node['node']['text'].encode('utf-8', 'replace').decode())\n usernames_list.append(node['node']['owner']['username'])\n timestamps_list.append(node['node']['created_at'])\n\n return comments_list, usernames_list, timestamps_list\n\n def _extract_post_json(self, url, urllib_proxies=None):\n url = f\"https://www.instagram.com/p/{url.split('/')[-2]}/?__a=1\"\n\n req = urllib.request.Request(url, None, {\"User-Agent\": \"Mozilla/5.0\"})\n\n if urllib_proxies:\n handler = urllib.request.ProxyHandler(urllib_proxies)\n opener = urllib.request.build_opener(handler)\n urllib.request.install_opener(opener)\n\n response = urllib.request.urlopen(req)\n json_response = json.load(response)\n\n texts, usernames, timestamp = self._extract_get_comments_data(json_response)\n return texts, usernames, timestamp, len(texts)\n\n def scrape(self, url, out_path=None, urllib_proxies=None):\n '''\n Scraper function for scraping comments related to a specific Instagarm post\n url: str, URL for the Instagram post to be scraped\n out_path: [Optional] str, Path to output directory. If unspecified, current directory will be used\n urllib_proxies: [Optional] dict, Proxy information for urllib requests\n '''\n texts, usernames, timestamps, length = self._extract_post_json(url, urllib_proxies)\n df = pd.DataFrame({'text': texts, 'username': usernames, 'timestamp': timestamps})\n df.to_csv('ScrapedComments.csv' if out_path is None else os.path.join(out_path, 'ScrapedComments.csv'),\n index=False)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
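`_extract_post_json` above is a plain `urllib` request with a custom User-Agent and an optional proxy opener installed globally. A generic sketch of the same pattern, assuming a placeholder endpoint (the httpbin URL and the commented proxy value are illustrative, not part of Scrapera):

```python
import json
import urllib.request

url = 'https://httpbin.org/get'  # placeholder endpoint for illustration
proxies = None  # e.g. {'https': 'https://user:pass@proxyhost:port'}

# Many sites reject the default urllib User-Agent, hence the override.
req = urllib.request.Request(url, None, {'User-Agent': 'Mozilla/5.0'})

if proxies:
    # install_opener makes the proxy apply to every later urlopen call.
    opener = urllib.request.build_opener(urllib.request.ProxyHandler(proxies))
    urllib.request.install_opener(opener)

with urllib.request.urlopen(req) as response:
    payload = json.load(response)
print(payload['headers']['User-Agent'])  # 'Mozilla/5.0'
```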
ch3njust1n/record | [
"364e16d748dc3b70879835c5e13467cfcc1865a7"
] | [
"record/__main__.py"
] | [
"'''\nAuthor: Justin Chen\nDate: \t2.15.2020\n'''\nimport os\nimport pwd\nimport json\nimport atexit\nimport signal\nimport psutil\nimport platform\nfrom datetime import datetime\n\nfrom numpy import ndarray\nfrom torch import cuda, Tensor, manual_seed\nfrom pymongo import MongoClient\nfrom gridfs import GridFS\nfrom bson.objectid import ObjectId\nfrom pandas.core.series import Series\nfrom pandas.core.frame import DataFrame\n\nclass Record(dict):\n\t'''\n\tinputs:\t\n\t_id (string, optional) Record Mongo object id\n\thost (string, optional) MongoDB host name\n\tport (int, optional)\t MongoDB port number\n\tdatabase (string, optional) MongoDB database\n\tcollection (string, optional) MongoDB collection\n\tsave_dir (string, optional) Save Record to file as well as MongoDB\n\tseed \t (int, optional) Random seed\n\t'''\n\tdef __init__(self, _id='', host='localhost', port=27017, database='experiments', collection='results', save_dir='', seed=None):\n\t\tsuper().__init__()\n\n\t\t# Start MongoDB daemon\n\t\tstream = os.popen('mongod')\n\n\t\t# Set seed for random number generator\n\t\tself.seed = seed\n\t\t\n\t\tif isinstance(seed, int): \n\t\t\tmanual_seed(seed)\n\t\t\tself.update(seed, key='seed')\n\n\t\t# Connect to MongoDB\n\t\tself.client = MongoClient(host, port)\n\t\tself.database = database\n\t\tself.collection = collection\n\t\tself.db = self.client[database]\n\t\tself.col = self.db[collection]\n\t\tself.fs = GridFS(self.db)\n\t\tprint(self.fs)\n\t\tself._id = str(_id) if isinstance(_id, ObjectId) else _id\n\t\tself.save_dir = save_dir\n\n\t\tif len(self._id) > 0: self.update(self.col.find_one({ '_id': ObjectId(self._id) }))\n\n\t\tself.system_info()\n\n\t\tatexit.register(self.save)\n\t\tsignal.signal(signal.SIGTERM, self.save)\n\t\tsignal.signal(signal.SIGINT, self.save)\n\n\n\t'''\n\tUpdate a dictionary value. Useful for tracking multiple experiments. \n\n\tImportant: Values must be native Python types to be able to insert properly into MongoDB.\n\te.g. Must convert torch.Tensor, DataFrame, Series, or ndArray into lists before calling this function.\n\n\te.g {\n\t\t'model_0': {'time': 1606588095.295271, 'lr': 0.01},\n\t\t'model_1': {'time': 1606588103.197889, 'lr': 0.001}\n\t}\n\n\tupdate('model_1', {'time': 1606588162.7220979, 'lr': 0.002})\n\n\t{\n\t\t'model_0': {'time': 1606588095.295271, 'lr': 0.01},\n\t\t'model_1': {'time': 1606588162.7220979, 'lr': 0.002}\n\t}\n\n\tinputs:\n\tvalue (any value) Value of parameter\n\tkey (string, optional) Key to dict stored object. 
Default: None\n\t'''\n\tdef update(self, value, key=None):\n\n\t\tif self.is_argparse(value):\n\t\t\tvalue = vars(value)\n\n\t\tif self.is_configparser(value):\n\t\t\tvalue = { 'config': { k : dict(value[k].items()) for k, _ in value.items() } }\n\n\t\tif isinstance(value, (Tensor, ndarray, Series)):\n\t\t\tvalue = value.tolist()\n\n\t\tif isinstance(value, DataFrame):\n\t\t\tvalue = value.values.tolist()\n\n\t\tif key:\n\t\t\tif not isinstance(key, str):\n\t\t\t\traise Exception('Key parameter must be type String.')\n\n\t\t\tif key in self and isinstance(self[key], dict):\n\t\t\t\tself[key].update(value)\n\t\t\telse:\n\t\t\t\tself[key] = value\n\t\telse:\n\t\t\tsuper().update(value)\n\n\n\t'''\n\tCheck if argument is argparse.Namespace\n\n\tinputs: \n\targs (*) Variable\n\n\toutputs: \n\tres (bool)\tTrue if args is an instance of argparse.Namespace, else False\n\t'''\n\tdef is_argparse(self, args):\n\t\ttry:\n\t\t\treturn args.__module__ == 'argparse'\n\t\texcept AttributeError:\n\t\t\treturn False\n\n\n\t'''\n\tCheck if argument is configparser.ConfigParser\n\n\tinputs: \n\targs (*) Variable\n\n\toutputs: \n\tres (bool) True if args is an instance of configparser.ConfigParser, else False\n\t'''\n\tdef is_configparser(self, args):\n\t\ttry:\n\t\t\treturn args.__module__ == 'configparser'\n\t\texcept AttributeError:\n\t\t\treturn False\n\n\n\t'''\n\tSave the record to experiment document. Extra positional arguments\n\t(e.g. the signum and frame passed in by signal handlers) are ignored.\n\t'''\n\tdef save(self, *args):\n\t\trecord = dict(self.items())\n\t\tdoc_id = self.col.insert_one(record).inserted_id\n\t\tif len(self._id) == 0: self._id = str(doc_id)\n\n\t\tif len(self.save_dir) > 0:\n\t\t\twith open(os.path.join(self.save_dir, str(doc_id)), 'w') as file:\n\t\t\t\tjson.dump(record, file)\n\n\t\tprint(f'record id: {doc_id}')\n\n\n\t'''\n\tSave system information\n\t'''\n\tdef system_info(self):\n\t\tuname = platform.uname()\n\t\tgpus = [cuda.get_device_name(i) for i in range(cuda.device_count())]\n\n\n\t\tself.update({\n\t\t\t'python': platform.python_version(),\n\t\t\t'machine': uname.machine,\n\t\t\t'processor': uname.processor,\n\t\t\t'os': os.name,\n\t\t\t'os_name': platform.system(),\n\t\t\t'os_ver': platform.release(),\n\t\t\t'memory': str(psutil.virtual_memory().total//2**30)+' GB',\n\t\t\t'storage': str(psutil.disk_usage('/').total//2**30)+' GB',\n\t\t\t'user': pwd.getpwuid(os.getuid())[0],\n\t\t\t'gpus': gpus,\n\t\t\t'timestamp': datetime.now().strftime('%f-%S-%M-%H-%d-%m-%Y')\n\t\t})\n\n\n\t'''\n\tRemove this Record from database\n\n\toutputs:\n\tcount (bool) True if deleted, else False\n\t'''\n\tdef remove(self):\n\t\tres = self.col.delete_one({ '_id': ObjectId(self._id) })\n\t\treturn bool(res.deleted_count)\n\n\t"
] | [
[
"torch.manual_seed",
"torch.cuda.device_count",
"torch.cuda.get_device_name"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
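`Record.update` above converts `Tensor`, `ndarray`, and `Series` values with `.tolist()` before they reach MongoDB, because pymongo's BSON encoder only accepts native Python containers. A minimal sketch of that normalisation step in isolation (the helper name `to_native` is invented for illustration):

```python
import numpy as np
import torch

def to_native(value):
    # Mirror Record.update: array-likes become plain lists so that
    # pymongo's BSON encoder can serialise them without complaint.
    if isinstance(value, (torch.Tensor, np.ndarray)):
        return value.tolist()
    return value

doc = {
    'loss': to_native(torch.tensor([0.9, 0.5, 0.2])),
    'weights': to_native(np.eye(2)),
    'lr': 0.01,
}
print(doc)  # every value is a plain list/float, safe for col.insert_one(doc)
```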
pauloabelha/pytorch-mask-rcnn | [
"2c252c8a75f8287a127f9c91dc8c61cf0e7c0e94"
] | [
"model.py"
] | [
"\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implemenetation.\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport datetime\nimport math\nimport os\nimport random\nimport re\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\n\nimport utils\nimport visualize\nfrom nms.nms_wrapper import nms\nfrom roialign.roi_align.crop_and_resize import CropAndResizeFunction\n\n\n############################################################\n# Logging Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} min: {:10.5f} max: {:10.5f}\".format(\n str(array.shape),\n array.min() if array.size else \"\",\n array.max() if array.size else \"\"))\n print(text)\n\ndef printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\n')\n # Print New Line on Complete\n if iteration == total:\n print()\n\n\n############################################################\n# Pytorch Utility Functions\n############################################################\n\ndef unique1d(tensor):\n if tensor.size()[0] == 0 or tensor.size()[0] == 1:\n return tensor\n tensor = tensor.sort()[0]\n unique_bool = tensor[1:] != tensor [:-1]\n first_element = Variable(torch.ByteTensor([True]), requires_grad=False)\n if tensor.is_cuda:\n first_element = first_element.cuda()\n unique_bool = torch.cat((first_element, unique_bool),dim=0)\n return tensor[unique_bool.data]\n\ndef intersect1d(tensor1, tensor2):\n aux = torch.cat((tensor1, tensor2),dim=0)\n aux = aux.sort()[0]\n return aux[:-1][(aux[1:] == aux[:-1]).data]\n\ndef log2(x):\n \"\"\"Implementatin of Log2. 
Pytorch doesn't have a native implemenation.\"\"\"\n ln2 = Variable(torch.log(torch.FloatTensor([2.0])), requires_grad=False)\n if x.is_cuda:\n ln2 = ln2.cuda()\n return torch.log(x) / ln2\n\nclass SamePad2d(nn.Module):\n \"\"\"Mimics tensorflow's 'SAME' padding.\n \"\"\"\n\n def __init__(self, kernel_size, stride):\n super(SamePad2d, self).__init__()\n self.kernel_size = torch.nn.modules.utils._pair(kernel_size)\n self.stride = torch.nn.modules.utils._pair(stride)\n\n def forward(self, input):\n in_width = input.size()[2]\n in_height = input.size()[3]\n out_width = math.ceil(float(in_width) / float(self.stride[0]))\n out_height = math.ceil(float(in_height) / float(self.stride[1]))\n pad_along_width = ((out_width - 1) * self.stride[0] +\n self.kernel_size[0] - in_width)\n pad_along_height = ((out_height - 1) * self.stride[1] +\n self.kernel_size[1] - in_height)\n pad_left = math.floor(pad_along_width / 2)\n pad_top = math.floor(pad_along_height / 2)\n pad_right = pad_along_width - pad_left\n pad_bottom = pad_along_height - pad_top\n return F.pad(input, (pad_left, pad_right, pad_top, pad_bottom), 'constant', 0)\n\n def __repr__(self):\n return self.__class__.__name__\n\n\n############################################################\n# FPN Graph\n############################################################\n\nclass TopDownLayer(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(TopDownLayer, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1)\n self.padding2 = SamePad2d(kernel_size=3, stride=1)\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1)\n\n def forward(self, x, y):\n y = F.upsample(y, scale_factor=2)\n x = self.conv1(x)\n return self.conv2(self.padding2(x+y))\n\nclass FPN(nn.Module):\n def __init__(self, C1, C2, C3, C4, C5, out_channels):\n super(FPN, self).__init__()\n self.out_channels = out_channels\n self.C1 = C1\n self.C2 = C2\n self.C3 = C3\n self.C4 = C4\n self.C5 = C5\n self.P6 = nn.MaxPool2d(kernel_size=1, stride=2)\n self.P5_conv1 = nn.Conv2d(2048, self.out_channels, kernel_size=1, stride=1)\n self.P5_conv2 = nn.Sequential(\n SamePad2d(kernel_size=3, stride=1),\n nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),\n )\n self.P4_conv1 = nn.Conv2d(1024, self.out_channels, kernel_size=1, stride=1)\n self.P4_conv2 = nn.Sequential(\n SamePad2d(kernel_size=3, stride=1),\n nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),\n )\n self.P3_conv1 = nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1)\n self.P3_conv2 = nn.Sequential(\n SamePad2d(kernel_size=3, stride=1),\n nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),\n )\n self.P2_conv1 = nn.Conv2d(256, self.out_channels, kernel_size=1, stride=1)\n self.P2_conv2 = nn.Sequential(\n SamePad2d(kernel_size=3, stride=1),\n nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),\n )\n\n def forward(self, x):\n x = self.C1(x)\n x = self.C2(x)\n c2_out = x\n x = self.C3(x)\n c3_out = x\n x = self.C4(x)\n c4_out = x\n x = self.C5(x)\n p5_out = self.P5_conv1(x)\n p4_out = self.P4_conv1(c4_out) + F.upsample(p5_out, scale_factor=2)\n p3_out = self.P3_conv1(c3_out) + F.upsample(p4_out, scale_factor=2)\n p2_out = self.P2_conv1(c2_out) + F.upsample(p3_out, scale_factor=2)\n\n p5_out = self.P5_conv2(p5_out)\n p4_out = self.P4_conv2(p4_out)\n p3_out = self.P3_conv2(p3_out)\n p2_out = self.P2_conv2(p2_out)\n\n # P6 is used for the 5th anchor scale in RPN. 
Generated by\n # subsampling from P5 with stride of 2.\n p6_out = self.P6(p5_out)\n\n return [p2_out, p3_out, p4_out, p5_out, p6_out]\n\n\n############################################################\n# Resnet Graph\n############################################################\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride)\n self.bn1 = nn.BatchNorm2d(planes, eps=0.001, momentum=0.01)\n self.padding2 = SamePad2d(kernel_size=3, stride=1)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3)\n self.bn2 = nn.BatchNorm2d(planes, eps=0.001, momentum=0.01)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1)\n self.bn3 = nn.BatchNorm2d(planes * 4, eps=0.001, momentum=0.01)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.padding2(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Module):\n\n def __init__(self, architecture, stage5=False):\n super(ResNet, self).__init__()\n assert architecture in [\"resnet50\", \"resnet101\"]\n self.inplanes = 64\n self.layers = [3, 4, {\"resnet50\": 6, \"resnet101\": 23}[architecture], 3]\n self.block = Bottleneck\n self.stage5 = stage5\n\n self.C1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True),\n SamePad2d(kernel_size=3, stride=2),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.C2 = self.make_layer(self.block, 64, self.layers[0])\n self.C3 = self.make_layer(self.block, 128, self.layers[1], stride=2)\n self.C4 = self.make_layer(self.block, 256, self.layers[2], stride=2)\n if self.stage5:\n self.C5 = self.make_layer(self.block, 512, self.layers[3], stride=2)\n else:\n self.C5 = None\n\n def forward(self, x):\n x = self.C1(x)\n x = self.C2(x)\n x = self.C3(x)\n x = self.C4(x)\n x = self.C5(x)\n return x\n\n\n def stages(self):\n return [self.C1, self.C2, self.C3, self.C4, self.C5]\n\n def make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride),\n nn.BatchNorm2d(planes * block.expansion, eps=0.001, momentum=0.01),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, 4] where each row is y1, x1, y2, x2\n deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 
0] * height\n center_x += deltas[:, 1] * width\n height *= torch.exp(deltas[:, 2])\n width *= torch.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = torch.stack([y1, x1, y2, x2], dim=1)\n return result\n\ndef clip_boxes(boxes, window):\n \"\"\"\n boxes: [N, 4] each col is y1, x1, y2, x2\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n boxes = torch.stack( \\\n [boxes[:, 0].clamp(float(window[0]), float(window[2])),\n boxes[:, 1].clamp(float(window[1]), float(window[3])),\n boxes[:, 2].clamp(float(window[0]), float(window[2])),\n boxes[:, 3].clamp(float(window[1]), float(window[3]))], 1)\n return boxes\n\ndef proposal_layer(inputs, proposal_count, nms_threshold, anchors, config=None):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. It also applies bounding\n box refinment detals to anchors.\n Inputs:\n rpn_probs: [batch, anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n # Currently only supports batchsize 1\n inputs[0] = inputs[0].squeeze(0)\n inputs[1] = inputs[1].squeeze(0)\n\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, 1]\n\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n std_dev = Variable(torch.from_numpy(np.reshape(config.RPN_BBOX_STD_DEV, [1, 4])).float(), requires_grad=False)\n if config.GPU_COUNT:\n std_dev = std_dev.cuda()\n deltas = deltas * std_dev\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = min(6000, anchors.size()[0])\n scores, order = scores.sort(descending=True)\n order = order[:pre_nms_limit]\n scores = scores[:pre_nms_limit]\n deltas = deltas[order.data, :] # TODO: Support batch size > 1 ff.\n anchors = anchors[order.data, :]\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = apply_box_deltas(anchors, deltas)\n\n # Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]\n height, width = config.IMAGE_SHAPE[:2]\n window = np.array([0, 0, height, width]).astype(np.float32)\n boxes = clip_boxes(boxes, window)\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n keep = nms(torch.cat((boxes, scores.unsqueeze(1)), 1).data, nms_threshold)\n keep = keep[:proposal_count]\n boxes = boxes[keep, :]\n\n # Normalize dimensions to range of 0 to 1.\n norm = Variable(torch.from_numpy(np.array([height, width, height, width])).float(), requires_grad=False)\n if config.GPU_COUNT:\n norm = norm.cuda()\n normalized_boxes = boxes / norm\n\n # Add back batch dimension\n normalized_boxes = normalized_boxes.unsqueeze(0)\n\n return normalized_boxes\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef pyramid_roi_align(inputs, pool_size, image_shape):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n Params:\n - pool_size: [height, width] of the output pooled regions. Usually [7, 7]\n - image_shape: [height, width, channels]. 
Shape of input image in pixels\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates.\n - Feature maps: List of feature maps from different levels of the pyramid.\n Each is [batch, channels, height, width]\n Output:\n Pooled regions in the shape: [num_boxes, height, width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n # Currently only supports batchsize 1\n for i in range(len(inputs)):\n inputs[i] = inputs[i].squeeze(0)\n\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[1:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n try:\n y1, x1, y2, x2 = boxes.chunk(4, dim=1)\n except BaseException as E:\n # Added by Paulo Ferreira\n print(boxes.shape)\n print(E)\n print('Exception!')\n raise E\n h = y2 - y1\n w = x2 - x1\n\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = Variable(torch.FloatTensor([float(image_shape[0]*image_shape[1])]), requires_grad=False)\n if boxes.is_cuda:\n image_area = image_area.cuda()\n roi_level = 4 + log2(torch.sqrt(h*w)/(224.0/torch.sqrt(image_area)))\n roi_level = roi_level.round().int()\n roi_level = roi_level.clamp(2,5)\n\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = roi_level==level\n if not ix.any():\n continue\n ix = torch.nonzero(ix)[:,0]\n level_boxes = boxes[ix.data, :]\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix.data)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = level_boxes.detach()\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n ind = Variable(torch.zeros(level_boxes.size()[0]),requires_grad=False).int()\n if level_boxes.is_cuda:\n ind = ind.cuda()\n feature_maps[i] = feature_maps[i].unsqueeze(0) #CropAndResizeFunction needs batch dimension\n pooled_features = CropAndResizeFunction(pool_size, pool_size, 0)(feature_maps[i], level_boxes, ind)\n pooled.append(pooled_features)\n\n # Pack pooled features into one tensor\n pooled = torch.cat(pooled, dim=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = torch.cat(box_to_level, dim=0)\n\n # Rearrange pooled features to match the order of the original boxes\n _, box_to_level = torch.sort(box_to_level)\n pooled = pooled[box_to_level, :, :]\n\n return pooled\n\n\n############################################################\n# Detection Target Layer\n############################################################\ndef bbox_overlaps(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeate boxes1. 
This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeate() so simulate it\n # using tf.tile() and tf.reshape.\n boxes1_repeat = boxes2.size()[0]\n boxes2_repeat = boxes1.size()[0]\n boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,4)\n boxes2 = boxes2.repeat(boxes2_repeat,1)\n\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = boxes1.chunk(4, dim=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = boxes2.chunk(4, dim=1)\n y1 = torch.max(b1_y1, b2_y1)[:, 0]\n x1 = torch.max(b1_x1, b2_x1)[:, 0]\n y2 = torch.min(b1_y2, b2_y2)[:, 0]\n x2 = torch.min(b1_x2, b2_x2)[:, 0]\n zeros = Variable(torch.zeros(y1.size()[0]), requires_grad=False)\n if y1.is_cuda:\n zeros = zeros.cuda()\n intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros)\n\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area[:,0] + b2_area[:,0] - intersection\n\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = iou.view(boxes2_repeat, boxes1_repeat)\n\n return overlaps\n\ndef detection_target_layer(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Subsamples proposals and generates target box refinment, class_ids,\n and masks for each.\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,\n (dy, dx, log(dh), log(dw), class_id)]\n Class-specific bbox refinments.\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n \"\"\"\n\n # Currently only supports batchsize 1\n proposals = proposals.squeeze(0)\n gt_class_ids = gt_class_ids.squeeze(0)\n gt_boxes = gt_boxes.squeeze(0)\n gt_masks = gt_masks.squeeze(0)\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n negative_gt_class_ids = torch.nonzero(gt_class_ids < 0)\n if len(negative_gt_class_ids) > 0:\n crowd_ix = negative_gt_class_ids[:, 0]\n non_crowd_ix = torch.nonzero(gt_class_ids > 0)[:, 0]\n crowd_boxes = gt_boxes[crowd_ix.data, :]\n crowd_masks = gt_masks[crowd_ix.data, :, :]\n gt_class_ids = gt_class_ids[non_crowd_ix.data]\n gt_boxes = gt_boxes[non_crowd_ix.data, :]\n gt_masks = gt_masks[non_crowd_ix.data, :]\n\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = bbox_overlaps(proposals, crowd_boxes)\n crowd_iou_max = torch.max(crowd_overlaps, dim=1)[0]\n no_crowd_bool = crowd_iou_max < 0.001\n else:\n no_crowd_bool = Variable(torch.ByteTensor(proposals.size()[0]*[True]), requires_grad=False)\n if config.GPU_COUNT:\n no_crowd_bool = no_crowd_bool.cuda()\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = bbox_overlaps(proposals, gt_boxes)\n\n # Determine postive and negative ROIs\n roi_iou_max = torch.max(overlaps, dim=1)[0]\n\n # 1. 
Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = roi_iou_max >= 0.5\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n non_zero_roi = torch.nonzero(positive_roi_bool)\n if len(non_zero_roi) > 0:\n positive_indices = non_zero_roi[:, 0]\n\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n rand_idx = torch.randperm(positive_indices.size()[0])\n rand_idx = rand_idx[:positive_count]\n if config.GPU_COUNT:\n rand_idx = rand_idx.cuda()\n positive_indices = positive_indices[rand_idx]\n positive_count = positive_indices.size()[0]\n positive_rois = proposals[positive_indices.data,:]\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = overlaps[positive_indices.data,:]\n roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]\n roi_gt_boxes = gt_boxes[roi_gt_box_assignment.data,:]\n roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment.data]\n\n # Compute bbox refinement for positive ROIs\n deltas = Variable(utils.box_refinement(positive_rois.data, roi_gt_boxes.data), requires_grad=False)\n std_dev = Variable(torch.from_numpy(config.BBOX_STD_DEV).float(), requires_grad=False)\n if config.GPU_COUNT:\n std_dev = std_dev.cuda()\n deltas /= std_dev\n\n # Assign positive ROIs to GT masks\n roi_masks = gt_masks[roi_gt_box_assignment.data,:,:]\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI corrdinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = positive_rois.chunk(4, dim=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = roi_gt_boxes.chunk(4, dim=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = torch.cat([y1, x1, y2, x2], dim=1)\n box_ids = Variable(torch.arange(roi_masks.size()[0]), requires_grad=False).int()\n if config.GPU_COUNT:\n box_ids = box_ids.cuda()\n masks = Variable(CropAndResizeFunction(config.MASK_SHAPE[0], config.MASK_SHAPE[1], 0)(roi_masks.unsqueeze(1), boxes, box_ids).data, requires_grad=False)\n masks = masks.squeeze(1)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = torch.round(masks)\n else:\n positive_count = 0\n\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_roi_bool = roi_iou_max < 0.5\n negative_roi_bool = negative_roi_bool & no_crowd_bool\n non_zero_negative_roi_bool = torch.nonzero(negative_roi_bool)\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n if len(non_zero_negative_roi_bool) > 0 and positive_count>0:\n negative_indices = non_zero_negative_roi_bool[:, 0]\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = int(r * positive_count - positive_count)\n rand_idx = torch.randperm(negative_indices.size()[0])\n rand_idx = rand_idx[:negative_count]\n if config.GPU_COUNT:\n rand_idx = rand_idx.cuda()\n negative_indices = negative_indices[rand_idx]\n negative_count = negative_indices.size()[0]\n negative_rois = proposals[negative_indices.data, :]\n else:\n negative_count = 0\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n if positive_count > 0 and negative_count > 0:\n rois = torch.cat((positive_rois, negative_rois), dim=0)\n zeros = Variable(torch.zeros(negative_count), requires_grad=False).int()\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n roi_gt_class_ids = torch.cat([roi_gt_class_ids, zeros], dim=0)\n zeros = Variable(torch.zeros(negative_count,4), requires_grad=False)\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n deltas = torch.cat([deltas, zeros], dim=0)\n zeros = Variable(torch.zeros(negative_count,config.MASK_SHAPE[0],config.MASK_SHAPE[1]), requires_grad=False)\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n masks = torch.cat([masks, zeros], dim=0)\n elif positive_count > 0:\n rois = positive_rois\n elif negative_count > 0:\n rois = negative_rois\n zeros = Variable(torch.zeros(negative_count), requires_grad=False)\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n roi_gt_class_ids = zeros\n zeros = Variable(torch.zeros(negative_count,4), requires_grad=False).int()\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n deltas = zeros\n zeros = Variable(torch.zeros(negative_count,config.MASK_SHAPE[0],config.MASK_SHAPE[1]), requires_grad=False)\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n masks = zeros\n else:\n rois = Variable(torch.FloatTensor(), requires_grad=False)\n roi_gt_class_ids = Variable(torch.IntTensor(), requires_grad=False)\n deltas = Variable(torch.FloatTensor(), requires_grad=False)\n masks = Variable(torch.FloatTensor(), requires_grad=False)\n if config.GPU_COUNT:\n rois = rois.cuda()\n roi_gt_class_ids = roi_gt_class_ids.cuda()\n deltas = deltas.cuda()\n masks = masks.cuda()\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef clip_to_window(window, boxes):\n \"\"\"\n window: (y1, x1, y2, x2). The window in the image we want to clip to.\n boxes: [N, (y1, x1, y2, x2)]\n \"\"\"\n boxes[:, 0] = boxes[:, 0].clamp(float(window[0]), float(window[2]))\n boxes[:, 1] = boxes[:, 1].clamp(float(window[1]), float(window[3]))\n boxes[:, 2] = boxes[:, 2].clamp(float(window[0]), float(window[2]))\n boxes[:, 3] = boxes[:, 3].clamp(float(window[1]), float(window[3]))\n\n return boxes\n\ndef refine_detections(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in image coordinates. 
The part of the image\n that contains the image excluding the padding.\n Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)]\n \"\"\"\n\n # Class IDs per ROI\n _, class_ids = torch.max(probs, dim=1)\n\n # Class probability of the top class of each ROI\n # Class-specific bounding box deltas\n idx = torch.arange(class_ids.size()[0]).long()\n if config.GPU_COUNT:\n idx = idx.cuda()\n class_scores = probs[idx, class_ids.data]\n deltas_specific = deltas[idx, class_ids.data]\n\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n std_dev = Variable(torch.from_numpy(np.reshape(config.RPN_BBOX_STD_DEV, [1, 4])).float(), requires_grad=False)\n if config.GPU_COUNT:\n std_dev = std_dev.cuda()\n refined_rois = apply_box_deltas(rois, deltas_specific * std_dev)\n\n # Convert coordiates to image domain\n height, width = config.IMAGE_SHAPE[:2]\n scale = Variable(torch.from_numpy(np.array([height, width, height, width])).float(), requires_grad=False)\n if config.GPU_COUNT:\n scale = scale.cuda()\n refined_rois *= scale\n\n # Clip boxes to image window\n refined_rois = clip_to_window(window, refined_rois)\n\n # Round and cast to int since we're deadling with pixels now\n refined_rois = torch.round(refined_rois)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep_bool = class_ids>0\n\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n keep_bool = keep_bool & (class_scores >= config.DETECTION_MIN_CONFIDENCE)\n keep = torch.nonzero(keep_bool)[:,0]\n\n # Apply per-class NMS\n pre_nms_class_ids = class_ids[keep.data]\n pre_nms_scores = class_scores[keep.data]\n pre_nms_rois = refined_rois[keep.data]\n\n for i, class_id in enumerate(unique1d(pre_nms_class_ids)):\n # Pick detections of this class\n ixs = torch.nonzero(pre_nms_class_ids == class_id)[:,0]\n\n # Sort\n ix_rois = pre_nms_rois[ixs.data]\n ix_scores = pre_nms_scores[ixs]\n ix_scores, order = ix_scores.sort(descending=True)\n ix_rois = ix_rois[order.data,:]\n\n class_keep = nms(torch.cat((ix_rois, ix_scores.unsqueeze(1)), dim=1).data, config.DETECTION_NMS_THRESHOLD)\n\n # Map indicies\n class_keep = keep[ixs[order[class_keep].data].data]\n\n if i==0:\n nms_keep = class_keep\n else:\n nms_keep = unique1d(torch.cat((nms_keep, class_keep)))\n keep = intersect1d(keep, nms_keep)\n\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n top_ids = class_scores[keep.data].sort(descending=True)[1][:roi_count]\n keep = keep[top_ids.data]\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are in image domain.\n result = torch.cat((refined_rois[keep.data],\n class_ids[keep.data].unsqueeze(1).float(),\n class_scores[keep.data].unsqueeze(1)), dim=1)\n\n return result\n\n\ndef detection_layer(config, rois, mrcnn_class, mrcnn_bbox, image_meta):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels\n \"\"\"\n\n # Currently only supports batchsize 1\n rois = rois.squeeze(0)\n\n _, _, window, _ = parse_image_meta(image_meta)\n window = window[0]\n detections = refine_detections(rois, mrcnn_class, mrcnn_bbox, window, config)\n\n return detections\n\n\n############################################################\n# Region Proposal Network\n############################################################\n\nclass RPN(nn.Module):\n \"\"\"Builds the model of Region Proposal Network.\n 
anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n Returns:\n rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, W, W, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n\n def __init__(self, anchors_per_location, anchor_stride, depth):\n super(RPN, self).__init__()\n self.anchors_per_location = anchors_per_location\n self.anchor_stride = anchor_stride\n self.depth = depth\n\n self.padding = SamePad2d(kernel_size=3, stride=self.anchor_stride)\n self.conv_shared = nn.Conv2d(self.depth, 512, kernel_size=3, stride=self.anchor_stride)\n self.relu = nn.ReLU(inplace=True)\n self.conv_class = nn.Conv2d(512, 2 * anchors_per_location, kernel_size=1, stride=1)\n self.softmax = nn.Softmax(dim=2)\n self.conv_bbox = nn.Conv2d(512, 4 * anchors_per_location, kernel_size=1, stride=1)\n\n def forward(self, x):\n # Shared convolutional base of the RPN\n x = self.relu(self.conv_shared(self.padding(x)))\n\n # Anchor Score. [batch, anchors per location * 2, height, width].\n rpn_class_logits = self.conv_class(x)\n\n # Reshape to [batch, 2, anchors]\n rpn_class_logits = rpn_class_logits.permute(0,2,3,1)\n rpn_class_logits = rpn_class_logits.contiguous()\n rpn_class_logits = rpn_class_logits.view(x.size()[0], -1, 2)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = self.softmax(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location, depth]\n # where depth is [x, y, log(w), log(h)]\n rpn_bbox = self.conv_bbox(x)\n\n # Reshape to [batch, 4, anchors]\n rpn_bbox = rpn_bbox.permute(0,2,3,1)\n rpn_bbox = rpn_bbox.contiguous()\n rpn_bbox = rpn_bbox.view(x.size()[0], -1, 4)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\nclass Classifier(nn.Module):\n def __init__(self, depth, pool_size, image_shape, num_classes):\n super(Classifier, self).__init__()\n self.depth = depth\n self.pool_size = pool_size\n self.image_shape = image_shape\n self.num_classes = num_classes\n self.conv1 = nn.Conv2d(self.depth, 1024, kernel_size=self.pool_size, stride=1)\n self.bn1 = nn.BatchNorm2d(1024, eps=0.001, momentum=0.01)\n self.conv2 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1)\n self.bn2 = nn.BatchNorm2d(1024, eps=0.001, momentum=0.01)\n self.relu = nn.ReLU(inplace=True)\n\n self.linear_class = nn.Linear(1024, num_classes)\n self.softmax = nn.Softmax(dim=1)\n\n self.linear_bbox = nn.Linear(1024, num_classes * 4)\n\n def forward(self, x, rois):\n x = pyramid_roi_align([rois]+x, self.pool_size, self.image_shape)\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n\n x = x.view(-1,1024)\n mrcnn_class_logits = self.linear_class(x)\n mrcnn_probs = self.softmax(mrcnn_class_logits)\n\n mrcnn_bbox = self.linear_bbox(x)\n mrcnn_bbox = mrcnn_bbox.view(mrcnn_bbox.size()[0], -1, 4)\n\n return [mrcnn_class_logits, mrcnn_probs, mrcnn_bbox]\n\nclass Mask(nn.Module):\n def __init__(self, depth, pool_size, image_shape, num_classes):\n super(Mask, self).__init__()\n self.depth = depth\n self.pool_size = pool_size\n self.image_shape = image_shape\n self.num_classes = num_classes\n self.padding = 
SamePad2d(kernel_size=3, stride=1)\n self.conv1 = nn.Conv2d(self.depth, 256, kernel_size=3, stride=1)\n self.bn1 = nn.BatchNorm2d(256, eps=0.001)\n self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1)\n self.bn2 = nn.BatchNorm2d(256, eps=0.001)\n self.conv3 = nn.Conv2d(256, 256, kernel_size=3, stride=1)\n self.bn3 = nn.BatchNorm2d(256, eps=0.001)\n self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1)\n self.bn4 = nn.BatchNorm2d(256, eps=0.001)\n self.deconv = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2)\n self.conv5 = nn.Conv2d(256, num_classes, kernel_size=1, stride=1)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x, rois):\n x = pyramid_roi_align([rois] + x, self.pool_size, self.image_shape)\n x = self.conv1(self.padding(x))\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(self.padding(x))\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv3(self.padding(x))\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv4(self.padding(x))\n x = self.bn4(x)\n x = self.relu(x)\n x = self.deconv(x)\n x = self.relu(x)\n x = self.conv5(x)\n x = self.sigmoid(x)\n\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef compute_rpn_class_loss(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.\n \"\"\"\n\n # Squeeze last dim to simplify\n rpn_match = rpn_match.squeeze(2)\n\n # Get anchor classes. Convert the -1/+1 match to 0/1 values.\n anchor_class = (rpn_match == 1).long()\n\n # Positive and Negative anchors contribute to the loss,\n # but neutral anchors (match value = 0) don't.\n indices = torch.nonzero(rpn_match != 0)\n\n # Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = rpn_class_logits[indices.data[:,0],indices.data[:,1],:]\n anchor_class = anchor_class[indices.data[:,0],indices.data[:,1]]\n\n # Crossentropy loss\n loss = F.cross_entropy(rpn_class_logits, anchor_class)\n\n return loss\n\ndef compute_rpn_bbox_loss(target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unsed bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n\n # Squeeze last dim to simplify\n rpn_match = rpn_match.squeeze(2)\n\n # Positive anchors contribute to the loss, but negative and\n # neutral anchors (match value of 0 or -1) don't.\n indices = torch.nonzero(rpn_match==1)\n\n # Pick bbox deltas that contribute to the loss\n rpn_bbox = rpn_bbox[indices.data[:,0],indices.data[:,1]]\n\n # Trim target bounding box deltas to the same length as rpn_bbox.\n target_bbox = target_bbox[0,:rpn_bbox.size()[0],:]\n\n # Smooth L1 loss\n loss = F.smooth_l1_loss(rpn_bbox, target_bbox)\n\n return loss\n\n\ndef compute_mrcnn_class_loss(target_class_ids, pred_class_logits):\n \"\"\"Loss for the classifier head of Mask RCNN.\n target_class_ids: [batch, num_rois]. Integer class IDs. 
Uses zero\n        padding to fill in the array.\n    pred_class_logits: [batch, num_rois, num_classes]\n    \"\"\"\n\n    # Loss\n    if target_class_ids.size():\n        loss = F.cross_entropy(pred_class_logits, target_class_ids.long())\n    else:\n        loss = Variable(torch.FloatTensor([0]), requires_grad=False)\n        if target_class_ids.is_cuda:\n            loss = loss.cuda()\n\n    return loss\n\n\ndef compute_mrcnn_bbox_loss(target_bbox, target_class_ids, pred_bbox):\n    \"\"\"Loss for Mask R-CNN bounding box refinement.\n    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n    target_class_ids: [batch, num_rois]. Integer class IDs.\n    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n    \"\"\"\n\n    if target_class_ids.size():\n        # Only positive ROIs contribute to the loss. And only\n        # the right class_id of each ROI. Get their indices.\n        positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]\n        positive_roi_class_ids = target_class_ids[positive_roi_ix.data].long()\n        indices = torch.stack((positive_roi_ix, positive_roi_class_ids), dim=1)\n\n        # Gather the deltas (predicted and true) that contribute to loss\n        target_bbox = target_bbox[indices[:,0].data,:]\n        pred_bbox = pred_bbox[indices[:,0].data,indices[:,1].data,:]\n\n        # Smooth L1 loss\n        loss = F.smooth_l1_loss(pred_bbox, target_bbox)\n    else:\n        loss = Variable(torch.FloatTensor([0]), requires_grad=False)\n        if target_class_ids.is_cuda:\n            loss = loss.cuda()\n\n    return loss\n\n\ndef compute_mrcnn_mask_loss(target_masks, target_class_ids, pred_masks):\n    \"\"\"Mask binary cross-entropy loss for the masks head.\n    target_masks: [batch, num_rois, height, width].\n        A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n        with values from 0 to 1.\n    \"\"\"\n    if target_class_ids.size():\n        # Only positive ROIs contribute to the loss. 
And only\n # the class specific mask of each ROI.\n positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]\n positive_class_ids = target_class_ids[positive_ix.data].long()\n indices = torch.stack((positive_ix, positive_class_ids), dim=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = target_masks[indices[:,0].data,:,:]\n y_pred = pred_masks[indices[:,0].data,indices[:,1].data,:,:]\n\n # Binary cross entropy\n loss = F.binary_cross_entropy(y_pred, y_true)\n else:\n loss = Variable(torch.FloatTensor([0]), requires_grad=False)\n if target_class_ids.is_cuda:\n loss = loss.cuda()\n\n return loss\n\ndef compute_losses(rpn_match, rpn_bbox, rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask):\n\n rpn_class_loss = compute_rpn_class_loss(rpn_match, rpn_class_logits)\n rpn_bbox_loss = compute_rpn_bbox_loss(rpn_bbox, rpn_match, rpn_pred_bbox)\n mrcnn_class_loss = compute_mrcnn_class_loss(target_class_ids, mrcnn_class_logits)\n mrcnn_bbox_loss = compute_mrcnn_bbox_loss(target_deltas, target_class_ids, mrcnn_bbox)\n mrcnn_mask_loss = compute_mrcnn_mask_loss(target_mask, target_class_ids, mrcnn_mask)\n\n return [rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss]\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n augment: If true, apply random image augmentation. Currently, only\n horizontal flipping is offered.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n shape = image.shape\n image, window, scale, padding = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n max_dim=config.IMAGE_MAX_DIM,\n padding=config.IMAGE_PADDING)\n mask = utils.resize_mask(mask, scale, padding)\n\n # Random horizontal flips.\n if augment:\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Bounding boxes. 
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, shape, window, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # TODO: If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n rpn_match[gt_iou_argmax] = 1\n # 3. 
Set anchors with high overlap as positive.\n    rpn_match[anchor_iou_max >= 0.7] = 1\n\n    # Subsample to balance positive and negative anchors\n    # Don't let positives be more than half the anchors\n    ids = np.where(rpn_match == 1)[0]\n    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n    if extra > 0:\n        # Reset the extra ones to neutral\n        ids = np.random.choice(ids, extra, replace=False)\n        rpn_match[ids] = 0\n    # Same for negative proposals\n    ids = np.where(rpn_match == -1)[0]\n    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n                        np.sum(rpn_match == 1))\n    if extra > 0:\n        # Reset the extra ones to neutral\n        ids = np.random.choice(ids, extra, replace=False)\n        rpn_match[ids] = 0\n\n    # For positive anchors, compute shift and scale needed to transform them\n    # to match the corresponding GT boxes.\n    ids = np.where(rpn_match == 1)[0]\n    ix = 0  # index into rpn_bbox\n    # TODO: use box_refinement() rather than duplicating the code here\n    for i, a in zip(ids, anchors[ids]):\n        # Closest gt box (it might have IoU < 0.7)\n        gt = gt_boxes[anchor_iou_argmax[i]]\n\n        # Convert coordinates to center plus width/height.\n        # GT Box\n        gt_h = gt[2] - gt[0]\n        gt_w = gt[3] - gt[1]\n        gt_center_y = gt[0] + 0.5 * gt_h\n        gt_center_x = gt[1] + 0.5 * gt_w\n        # Anchor\n        a_h = a[2] - a[0]\n        a_w = a[3] - a[1]\n        a_center_y = a[0] + 0.5 * a_h\n        a_center_x = a[1] + 0.5 * a_w\n\n        # Compute the bbox refinement that the RPN should predict.\n        rpn_bbox[ix] = [\n            (gt_center_y - a_center_y) / a_h,\n            (gt_center_x - a_center_x) / a_w,\n            np.log(gt_h / a_h),\n            np.log(gt_w / a_w),\n        ]\n        # Normalize\n        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n        ix += 1\n\n    return rpn_match, rpn_bbox\n\nclass Dataset(torch.utils.data.Dataset):\n    def __init__(self, dataset, config, augment=True):\n        \"\"\"A generator that returns images and corresponding target class ids,\n        bounding box deltas, and masks.\n        dataset: The Dataset object to pick data from\n        config: The model config object\n        shuffle: If True, shuffles the samples before every epoch\n        augment: If True, applies image augmentation to images (currently only\n            horizontal flips are supported)\n        Returns a Python generator. Upon calling next() on it, the\n        generator returns two lists, inputs and outputs. The contents\n        of the lists differ depending on the received arguments:\n        inputs list:\n        - images: [batch, H, W, C]\n        - image_metas: [batch, size of image meta]\n        - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n        - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n        - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n        - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n        - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n            are those of the image unless use_mini_mask is True, in which\n            case they are defined in MINI_MASK_SHAPE.\n        outputs list: Usually empty in regular training. 
But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n self.b = 0 # batch item index\n self.image_index = -1\n self.image_ids = np.copy(dataset.image_ids)\n self.error_count = 0\n\n self.dataset = dataset\n self.config = config\n self.augment = augment\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n config.BACKBONE_SHAPES,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n def __getitem__(self, image_index):\n # Get GT bounding boxes and masks for image.\n image_id = self.image_ids[image_index]\n image, image_metas, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(self.dataset, self.config, image_id, augment=self.augment,\n use_mini_mask=self.config.USE_MINI_MASK)\n\n # Skip images that have no instances. This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n return None\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,\n gt_class_ids, gt_boxes, self.config)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n rpn_match = rpn_match[:, np.newaxis]\n images = mold_image(image.astype(np.float32), self.config)\n\n # Convert\n images = torch.from_numpy(images.transpose(2, 0, 1)).float()\n image_metas = torch.from_numpy(image_metas)\n rpn_match = torch.from_numpy(rpn_match)\n rpn_bbox = torch.from_numpy(rpn_bbox).float()\n gt_class_ids = torch.from_numpy(gt_class_ids)\n gt_boxes = torch.from_numpy(gt_boxes).float()\n gt_masks = torch.from_numpy(gt_masks.astype(int).transpose(2, 0, 1)).float()\n\n return images, image_metas, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks\n\n def __len__(self):\n return self.image_ids.shape[0]\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN(nn.Module):\n \"\"\"Encapsulates the Mask RCNN model functionality.\n \"\"\"\n\n def __init__(self, config, model_dir):\n \"\"\"\n config: A Sub-class of the Config class\n model_dir: Directory to save training logs and trained weights\n \"\"\"\n super(MaskRCNN, self).__init__()\n self.config = config\n self.model_dir = model_dir\n self.set_log_dir()\n self.build(config=config)\n self.initialize_weights()\n self.loss_history = []\n self.val_loss_history = []\n\n def build(self, config):\n \"\"\"Build Mask R-CNN architecture.\n \"\"\"\n\n # Image size must be dividable by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be dividable by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling.\"\n \"For example, use 256, 320, 384, 448, 512, ... etc. 
\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n resnet = ResNet(\"resnet101\", stage5=True)\n C1, C2, C3, C4, C5 = resnet.stages()\n\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n self.fpn = FPN(C1, C2, C3, C4, C5, out_channels=256)\n\n # Generate Anchors\n self.anchors = Variable(torch.from_numpy(utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n config.BACKBONE_SHAPES,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)).float(), requires_grad=False)\n if self.config.GPU_COUNT:\n self.anchors = self.anchors.cuda()\n\n # RPN\n self.rpn = RPN(len(config.RPN_ANCHOR_RATIOS), config.RPN_ANCHOR_STRIDE, 256)\n\n # FPN Classifier\n self.classifier = Classifier(256, config.POOL_SIZE, config.IMAGE_SHAPE, config.NUM_CLASSES)\n\n # FPN Mask\n self.mask = Mask(256, config.MASK_POOL_SIZE, config.IMAGE_SHAPE, config.NUM_CLASSES)\n\n # Fix batch norm layers\n def set_bn_fix(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n for p in m.parameters(): p.requires_grad = False\n\n self.apply(set_bn_fix)\n\n def initialize_weights(self):\n \"\"\"Initialize model weights.\n \"\"\"\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def set_trainable(self, layer_regex, model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n\n for param in self.named_parameters():\n layer_name = param[0]\n trainable = bool(re.fullmatch(layer_regex, layer_name))\n if not trainable:\n param[1].requires_grad = False\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from we left of. Get epoch and date from the file name\n # A sample model path might look like:\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5\n regex = r\".*/\\w+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})/mask\\_rcnn\\_\\w+(\\d{4})\\.pth\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n self.epoch = int(m.group(6))\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. 
Include placeholders that get filled by Keras.\n        self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.pth\".format(\n            self.config.NAME.lower()))\n        self.checkpoint_path = self.checkpoint_path.replace(\n            \"*epoch*\", \"{:04d}\")\n\n    def find_last(self):\n        \"\"\"Finds the last checkpoint file of the last trained model in the\n        model directory.\n        Returns:\n            log_dir: The directory where events and weights are saved\n            checkpoint_path: the path to the last checkpoint file\n        \"\"\"\n        # Get directory names. Each directory corresponds to a model\n        dir_names = next(os.walk(self.model_dir))[1]\n        key = self.config.NAME.lower()\n        dir_names = filter(lambda f: f.startswith(key), dir_names)\n        dir_names = sorted(dir_names)\n        if not dir_names:\n            return None, None\n        # Pick last directory\n        dir_name = os.path.join(self.model_dir, dir_names[-1])\n        # Find the last checkpoint\n        checkpoints = next(os.walk(dir_name))[2]\n        checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n        checkpoints = sorted(checkpoints)\n        if not checkpoints:\n            return dir_name, None\n        checkpoint = os.path.join(dir_name, checkpoints[-1])\n        return dir_name, checkpoint\n\n    def load_weights(self, filepath):\n        \"\"\"Modified version of the corresponding Keras function with\n        the addition of multi-GPU support and the ability to exclude\n        some layers from loading.\n        exclude: list of layer names to exclude\n        \"\"\"\n        if os.path.exists(filepath):\n            state_dict = torch.load(filepath)\n            self.load_state_dict(state_dict, strict=False)\n        else:\n            print(\"Weight file not found ...\")\n\n        # Update the log directory\n        self.set_log_dir(filepath)\n        if not os.path.exists(self.log_dir):\n            os.makedirs(self.log_dir)\n\n    def detect(self, images):\n        \"\"\"Runs the detection pipeline.\n        images: List of images, potentially of different sizes.\n        Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Convert images to torch tensor\n molded_images = torch.from_numpy(molded_images.transpose(0, 3, 1, 2)).float()\n\n # To GPU\n if self.config.GPU_COUNT:\n molded_images = molded_images.cuda()\n\n # Wrap in variable\n molded_images = Variable(molded_images)\n\n # Run object detection\n detections, mrcnn_mask = self.predict([molded_images, image_metas], mode='inference')\n\n # Convert to numpy\n detections = detections.data.cpu().numpy()\n mrcnn_mask = mrcnn_mask.permute(0, 1, 3, 4, 2).data.cpu().numpy()\n\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def predict(self, input, mode):\n molded_images = input[0]\n image_metas = input[1]\n\n if mode == 'inference':\n self.eval()\n elif mode == 'training':\n self.train()\n\n # Set batchnorm always in eval mode during training\n def set_bn_eval(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n m.eval()\n\n self.apply(set_bn_eval)\n\n # Feature extraction\n [p2_out, p3_out, p4_out, p5_out, p6_out] = self.fpn(molded_images)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [p2_out, p3_out, p4_out, p5_out, p6_out]\n mrcnn_feature_maps = [p2_out, p3_out, p4_out, p5_out]\n\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(self.rpn(p))\n\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n outputs = list(zip(*layer_outputs))\n outputs = [torch.cat(list(o), dim=1) for o in outputs]\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = self.config.POST_NMS_ROIS_TRAINING if mode == \"training\" \\\n else self.config.POST_NMS_ROIS_INFERENCE\n rpn_rois = proposal_layer([rpn_class, rpn_bbox],\n proposal_count=proposal_count,\n nms_threshold=self.config.RPN_NMS_THRESHOLD,\n anchors=self.anchors,\n config=self.config)\n\n if mode == 'inference':\n # Network Heads\n # Proposal classifier and BBox regressor heads\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox = self.classifier(mrcnn_feature_maps, rpn_rois)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates\n detections = detection_layer(self.config, rpn_rois, mrcnn_class, mrcnn_bbox, image_metas)\n\n # Convert boxes to normalized coordinates\n # TODO: let DetectionLayer return normalized coordinates to avoid\n # unnecessary conversions\n h, w = self.config.IMAGE_SHAPE[:2]\n scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)\n if self.config.GPU_COUNT:\n scale = scale.cuda()\n detection_boxes = detections[:, :4] / scale\n\n # Add back batch dimension\n detection_boxes = detection_boxes.unsqueeze(0)\n\n # Create masks for detections\n mrcnn_mask = self.mask(mrcnn_feature_maps, detection_boxes)\n\n # Add back batch dimension\n detections = detections.unsqueeze(0)\n mrcnn_mask = mrcnn_mask.unsqueeze(0)\n\n return [detections, mrcnn_mask]\n\n elif mode == 'training':\n\n gt_class_ids = input[2]\n gt_boxes = input[3]\n gt_masks = input[4]\n\n # Normalize coordinates\n h, w = self.config.IMAGE_SHAPE[:2]\n scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)\n if self.config.GPU_COUNT:\n scale = scale.cuda()\n gt_boxes = gt_boxes / scale\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_deltas, target_mask = \\\n detection_target_layer(rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)\n\n if not rois.size():\n mrcnn_class_logits = Variable(torch.FloatTensor())\n mrcnn_class = Variable(torch.IntTensor())\n mrcnn_bbox = Variable(torch.FloatTensor())\n mrcnn_mask = Variable(torch.FloatTensor())\n if self.config.GPU_COUNT:\n mrcnn_class_logits = mrcnn_class_logits.cuda()\n mrcnn_class = mrcnn_class.cuda()\n mrcnn_bbox = mrcnn_bbox.cuda()\n mrcnn_mask = mrcnn_mask.cuda()\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox = self.classifier(mrcnn_feature_maps, rois)\n\n # Create masks for detections\n mrcnn_mask = self.mask(mrcnn_feature_maps, rois)\n\n return [rpn_class_logits, rpn_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask]\n\n def train_model(self, train_dataset, val_dataset, learning_rate, epochs, layers):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. 
Note that previous training epochs\n            are considered to be done already, so this actually determines\n            the epochs to train in total rather than in this particular\n            call.\n        layers: Allows selecting which layers to train. It can be:\n            - A regular expression to match layer names to train\n            - One of these predefined values:\n              heads: The RPN, classifier and mask heads of the network\n              all: All the layers\n              3+: Train Resnet stage 3 and up\n              4+: Train Resnet stage 4 and up\n              5+: Train Resnet stage 5 and up\n        \"\"\"\n\n        # Pre-defined layer regular expressions\n        layer_regex = {\n            # all layers but the backbone\n            \"heads\": r\"(fpn.P5\\_.*)|(fpn.P4\\_.*)|(fpn.P3\\_.*)|(fpn.P2\\_.*)|(rpn.*)|(classifier.*)|(mask.*)\",\n            # From a specific Resnet stage and up\n            \"3+\": r\"(fpn.C3.*)|(fpn.C4.*)|(fpn.C5.*)|(fpn.P5\\_.*)|(fpn.P4\\_.*)|(fpn.P3\\_.*)|(fpn.P2\\_.*)|(rpn.*)|(classifier.*)|(mask.*)\",\n            \"4+\": r\"(fpn.C4.*)|(fpn.C5.*)|(fpn.P5\\_.*)|(fpn.P4\\_.*)|(fpn.P3\\_.*)|(fpn.P2\\_.*)|(rpn.*)|(classifier.*)|(mask.*)\",\n            \"5+\": r\"(fpn.C5.*)|(fpn.P5\\_.*)|(fpn.P4\\_.*)|(fpn.P3\\_.*)|(fpn.P2\\_.*)|(rpn.*)|(classifier.*)|(mask.*)\",\n            # All layers\n            \"all\": \".*\",\n        }\n        if layers in layer_regex.keys():\n            layers = layer_regex[layers]\n\n        # Data generators\n        train_set = Dataset(train_dataset, self.config, augment=True)\n        train_generator = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=True, num_workers=4)\n        val_set = Dataset(val_dataset, self.config, augment=True)\n        val_generator = torch.utils.data.DataLoader(val_set, batch_size=1, shuffle=True, num_workers=4)\n\n        # Train\n        log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch+1, learning_rate))\n        log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n        self.set_trainable(layers)\n\n        # Optimizer object\n        # Add L2 Regularization\n        # Skip gamma and beta weights of batch normalization layers.\n        trainables_wo_bn = [param for name, param in self.named_parameters() if param.requires_grad and not 'bn' in name]\n        trainables_only_bn = [param for name, param in self.named_parameters() if param.requires_grad and 'bn' in name]\n        optimizer = optim.SGD([\n            {'params': trainables_wo_bn, 'weight_decay': self.config.WEIGHT_DECAY},\n            {'params': trainables_only_bn}\n        ], lr=learning_rate, momentum=self.config.LEARNING_MOMENTUM)\n\n        for epoch in range(self.epoch+1, epochs+1):\n            log(\"Epoch {}/{}.\".format(epoch, epochs))\n\n            # Training\n            loss, loss_rpn_class, loss_rpn_bbox, loss_mrcnn_class, loss_mrcnn_bbox, loss_mrcnn_mask = self.train_epoch(train_generator, optimizer, self.config.STEPS_PER_EPOCH)\n\n            # Validation\n            val_loss, val_loss_rpn_class, val_loss_rpn_bbox, val_loss_mrcnn_class, val_loss_mrcnn_bbox, val_loss_mrcnn_mask = self.valid_epoch(val_generator, self.config.VALIDATION_STEPS)\n\n            # Statistics\n            self.loss_history.append([loss, loss_rpn_class, loss_rpn_bbox, loss_mrcnn_class, loss_mrcnn_bbox, loss_mrcnn_mask])\n            self.val_loss_history.append([val_loss, val_loss_rpn_class, val_loss_rpn_bbox, val_loss_mrcnn_class, val_loss_mrcnn_bbox, val_loss_mrcnn_mask])\n            visualize.plot_loss(self.loss_history, self.val_loss_history, save=True, log_dir=self.log_dir)\n\n            # Save model\n            torch.save(self.state_dict(), self.checkpoint_path.format(epoch))\n\n        self.epoch = epochs\n\n\n\n    def train_epoch(self, datagenerator, optimizer, steps):\n        batch_count = 0\n        loss_sum = 0\n        loss_rpn_class_sum = 0\n        loss_rpn_bbox_sum = 0\n        loss_mrcnn_class_sum = 0\n        loss_mrcnn_bbox_sum = 0\n        loss_mrcnn_mask_sum = 0\n        step = 0\n\n        optimizer.zero_grad()\n\n        for inputs in 
datagenerator:\n batch_count += 1\n\n images = inputs[0]\n image_metas = inputs[1]\n rpn_match = inputs[2]\n rpn_bbox = inputs[3]\n gt_class_ids = inputs[4]\n gt_boxes = inputs[5]\n gt_masks = inputs[6]\n\n # image_metas as numpy array\n image_metas = image_metas.numpy()\n\n # Wrap in variables\n images = Variable(images)\n rpn_match = Variable(rpn_match)\n rpn_bbox = Variable(rpn_bbox)\n gt_class_ids = Variable(gt_class_ids)\n gt_boxes = Variable(gt_boxes)\n gt_masks = Variable(gt_masks)\n\n # To GPU\n if self.config.GPU_COUNT:\n images = images.cuda()\n rpn_match = rpn_match.cuda()\n rpn_bbox = rpn_bbox.cuda()\n gt_class_ids = gt_class_ids.cuda()\n gt_boxes = gt_boxes.cuda()\n gt_masks = gt_masks.cuda()\n\n # Run object detection\n rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask = \\\n self.predict([images, image_metas, gt_class_ids, gt_boxes, gt_masks], mode='training')\n\n # Compute losses\n rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss = compute_losses(rpn_match, rpn_bbox, rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask)\n loss = rpn_class_loss + rpn_bbox_loss + mrcnn_class_loss + mrcnn_bbox_loss + mrcnn_mask_loss\n\n # Backpropagation\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.parameters(), 5.0)\n if (batch_count % self.config.BATCH_SIZE) == 0:\n optimizer.step()\n optimizer.zero_grad()\n batch_count = 0\n\n # Progress\n printProgressBar(step + 1, steps, prefix=\"\\t{}/{}\".format(step + 1, steps),\n suffix=\"Complete - loss: {:.5f} - rpn_class_loss: {:.5f} - rpn_bbox_loss: {:.5f} - mrcnn_class_loss: {:.5f} - mrcnn_bbox_loss: {:.5f} - mrcnn_mask_loss: {:.5f}\".format(\n loss.data.cpu().item(), rpn_class_loss.data.cpu().item(), rpn_bbox_loss.data.cpu().item(),\n mrcnn_class_loss.data.cpu().item(), mrcnn_bbox_loss.data.cpu().item(),\n mrcnn_mask_loss.data.cpu().item()), length=10)\n\n # Statistics\n loss_sum += loss.data.cpu().item()/steps\n loss_rpn_class_sum += rpn_class_loss.data.cpu().item()/steps\n loss_rpn_bbox_sum += rpn_bbox_loss.data.cpu().item()/steps\n loss_mrcnn_class_sum += mrcnn_class_loss.data.cpu().item()/steps\n loss_mrcnn_bbox_sum += mrcnn_bbox_loss.data.cpu().item()/steps\n loss_mrcnn_mask_sum += mrcnn_mask_loss.data.cpu().item()/steps\n\n # Break after 'steps' steps\n if step==steps-1:\n break\n step += 1\n\n return loss_sum, loss_rpn_class_sum, loss_rpn_bbox_sum, loss_mrcnn_class_sum, loss_mrcnn_bbox_sum, loss_mrcnn_mask_sum\n\n def valid_epoch(self, datagenerator, steps):\n\n step = 0\n loss_sum = 0\n loss_rpn_class_sum = 0\n loss_rpn_bbox_sum = 0\n loss_mrcnn_class_sum = 0\n loss_mrcnn_bbox_sum = 0\n loss_mrcnn_mask_sum = 0\n\n for inputs in datagenerator:\n images = inputs[0]\n image_metas = inputs[1]\n rpn_match = inputs[2]\n rpn_bbox = inputs[3]\n gt_class_ids = inputs[4]\n gt_boxes = inputs[5]\n gt_masks = inputs[6]\n\n # image_metas as numpy array\n image_metas = image_metas.numpy()\n\n # Wrap in variables\n images = Variable(images, volatile=True)\n rpn_match = Variable(rpn_match, volatile=True)\n rpn_bbox = Variable(rpn_bbox, volatile=True)\n gt_class_ids = Variable(gt_class_ids, volatile=True)\n gt_boxes = Variable(gt_boxes, volatile=True)\n gt_masks = Variable(gt_masks, volatile=True)\n\n # To GPU\n if self.config.GPU_COUNT:\n images = images.cuda()\n rpn_match = rpn_match.cuda()\n rpn_bbox = rpn_bbox.cuda()\n gt_class_ids = gt_class_ids.cuda()\n gt_boxes = 
gt_boxes.cuda()\n                gt_masks = gt_masks.cuda()\n\n            # Run object detection\n            rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask = \\\n                self.predict([images, image_metas, gt_class_ids, gt_boxes, gt_masks], mode='training')\n\n            if not target_class_ids.size():\n                continue\n\n            # Compute losses\n            rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss = compute_losses(rpn_match, rpn_bbox, rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask)\n            loss = rpn_class_loss + rpn_bbox_loss + mrcnn_class_loss + mrcnn_bbox_loss + mrcnn_mask_loss\n\n            # Progress\n            printProgressBar(step + 1, steps, prefix=\"\\t{}/{}\".format(step + 1, steps),\n                             suffix=\"Complete - loss: {:.5f} - rpn_class_loss: {:.5f} - rpn_bbox_loss: {:.5f} - mrcnn_class_loss: {:.5f} - mrcnn_bbox_loss: {:.5f} - mrcnn_mask_loss: {:.5f}\".format(\n                                 loss.data.cpu()[0], rpn_class_loss.data.cpu()[0], rpn_bbox_loss.data.cpu()[0],\n                                 mrcnn_class_loss.data.cpu()[0], mrcnn_bbox_loss.data.cpu()[0],\n                                 mrcnn_mask_loss.data.cpu()[0]), length=10)\n\n            # Statistics\n            loss_sum += loss.data.cpu()[0]/steps\n            loss_rpn_class_sum += rpn_class_loss.data.cpu()[0]/steps\n            loss_rpn_bbox_sum += rpn_bbox_loss.data.cpu()[0]/steps\n            loss_mrcnn_class_sum += mrcnn_class_loss.data.cpu()[0]/steps\n            loss_mrcnn_bbox_sum += mrcnn_bbox_loss.data.cpu()[0]/steps\n            loss_mrcnn_mask_sum += mrcnn_mask_loss.data.cpu()[0]/steps\n\n            # Break after 'steps' steps\n            if step==steps-1:\n                break\n            step += 1\n\n        return loss_sum, loss_rpn_class_sum, loss_rpn_bbox_sum, loss_mrcnn_class_sum, loss_mrcnn_bbox_sum, loss_mrcnn_mask_sum\n\n\n\n    def mold_inputs(self, images):\n        \"\"\"Takes a list of images and modifies them to the format expected\n        as an input to the neural network.\n        images: List of image matrices [height,width,depth]. Images can have\n            different sizes.\n        Returns 3 Numpy matrices:\n        molded_images: [N, h, w, 3]. Images resized and normalized.\n        image_metas: [N, length of meta data]. Details about each image.\n        windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image to fit the model expected size\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n max_dim=self.config.IMAGE_MAX_DIM,\n padding=self.config.IMAGE_PADDING)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, window,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n detections: [N, (y1, x1, y2, x2, class_id, score)]\n mrcnn_mask: [N, height, width, num_classes]\n image_shape: [height, width, depth] Original size of the image before resizing\n window: [y1, x1, y2, x2] Box in the image where the real image is\n excluding the padding.\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Compute scale and shift to translate coordinates to image domain.\n h_scale = image_shape[0] / (window[2] - window[0])\n w_scale = image_shape[1] / (window[3] - window[1])\n scale = min(h_scale, w_scale)\n shift = window[:2] # y, x\n scales = np.array([scale, scale, scale, scale])\n shifts = np.array([shift[0], shift[1], shift[0], shift[1]])\n\n # Translate bounding boxes to image domain\n boxes = np.multiply(boxes - shifts, scales).astype(np.int32)\n\n # Filter out detections with zero area. 
Often only happens in early\n        # stages of training when the network weights are still a bit random.\n        exclude_ix = np.where(\n            (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n        if exclude_ix.shape[0] > 0:\n            boxes = np.delete(boxes, exclude_ix, axis=0)\n            class_ids = np.delete(class_ids, exclude_ix, axis=0)\n            scores = np.delete(scores, exclude_ix, axis=0)\n            masks = np.delete(masks, exclude_ix, axis=0)\n            N = class_ids.shape[0]\n\n        # Resize masks to original image size and set boundary threshold.\n        full_masks = []\n        for i in range(N):\n            # Convert neural network mask to full size mask\n            full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)\n            full_masks.append(full_mask)\n        full_masks = np.stack(full_masks, axis=-1)\\\n            if full_masks else np.empty((0,) + masks.shape[1:3])\n\n        return boxes, class_ids, scores, full_masks\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, image_shape, window, active_class_ids):\n    \"\"\"Takes attributes of an image and puts them in one 1D array. Use\n    parse_image_meta() to parse the values back.\n    image_id: An int ID of the image. Useful for debugging.\n    image_shape: [height, width, channels]\n    window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n        image is (excluding the padding)\n    active_class_ids: List of class_ids available in the dataset from which\n        the image came. Useful if training on images from multiple datasets\n        where not all classes are present in all datasets.\n    \"\"\"\n    meta = np.array(\n        [image_id] +            # size=1\n        list(image_shape) +     # size=3\n        list(window) +          # size=4 (y1, x1, y2, x2) in image coordinates\n        list(active_class_ids)  # size=num_classes\n    )\n    return meta\n\n\n# Two functions (for Numpy and TF) to parse image_meta tensors.\ndef parse_image_meta(meta):\n    \"\"\"Parses an image info Numpy array to its components.\n    See compose_image_meta() for more details.\n    \"\"\"\n    image_id = meta[:, 0]\n    image_shape = meta[:, 1:4]\n    window = meta[:, 4:8]  # (y1, x1, y2, x2) window of image in pixels\n    active_class_ids = meta[:, 8:]\n    return image_id, image_shape, window, active_class_ids\n\n\ndef parse_image_meta_graph(meta):\n    \"\"\"Parses a tensor that contains image attributes to its components.\n    See compose_image_meta() for more details.\n    meta: [batch, meta length] where meta length depends on NUM_CLASSES\n    \"\"\"\n    image_id = meta[:, 0]\n    image_shape = meta[:, 1:4]\n    window = meta[:, 4:8]\n    active_class_ids = meta[:, 8:]\n    return [image_id, image_shape, window, active_class_ids]\n\n\ndef mold_image(images, config):\n    \"\"\"Takes RGB images with 0-255 values and subtracts\n    the mean pixel and converts it to float. Expects image\n    colors in RGB order.\n    \"\"\"\n    return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n    \"\"\"Takes an image normalized with mold() and returns the original.\"\"\"\n    return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n
] | [
[
"torch.nn.functional.upsample",
"torch.nn.Softmax",
"numpy.amax",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.load",
"torch.utils.data.DataLoader",
"torch.nn.init.xavier_uniform_",
"torch.FloatTensor",
"numpy.any",
"torch.nn.functional.smooth_l1_loss",
"numpy.where",
"torch.autograd.Variable",
"numpy.fliplr",
"torch.sqrt",
"torch.round",
"numpy.arange",
"numpy.reshape",
"torch.from_numpy",
"numpy.stack",
"torch.nn.Sigmoid",
"numpy.copy",
"numpy.argmax",
"torch.sort",
"torch.nonzero",
"torch.optim.SGD",
"numpy.zeros",
"torch.nn.functional.pad",
"torch.nn.Sequential",
"numpy.log",
"torch.nn.ConvTranspose2d",
"numpy.random.choice",
"numpy.multiply",
"torch.min",
"torch.nn.Conv2d",
"torch.IntTensor",
"torch.exp",
"torch.nn.Linear",
"numpy.delete",
"torch.log",
"torch.nn.BatchNorm2d",
"torch.stack",
"numpy.array",
"numpy.sum",
"torch.ByteTensor",
"torch.nn.functional.cross_entropy",
"numpy.ones",
"torch.nn.MaxPool2d",
"torch.nn.functional.binary_cross_entropy",
"torch.nn.modules.utils._pair",
"torch.nn.ReLU",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
htwangtw/depersonalisation | [
"2631c9fd6fd64f9d680b292d317f991f6a52b85f"
] | [
"data/code/src/subject_trialHRV.py"
] | [
"import glob, os\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom scipy.stats import zscore\nfrom scipy import interpolate, signal\n\nfrom tftb.processing import smoothed_pseudo_wigner_ville as spwvd\n\n# misc\nimport warnings\n\nhome = str(Path.home())\np = Path(home + \"/projects/critchley_depersonalisation\")\nparticipants = pd.read_csv(p / \"data\" / \"participants.tsv\", sep=\"\\t\")\nID_list = participants.query(\"task_heartbeat_trialHRV > 0\").participant_id.tolist()\n\nspike_fs = 1010\n\n\ndef calculate_ibi(peaks, frequency=100):\n \"\"\"\n peak: a list of binary events\n list length == recording time\n \"\"\"\n t = np.arange(0, len(peaks)) / frequency\n p_time = t[peaks == 1]\n ibi = np.diff(p_time)\n return ibi\n\n\nfrequency_bands = {\n \"vlf\": [\"Very low frequency\", (0.003, 0.04), \"b\"],\n \"lf\": [\"Low frequency\", (0.04, 0.15), \"g\"],\n \"hf\": [\"High frequency\", (0.15, 0.4), \"r\"],\n}\n\nfor subject in ID_list[0:1]:\n print(subject)\n event_path = (\n p / \"data\" / subject / \"func\" / f\"{subject}_task-heartbeat_run-1_events.tsv\"\n )\n physio_path = (\n p / \"data\" / subject / \"func\" / f\"{subject}_task-heartbeat_run-1_physio.tsv.gz\"\n )\n df = pd.read_csv(event_path, sep=\"\\t\")\n df_physio = pd.read_csv(physio_path, sep=\"\\t\", compression=\"gzip\")\n\n # trial trigger time\n total_trials = df.shape[0]\n total_sync = df_physio[\"stim\"].sum()\n\n # assert total_sync == (total_trials + 1), \"{}: weird num of trials\".format(subject)\n time = np.array(df_physio.index.tolist()) / spike_fs # unit in second\n bin_stim = df_physio[\"stim\"].values.astype(\"bool\")\n\n # calculate IBI of the whole serie\n full_ibi = calculate_ibi(df_physio.cardiac_event.values, frequency=spike_fs)\n ibi_timestamp = np.cumsum(full_ibi)\n\n # detect outlier (>2.5 sd) and repalce with nan\n keep_idx = zscore(full_ibi) < 2.5\n\n # interpolate nan\n f = interpolate.interp1d(\n ibi_timestamp[keep_idx], full_ibi[keep_idx], \"cubic\", fill_value=\"extrapolate\"\n )\n full_ibi_inter = f(ibi_timestamp)\n\n # resample rr interval to 4 hz\n fs = 4\n time = np.cumsum(full_ibi_inter)\n f = interpolate.interp1d(time, full_ibi_inter, \"cubic\")\n t = np.arange(time[0], time[-1], 1 / fs)\n rr = f(t)\n rr -= rr.mean() # detrend\n\n # power spectrum density spwvd\n nfft = 1\n while nfft < nperseg:\n nfft *= 2\n freq = fs / 2 * np.linspace(0, 1, nfft / 4)\n twin = 4\n fwin = 7\n twindow = signal.hamming(2 ** twin + 1)\n fwindow = signal.hamming(2 ** fwin + 1)\n tfr = spwvd(rr, t, int(nfft / 4), twindow, fwindow)\n psd = tfr ** 2\n\n # Detrend the first 10s to avoid the edge effect\n detrend_idx = np.where(t > 10)[0][0]\n psd[:, :detrend_idx] = 0\n\n # extract relevant frequency band\n for f in [\"lf\", \"hf\"]:\n lb = frequency_bands[f][1][0]\n ub = frequency_bands[f][1][1]\n idx_freq = np.logical_and(freq >= lb, freq < ub)\n amptitude = np.trapz(y=psd[idx_freq, :], dx=np.diff(freq)[0], axis=0)\n plt.plot(t, amptitude)\n\n hrv_stats = pd.DataFrame(\n None,\n columns=[\"lf_power\", \"hf_power\", \"rmssd\", \"n_peak\", \"bpm\", \"qc\"],\n index=range(0, total_sync - 1),\n )\n\n for i in range(1, total_sync): # the first on set was the 6th volume of the scanner\n\n t_start = time[bin_stim][i]\n # The behavioural spreadsheet starts from the first behavioural trial\n df_idx = i - 1\n dur = df.loc[df_idx, \"duration\"]\n t_end = t_start + dur\n\n # Create a window between start and end of heart monitoring in ibi\n 
ibi_start = np.where(ibi_timestamp > t_start)[0][0]\n ibi_end = np.where(ibi_timestamp < t_end)[0][-1]\n\n ibi = full_ibi_inter[ibi_start : ibi_end + 1]\n n_peak = len(ibi)\n rmssd = np.mean(np.diff(ibi * 1000) ** 2) ** 0.5 # HRV in milliseconds\n bpm = n_peak / dur * 60\n\n hrv_stats.loc[df_idx, \"rmssd\"] = rmssd\n hrv_stats.loc[df_idx, \"n_peak\"] = n_peak\n hrv_stats.loc[df_idx, \"bpm\"] = bpm\n if rmssd > 270: # flag unusual trials\n flag = 1\n elif np.isnan(rmssd):\n flag = 1\n else:\n flag = 0\n hrv_stats.loc[df_idx, \"qc\"] = flag\n\n # power spectrum measure\n hrv_stats.loc[df_idx, \"lf_amplitude\"] = lf\n hrv_stats.loc[df_idx, \"hf_amplitude\"] = hf\n\n # impute flagged trials with median\n if hrv_stats[\"qc\"].sum() > 0:\n val = hrv_stats.loc[:, \"lf_power\":\"bpm\"].values.astype(float)\n lst_qc = hrv_stats.qc.tolist()\n val[np.array(lst_qc) == 1, :] = np.nan\n median = np.nanmedian(val, axis=0)\n val[np.array(lst_qc) == 1, :] = median\n hrv_stats.loc[:, \"lf_power\":\"bpm\"] = val\n\n print(\"{} unusual trials: {}\".format(sum(lst_qc), subject))\n with open(\"./bad_quality_spike_csv.txt\", \"a\") as f:\n f.write(subject + \"\\n\")\n hrv_stats = pd.concat([df, hrv_stats], axis=1)\n hrv_stats.to_csv(\n p\n / \"scratch\"\n / \"trial_HRV\"\n / f\"{subject}_task-heartbeat_run-1_desc-HRV_events.tsv\",\n sep=\"\\t\",\n )\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"numpy.nanmedian",
"numpy.linspace",
"numpy.isnan",
"numpy.arange",
"scipy.stats.zscore",
"numpy.cumsum",
"matplotlib.pyplot.plot",
"scipy.interpolate.interp1d",
"numpy.diff",
"scipy.signal.hamming",
"numpy.array",
"numpy.logical_and",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.19",
"0.18",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
InformationMaximisingNeuralNetworks/imnn | [
"2eb04d9dc1acc4e8d1e60ef0bb25dfac17bd9f81"
] | [
"imnn/imnn/dataset_gradient_imnn_test.py"
] | [
"import pytest\nimport jax\nimport jaxlib\nimport jax.numpy as np\nimport tensorflow as tf\nfrom test.aggregated_defaults import aggregatedTests\nfrom test.gradient_defaults import gradientTests\nfrom imnn.imnn import DatasetGradientIMNN\n\n\nclass datasetGradientTests(\n aggregatedTests, gradientTests):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.kwargs.pop(\"fiducial\")\n self.kwargs.pop(\"derivative\")\n self.kwargs.pop(\"validation_fiducial\")\n self.kwargs.pop(\"validation_derivative\")\n self.reduced_kwargs.pop(\"fiducial\")\n self.reduced_kwargs.pop(\"derivative\")\n self.reduced_kwargs.pop(\"validation_fiducial\")\n self.reduced_kwargs.pop(\"validation_derivative\")\n\n self.kwargs[\"main\"] = (\n self.fiducial[:self.n_d].reshape(\n (self.n_devices,\n self.n_d // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape),\n self.derivative.reshape(\n (self.n_devices,\n self.n_d // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape + (self.n_params,)))\n\n self.kwargs[\"validation_main\"] = (\n self.validation_fiducial[:self.n_d].reshape(\n (self.n_devices,\n self.n_d // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape),\n self.validation_derivative.reshape(\n (self.n_devices,\n self.n_d // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape + (self.n_params,)))\n\n self.kwargs[\"remaining\"] = self.fiducial[self.n_d:].reshape(\n (self.n_devices,\n (self.n_s - self.n_d) // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape)\n\n self.kwargs[\"validation_remaining\"] = \\\n self.validation_fiducial[self.n_d:].reshape(\n (self.n_devices,\n (self.n_s - self.n_d) // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape)\n\n self.reduced_kwargs[\"main\"] = (\n self.fiducial[:self.reduced_n_d].reshape(\n (self.n_devices,\n self.reduced_n_d // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape),\n self.reduced_derivative.reshape(\n (self.n_devices,\n self.reduced_n_d // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape + (self.n_params,)))\n\n self.reduced_kwargs[\"validation_main\"] = (\n self.validation_fiducial[:self.reduced_n_d].reshape(\n (self.n_devices,\n self.reduced_n_d // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape),\n self.reduced_validation_derivative.reshape(\n (self.n_devices,\n self.reduced_n_d // (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape + (self.n_params,)))\n\n self.reduced_kwargs[\"remaining\"] = \\\n self.fiducial[self.reduced_n_d:].reshape(\n (self.n_devices,\n (self.n_s - self.reduced_n_d) //\n (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape)\n\n self.reduced_kwargs[\"validation_remaining\"] = \\\n self.validation_fiducial[self.reduced_n_d:].reshape(\n (self.n_devices,\n (self.n_s - self.reduced_n_d) //\n (self.n_devices * self.n_per_device),\n self.n_per_device)\n + self.input_shape)\n\n self.arrays = [\"θ_fid\"]\n\n def preload(self, dictionary, state=False, validate=False):\n dictionary[\"host\"] = jax.devices(\"cpu\")[0]\n dictionary[\"devices\"] = jax.devices()\n dictionary[\"main\"] = [\n tf.data.Dataset.zip(\n (tf.data.Dataset.from_tensor_slices(fid),\n tf.data.Dataset.from_tensor_slices(der))\n ).repeat().as_numpy_iterator()\n for fid, der in zip(*dictionary[\"main\"])]\n dictionary[\"remaining\"] = [\n 
tf.data.Dataset.from_tensor_slices(\n fid).repeat().as_numpy_iterator()\n for fid in dictionary[\"remaining\"]]\n if state:\n dictionary[\"key_or_state\"] = self.state\n if (not self.simulate) and (not validate):\n dictionary.pop(\"validation_main\")\n dictionary.pop(\"validation_remaining\")\n else:\n dictionary[\"validation_main\"] = [\n tf.data.Dataset.zip(\n (tf.data.Dataset.from_tensor_slices(fid),\n tf.data.Dataset.from_tensor_slices(der))\n ).repeat().as_numpy_iterator()\n for fid, der in zip(*dictionary[\"validation_main\"])]\n dictionary[\"validation_remaining\"] = [\n tf.data.Dataset.from_tensor_slices(\n fid).repeat().as_numpy_iterator()\n for fid in dictionary[\"validation_remaining\"]]\n\n return dictionary\n\n def specific_exceptions(self, variable, input_variable, kwargs):\n if variable == \"validation_main\":\n if \"validation_main\" not in kwargs.keys():\n return True\n if variable == \"validation_remaining\":\n if \"validation_remaining\" not in kwargs.keys():\n return True\n if variable == \"devices\":\n if input_variable is list():\n if len(input_variable) < 1:\n kwargs[variable] = input_variable\n with pytest.raises(ValueError) as info:\n self.imnn(**kwargs)\n assert info.match(\"`devices` has no elements in\")\n return True\n if not all(\n [isinstance(device, jaxlib.xla_extension.Device)\n for device in input_variable]):\n kwargs[variable] = input_variable\n with pytest.raises(TypeError) as info:\n self.imnn(**kwargs)\n assert info.match(\n \"`all elements of `devices` must be xla devices\")\n return True\n if variable == \"host\":\n if input_variable is None:\n return False\n if not isinstance(input_variable, jaxlib.xla_extension.Device):\n kwargs[variable] = input_variable\n with pytest.raises(TypeError) as info:\n self.imnn(**kwargs)\n assert info.match(\n \"`host` must be an xla device but is a \"\n f\"{type(input_variable)}\")\n return True\n return False\n\n def splitting_resize(self, _kwargs):\n _kwargs[\"main\"] = None\n _kwargs[\"remaining\"] = None\n if \"validation_main\" in _kwargs.keys():\n _kwargs[\"validation_main\"] = None\n _kwargs[\"validation_remaining\"] = None\n return _kwargs\n\n\ntest = datasetGradientTests(\n imnn=DatasetGradientIMNN,\n filename=\"dataset_gradient\",\n n_per_device=100)\n\n\[email protected](\"kwargs\", [test.kwargs, test.reduced_kwargs])\[email protected](\"state\", [True, False])\[email protected](\"validate\", [True, False])\[email protected](\n \"input_variable\",\n [None, list(), 1., 1, np.zeros((1,)), test.rng, tuple(), (0, 0),\n (test.model[0], 0), test.bad_model, test.state])\[email protected](\"variable\", test.kwargs.keys())\ndef test_initialisation_parameters_(\n variable, kwargs, input_variable, state, validate):\n test.initialise_parameters(\n variable, kwargs, input_variable, state=state, validate=validate)\n\n\[email protected](\"validate\", [True, False])\[email protected](\"state\", [False, True])\[email protected](\"variable\", [\"n_s\", \"n_d\", \"same\"])\ndef test_splitting_(variable, validate, state):\n test.splitting(variable, test.kwargs, state=state, validate=validate)\n\n\[email protected](\"kwargs\", [test.kwargs, test.reduced_kwargs])\[email protected](\"state\", [True, False])\[email protected](\"validate\", [True, False])\[email protected](\n \"input_variable\", [None, list(), 1., 1, np.zeros((1,)), test.rng])\[email protected](\"variable\", test.fit_kwargs.keys())\ndef test_fit_parameters_(variable, kwargs, input_variable, state, validate):\n test.fit_parameters(\n variable, kwargs, 
test.fit_kwargs, input_variable, state=state,\n validate=validate)\n\n\[email protected](\"state\", [True, False])\[email protected](\"validate\", [True, False])\[email protected](\"fit\", [True, False])\[email protected](\"none_first\", [True, False])\[email protected](\"kwargs\", [test.kwargs, test.reduced_kwargs])\ndef test_combined_running_test_(kwargs, state, validate, fit, none_first):\n test.combined_running_test(\n [test.single_target_data, test.batch_target_data], kwargs,\n test.fit_kwargs, state=state, validate=validate, fit=fit,\n none_first=none_first, aggregated=True)\n"
] | [
[
"tensorflow.data.Dataset.from_tensor_slices"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |