Dataset columns:
    repo_name          — string (length 6–130)
    hexsha             — sequence of commit hashes, one per file
    file_path          — sequence of file paths within the repository
    code               — sequence of file contents
    apis               — sequence of detected third-party API calls per file
    possible_versions  — list of per-library candidate version sets
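Each row pairs a repository with its files, the third-party calls detected in each file, and the library versions those calls could plausibly run under. The following is a minimal sketch of reading one such row; the JSON encoding, the abbreviated version list, and the access pattern are illustrative assumptions, with the values copied from the first record below.

import json

# Minimal sketch (not part of the dataset itself): parse one record, assuming
# each row is stored as a JSON object whose keys match the columns above.
record = json.loads("""
{
  "repo_name": "macdaliot/exist",
  "hexsha": ["65244f79c602c5a00c3ea6a7eef512ce9c21e60a"],
  "file_path": ["scripts/insert2db/reputation/plugins/dshield_medium.py"],
  "apis": [["pandas.DataFrame"]],
  "possible_versions": [{"pandas": ["0.23", "1.0", "2.0"]}]
}
""")

# hexsha, file_path, code and apis are parallel sequences: one entry per file
# in the repository, so they can be iterated together.
for path, api_list, versions in zip(
    record["file_path"], record["apis"], record["possible_versions"]
):
    print(path)
    print("  APIs:", ", ".join(api_list))
    print("  candidate pandas versions:", versions.get("pandas", []))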
macdaliot/exist
[ "65244f79c602c5a00c3ea6a7eef512ce9c21e60a" ]
[ "scripts/insert2db/reputation/plugins/dshield_medium.py" ]
[ "import sys\nimport os\nimport configparser\nimport requests\nimport pandas as pd\nimport hashlib\nfrom io import StringIO\nfrom datetime import datetime, timezone\n\n## Django Setup\nimport django\nimport pymysql\npymysql.install_as_MySQLdb()\nconffile = os.path.join(os.path.dirname(__file__), \"../../conf/insert2db.conf\")\nconf = configparser.SafeConfigParser()\nconf.read(conffile)\nsys.path.append(conf.get('exist', 'syspath'))\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intelligence.settings')\ndjango.setup()\nfrom apps.reputation.models import blacklist\nimport django.utils.timezone as tzone\nfrom django.db import IntegrityError\n\n## Logger Setup\nfrom logging import getLogger, DEBUG, NullHandler\nlogger = getLogger(__name__)\nlogger.addHandler(NullHandler())\nlogger.setLevel(DEBUG)\nlogger.propagate = True\n\nDataDir = os.path.join(os.path.dirname(__file__), '../data/')\n\nclass Tracker():\n def __init__(self):\n self.name = 'Dshield_Medium'\n self.ID = 222\n self.URL = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt'\n self.DataFilePath = DataDir + 'dshield/suspiciousdomains_Medium.txt'\n self.header = [\n 'domain',\n ]\n\n def cmpFiles(self, oldfile, newtext):\n diffline = ''\n if not os.path.exists(oldfile):\n f = open(oldfile, 'w')\n f.close()\n oldsets = set(open(oldfile).readlines())\n newsets = set(newtext.replace('\\r\\n','\\n').splitlines(True))\n results = newsets.difference(oldsets)\n for result in results:\n diffline += result\n return diffline[:-1]\n\n def delComment(self, s):\n result = ''\n for line in s.splitlines(True):\n if not line.startswith('#') \\\n and line != \"Site\\n\":\n result += line\n return result\n\n def makeDataframe(self):\n df = pd.DataFrame()\n newline = ''\n try:\n res = requests.get(self.URL)\n if res.status_code != 200:\n return df\n newline = self.cmpFiles(self.DataFilePath, res.text)\n newline = self.delComment(newline)\n except Exception as e:\n logger.error(e)\n if not newline == '':\n open(self.DataFilePath, 'w').write(res.text)\n df = pd.read_csv(StringIO(newline), names=self.header)\n return df\n\n def parse(self):\n logger.info(\"start parsing: %s\", self.name)\n\n df = self.makeDataframe()\n queries = []\n if not df.empty:\n for i, v in df.iterrows():\n line = str(self.ID) + \",\"\n line += str(v.values)\n md5 = hashlib.md5(line.encode('utf-8')).hexdigest()\n try:\n query = blacklist(\n id = md5,\n domain = v.domain,\n datetime = tzone.now(),\n source = self.ID,\n referrer = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt',\n )\n except Exception as e:\n logger.error(\"%s: %s\", e, line)\n queries.append(query)\n else:\n logger.info(\"no update\")\n\n logger.info(\"done parsing: %s, %s queries were parsed\", self.name, len(queries))\n return queries\n\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
derekdylu/mgt2001
[ "b228d5e75e75a2f3f170e35db1bea999b765bec8" ]
[ "mgt2001/hyp/non.py" ]
[ "from matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport math\nimport scipy.stats as stats\n\n\ndef inter_p_value(p_value):\n # interpretation\n if p_value >= 0 and p_value < 0.01:\n inter_p = 'Overwhelming Evidence'\n elif p_value >= 0.01 and p_value < 0.05:\n inter_p = 'Strong Evidence'\n elif p_value >= 0.05 and p_value < 0.1:\n inter_p = 'Weak Evidence'\n elif p_value >= .1:\n inter_p = 'No Evidence'\n return inter_p\n\n\ndef grank(data):\n if type(data) == np.ndarray or type(data) == list:\n alldata = data.copy()\n data = data.copy()\n else:\n alldata = data.values.copy()\n data = data.values.copy()\n alldata.sort()\n tmp_df = pd.DataFrame({'value': alldata})\n tmp_df['rank'] = tmp_df.index + 1\n value_to_rank = tmp_df.groupby('value').mean().reset_index()\n samp = pd.DataFrame({'value': data})\n samp = pd.merge(samp, value_to_rank, how='left')\n return samp['rank']\n\n\ndef ranksum_z_test(df=None, to_compute='', alternative=None, precision=4, alpha=0.05):\n \"\"\"\n df can only have two columns and df.shape[0] > 10\n alternative has three options: 'two-sided', 'less', 'greater'\n \"\"\"\n # sort all data points by values\n tmp_values = df.values.reshape(-1)\n tmp_values = tmp_values[~np.isnan(tmp_values)]\n tmp_values.sort()\n\n # assign ranks\n updated_df = pd.DataFrame({'value': tmp_values})\n updated_df['rank'] = updated_df.index + 1\n\n # average rank for identical value\n updated_df = updated_df.groupby('value').mean().reset_index()\n # display(updated_df)\n\n # Compute Sum of Ranks\n samp1 = pd.DataFrame({'value': df[to_compute].dropna().values})\n samp1 = pd.merge(samp1, updated_df)\n T = samp1['rank'].sum()\n\n # compute mean and standard deviation\n n1 = df.iloc[:, 0].dropna().shape[0]\n n2 = df.iloc[:, 1].dropna().shape[0]\n\n E_T = n1*(n1+n2+1)/2\n\n sigmaT = (n1*n2*(n1+n2+1)/12) ** 0.5\n z = (T-E_T)/sigmaT\n # compute p-value\n # right (greater)\n p_value = 1 - stats.norm.cdf(z)\n\n if alternative == 'greater':\n pass\n elif alternative == 'less':\n p_value = stats.norm.cdf(z)\n elif alternative == 'two-sided':\n # two-tail\n if p_value > 0.5:\n p_value = stats.norm.cdf(z)\n p_value *= 2\n flag = False\n if p_value < alpha:\n flag = True\n\n result = f'''======= z-test =======\nT (sum of ranks) = {T}\n(n1, n2) = ({n1}, {n2})\nmu_t = {E_T}\nsigma_t = {sigmaT}\nz statistic value (observed) = {z:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 ({alternative}) → {flag}\n'''\n print(result)\n result_dict = {'T': T, 'ET': E_T,\n 'sigmaT': sigmaT, 'z': z, 'p-value': p_value}\n return updated_df, result_dict\n\n\ndef sign_binom_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):\n n = diff.size - np.sum(diff == 0)\n\n if sign == '+':\n sign_count = np.sum(diff > 0)\n else:\n sign_count = np.sum(diff < 0)\n\n if alternative == 'greater' or alternative == 'less':\n # 如果超過一半就要切換\n if sign_count > n / 2:\n p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5)\n else:\n p_value = stats.binom.cdf(sign_count, n=n, p=0.5)\n elif alternative == 'two-sided':\n p_value = stats.binom.cdf(sign_count, n=n, p=0.5)\n if p_value > 0.5:\n p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5)\n\n p_value *= 2\n\n flag = False\n if p_value < alpha:\n flag = True\n\n result = f'''======= Sign Test - Binomial Distribution =======\n(For small sample size (<= 10))\n\nTargeted Sign: {sign}\nn = {n}\nSign counts = {sign_count}\n\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 
({alternative}) → {flag}\n '''\n print(result)\n return sign_count, p_value\n\n\ndef sign_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):\n diff = diff[~(diff == 0)]\n n = len(diff)\n\n if sign == '+':\n T = np.sum(diff > 0)\n else:\n T = np.sum(diff < 0)\n z_stat = (T - 0.5 * n) / (.5 * (n ** 0.5))\n # right tail\n if alternative == 'greater':\n p_value = 1 - stats.norm.cdf(z_stat)\n elif alternative == 'less':\n p_value = stats.norm.cdf(z_stat)\n elif alternative == 'two-sided':\n p_value = 1 - stats.norm.cdf(z_stat)\n if p_value > 0.5:\n p_value = stats.norm.cdf(z_stat)\n p_value *= 2\n flag = False\n if p_value < alpha:\n flag = True\n result = f'''======= Sign Test - z Statistic =======\n(For large sample size (> 10))\n\nTargeted Sign: {sign}\nn = {n}\nSign counts = {T}\n\nz statistic = {z_stat:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 ({alternative}) → {flag}\n '''\n print(result)\n\n return T, p_value\n\n\ndef wilcoxon_signed_ranksum_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):\n\n diff = diff[~(diff == 0)]\n n = len(diff)\n\n diff_abs = np.sort(np.abs(diff).to_numpy())\n\n updated_diff = pd.DataFrame({'diff_abs': diff_abs})\n updated_diff['rank'] = updated_diff.index + 1\n updated_diff = updated_diff.groupby('diff_abs').mean().reset_index()\n\n new_df = pd.DataFrame({'diff': diff, 'diff_abs': np.abs(diff)})\n new_df = pd.merge(new_df, updated_diff)\n\n if sign == '+':\n T = np.sum(new_df['rank'][new_df['diff'] > 0])\n else:\n T = np.sum(new_df['rank'][new_df['diff'] < 0])\n\n E_T = n * (n + 1) / 4\n sigma_T = (n * (n + 1) * (2 * n + 1) / 24) ** 0.5\n\n z_stat = (T - E_T) / sigma_T\n\n if alternative == 'greater':\n # right tail test\n p_value = 1 - stats.norm.cdf(z_stat)\n elif alternative == 'less':\n # left tail test\n p_value = stats.norm.cdf(z_stat)\n elif alternative == 'two-sided':\n # two-tailed test\n p_value = 1 - stats.norm.cdf(z_stat)\n if p_value > 0.5:\n p_value = stats.norm.cdf(z_stat)\n p_value *= 2\n\n flag = False\n if p_value < alpha:\n flag = True\n\n result = f'''======= Wilcoxon Signed Rank Sum Test - z Statistic =======\n(For large sample size (> 30))\n\nTargeted Sign: {sign}\nn = {n}\nSum of rank (T statistic) = {T}\n\nmu_t = {E_T}\nsigma_t = {sigma_T}\n\nz statistic value (observed) = {z_stat:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 ({alternative}) → {flag}\n '''\n print(result)\n\n result_dict = {'n': n, 'T': T, 'E_T': E_T,\n 'sigma_T': sigma_T, 'z_stat': z_stat, 'p_value': p_value}\n\n return new_df, result_dict\n\n\ndef kruskal_chi2_test(data=None, alpha=0.05, precision=4):\n \"\"\"\n col = 要比較的 target\n row = data for each target\n \"\"\"\n if type(data) == pd.DataFrame:\n data = data.copy().to_numpy()\n alldata = np.concatenate(data.copy())\n else:\n alldata = np.concatenate(data.copy())\n\n k = data.shape[1]\n alldata.sort()\n\n tmp_df = pd.DataFrame(({'value': alldata}))\n tmp_df['rank'] = tmp_df.index + 1 # rank\n value_to_rank = tmp_df.groupby('value').mean().reset_index()\n T = []\n sample_rank_df = []\n for i in range(k):\n\n samp = pd.DataFrame(\n {'value': data[:, i][~np.isnan(data[:, i])]})\n\n samp = pd.merge(samp, value_to_rank)\n sample_rank_df.append(samp)\n T.append(samp['rank'].sum())\n\n n = [len(data[:, i][~np.isnan(data[:, i])]) for i in range(k)]\n\n # print(T)\n # print(n)\n\n rule_of_five_str = \"\"\n if (np.sum(np.array(n) < 5) > 0):\n rule_of_five_str += \"!(At least one sample size is 
less than 5)\"\n else:\n rule_of_five_str += \"(All sample size >= 5)\"\n\n N = np.sum(n)\n\n t_over_n = 0\n\n for i in range(k):\n t_over_n += T[i] ** 2 / n[i]\n\n H = 12 / N / (N + 1) * t_over_n - 3 * (N + 1)\n p_value = 1 - stats.chi2.cdf(H, k - 1)\n chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)\n\n result_dict = {'H': H, 'p-value': p_value,\n 'T': T, 'sample_rank_df': sample_rank_df}\n flag = p_value < alpha\n\n result = f'''======= Kruskal-Wallis Test with Chi-squared Test =======\n{rule_of_five_str}\n\nH statistic value (observed) = {H:.{precision}f}\nchi2 critical value = {chi2_stat:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 (Not all {k} population locations are the same) → {flag}\n '''\n print(result)\n return result_dict\n\n\ndef friedman_chi2_test(data=None, alpha=0.05, precision=4):\n \"\"\"\n col = 要比較的 target\n row = blocked data for each target\n \"\"\"\n if type(data) == np.ndarray:\n data = pd.DataFrame(data)\n\n new_df = data.apply(grank, axis=1)\n b, k = new_df.shape\n\n rule_of_five_str = \"\"\n if (b < 5 and k < 5):\n rule_of_five_str += f\"!(Number of blocks = {b} < 5 and number of populations = {k} < 5)\"\n else:\n rule_of_five_str += f\"(Number of blocks = {b} >= 5 or number of populations {k} >= 5)\"\n\n T = new_df.sum().to_numpy()\n\n F_r = 12 / b / k / (k + 1) * np.sum(T ** 2) - 3 * b * (k + 1)\n p_value = 1 - stats.chi2.cdf(F_r, k - 1)\n chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)\n\n result_dict = {'F_r': F_r, 'p-value': p_value,\n 'T': T, 'sample_ranked_df': new_df}\n flag = p_value < alpha\n\n result = f'''======= Friedman Test with Chi-squared Test =======\n{rule_of_five_str}\n\nF_r statistic value (observed) = {F_r:.{precision}f}\nchi2 critical value = {chi2_stat:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 (Not all {k} population locations are the same) → {flag}\n '''\n print(result)\n return result_dict\n\n\ndef pearson_test(data=None, a=None, b=None, alpha=0.05, precision=4):\n \"\"\"\n a, b 還不能傳入東西\n Make sure that data is in the form of [a, b]\n \"\"\"\n cov_mat = np.cov(data.values, rowvar=False)\n cor_mat = np.corrcoef(data.values, rowvar=False)\n cov = cov_mat[0][1]\n cor = cor_mat[0][1]\n\n n = data.shape[0]\n d_of_f = n - 2\n t_c = stats.t.ppf(1 - alpha / 2, df=d_of_f)\n t_stat = cor * (((n - 2) / (1 - cor ** 2)) ** 0.5)\n\n flag = abs(t_stat) > t_c\n result_dict = {'cov': cov, 't_stat': t_stat, 'cor': cor, 't_c': t_c}\n results = f\"\"\"======= Pearson Correlation Coefficient =======\nCovariance: {cov:.{precision}f}\nCoefficient of Correlation: {cor:.{precision}f}\n\nt (Critical Value) = {t_c:.{precision}f}\nt (Observed Value) = {t_stat:.{precision}f}\n\nReject H_0 (There are linear relationship between two variables) → {flag}\n\"\"\"\n\n print(results)\n\n return result_dict\n\n\ndef spearman_test(a=None, b=None, alpha=0.05, precision=4):\n spearman_restult_cor, spearman_restult_p_value = stats.spearmanr(a, b)\n # print(f'Correlation = {cor:.4f}, p-value={p_value:.4f}')\n n = len(a)\n\n rule_of_30_str = ''\n\n results = f\"\"\"======= Spearman Rank Correlation Coefficient =======\n[scipy.stats.spearmanr]\nCoefficient of Correlation: {spearman_restult_cor:.{precision}f}\np-value={spearman_restult_p_value:.{precision}f} ({inter_p_value(spearman_restult_p_value)})\n\"\"\"\n\n if (n < 30):\n rule_of_30_str += f\"!(n = {n} < 30)\"\n flag = spearman_restult_p_value < alpha\n results += f\"\"\"\nReject H_0 (There are relationship between two variables) → {flag}\n 
\"\"\"\n result_dict = {'spearman_result': [\n spearman_restult_cor, spearman_restult_p_value]}\n else:\n rule_of_30_str += f\"(n = {n} >= 30)\"\n flag = spearman_restult_p_value < alpha\n results += f\"\"\"\nReject H_0 (There are relationship between two variables) → {flag}\n \"\"\"\n z_stat = spearman_restult_cor * ((n - 1) ** 0.5)\n z_cv = stats.norm.ppf(1 - alpha/2)\n p_value = stats.norm.sf(z_stat) * 2\n if p_value > 1:\n p_value = stats.norm.cdf(z_stat) * 2\n flag = p_value < alpha\n results += f\"\"\"\n[z test statistic]\n{rule_of_30_str}\n\nr_s: {spearman_restult_cor:.{precision}f} (using spearmanr's result)\nz stat (observed value) = {z_stat:.{precision}f}\nz (critical value) = {z_cv:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 (There are relationship between two variables) → {flag}\n \"\"\"\n\n result_dict = {'spearman_result': [\n spearman_restult_cor, spearman_restult_p_value], 'z_stat': z_stat, 'z_cv': z_cv, 'p-value': p_value}\n\n print(results)\n\n return result_dict\n" ]
[ [ "scipy.stats.chi2.ppf", "scipy.stats.norm.ppf", "pandas.merge", "scipy.stats.norm.cdf", "numpy.abs", "numpy.isnan", "scipy.stats.chi2.cdf", "pandas.DataFrame", "scipy.stats.t.ppf", "numpy.cov", "scipy.stats.binom.cdf", "scipy.stats.norm.sf", "numpy.corrcoef", "scipy.stats.spearmanr", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
cosmoscope/qt-client
[ "c6cb59267c8be9149a95fb853a4f181d9092c86b" ]
[ "qt_client/components/plot_data_model.py" ]
[ "import numpy as np\nfrom PyQt5.QtCore import (QAbstractTableModel, QModelIndex, QObject, Qt,\n QVariant, pyqtProperty, pyqtSignal, pyqtSlot)\n\nfrom ..hub import Hub, Message\n\n\nclass PlotDataModel(QAbstractTableModel):\n # DataRole = Qt.UserRole + 1\n\n def __init__(self, *args, **kwargs):\n super(PlotDataModel, self).__init__(*args, **kwargs)\n\n self._data = list(zip(np.arange(100), np.random.sample(100)))\n\n # The data model needs to listen for add data events\n self._hub = Hub()\n # self._hub.subscribe(AddDataMessage, self.add_data, self)\n # self._hub.subscribe(AddPlotDataMessage, self.add_data, self)\n\n # def roleNames(self):\n # return {\n # self.DataRole: b'data'\n # }\n\n def rowCount(self, parent=None, *args, **kwargs):\n return len(self._data)\n\n def columnCount(self, parent=None, *args, **kwargs):\n return 2\n\n def data(self, index, role=None):\n return self._data[index.row()][index.column()]\n # if role == self.DataRole:\n # return self._data[index.row()]\n if role == Qt.DisplayRole:\n return self._data[index.row()][index.column()]\n elif role == Qt.EditRole:\n return self._data[index.row()][index.column()]\n\n return QVariant()\n" ]
[ [ "numpy.arange", "numpy.random.sample" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xXEminenTXx/ImageClassifier
[ "e0e63e12108b523270ea7d615afcbfc696b07996" ]
[ "predict_functions.py" ]
[ "# python imports\nimport numpy as np\nfrom PIL import Image\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom collections import OrderedDict\nfrom sys import exit\n\n# File containing all of the functions used in the predict program\ndef load_checkpoint(filepath):\n\n checkpoint = torch.load(filepath)\n \n if checkpoint[\"arch\"] == 'VGG':\n model = models.vgg16(pretrained=True)\n \n elif checkpoint[\"arch\"] == 'Densenet':\n model = models.densenet121(pretrained=True)\n \n else:\n print(\"Unsupported arch used in checkpoint\")\n exit(1)\n\n for param in model.parameters():\n param.requires_grad = False\n\n model.class_to_idx = checkpoint['class_to_idx']\n\n # Load classifier from checkpoint\n classifier = checkpoint['classifier']\n\n model.classifier = classifier\n\n model.load_state_dict(checkpoint['model_state_dict'])\n\n return model\n\ndef process_image(image_path):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n\n # Process a PIL image for use in a PyTorch model\n\n pil_image = Image.open(image_path)\n\n # Resize\n if pil_image.size[0] > pil_image.size[1]:\n pil_image.thumbnail((5000, 256))\n else:\n pil_image.thumbnail((256, 5000))\n\n # Crop \n left_margin = (pil_image.width-224)/2\n bottom_margin = (pil_image.height-224)/2\n right_margin = left_margin + 224\n top_margin = bottom_margin + 224\n\n pil_image = pil_image.crop((left_margin, bottom_margin, right_margin, top_margin))\n\n # Normalize\n np_image = np.array(pil_image)/255\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n np_image = (np_image - mean) / std\n\n # PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array\n # Color channel needs to be first; retain the order of the other two dimensions.\n np_image = np_image.transpose((2, 0, 1))\n\n return np_image\n\ndef predict(image_path, model, topk, gpu):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n\n image = process_image(image_path)\n\n if gpu:\n model.to('cuda')\n image = torch.from_numpy(image).type(torch.cuda.FloatTensor)\n else:\n model.to('cpu')\n image = torch.from_numpy(image).type(torch.FloatTensor)\n\n # Returns a new tensor with a dimension of size one inserted at the specified position.\n image = image.unsqueeze(0)\n\n output = model.forward(image)\n\n probabilities = torch.exp(output)\n\n # Probabilities and the indices of those probabilities corresponding to the classes\n top_probabilities, top_indices = probabilities.topk(topk)\n\n # Convert to lists\n top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0] \n top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0] \n\n # Convert topk_indices to the actual class labels using class_to_idx\n # Invert the dictionary so you get a mapping from index to class.\n\n idx_to_class = {value: key for key, value in model.class_to_idx.items()}\n #print(idx_to_class)\n\n top_classes = [idx_to_class[index] for index in top_indices]\n\n return top_probabilities, top_classes\n" ]
[ [ "torch.exp", "numpy.array", "torch.from_numpy", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Gordonbuck/ml-oov-we
[ "ce28cd8b556a16125ba36cd41781a3e60bb26422" ]
[ "src/train.py" ]
[ "import higher\nfrom leap import Leap\nimport numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nimport gc\n\n\ndef train(model, source_corpus, char2idx, args, device):\n model = model.to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init)\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.patience,\n threshold=args.threshold)\n best_valid_cosine = 1\n\n for epoch in np.arange(args.n_epochs):\n valid_cosine = []\n valid_ce = []\n\n model.train()\n for batch in np.arange(args.n_batch):\n train_contexts, train_targets, train_vocabs, train_inds = source_corpus.get_batch(args.batch_size,\n args.n_shot,\n char2idx, device,\n fixed=args.fixed_shot,\n return_inds=True)\n optimizer.zero_grad()\n\n if args.lang_model:\n pred_emb, pred_ind = model.forward(train_contexts, train_vocabs, lang_model=args.lang_model)\n loss = nn.functional.cross_entropy(pred_ind, train_inds)\n loss += -nn.functional.cosine_similarity(pred_emb, train_targets).mean()\n else:\n pred_emb = model.forward(train_contexts, train_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, train_targets).mean()\n\n loss.backward()\n optimizer.step()\n\n model.eval()\n with torch.no_grad():\n for batch in np.arange(args.n_batch):\n valid_contexts, valid_targets, valid_vocabs, valid_inds = source_corpus.get_batch(args.batch_size,\n args.n_shot,\n char2idx, device,\n use_valid=True,\n fixed=args.fixed_shot,\n return_inds=True)\n if args.lang_model:\n pred_emb, pred_ind = model.forward(valid_contexts, valid_vocabs, lang_model=args.lang_model)\n loss = nn.functional.cross_entropy(pred_ind, valid_inds).mean()\n valid_ce += [loss.cpu().numpy()]\n else:\n pred_emb = model.forward(valid_contexts, valid_vocabs)\n\n loss = -nn.functional.cosine_similarity(pred_emb, valid_targets).mean()\n valid_cosine += [loss.cpu().numpy()]\n\n avg_valid = np.average(valid_cosine)\n lr_scheduler.step(avg_valid)\n\n if args.lang_model:\n avg_ce = np.average(valid_ce)\n print(f\"Average cosine loss: {avg_valid}; Average cross entropy loss: {avg_ce}\")\n else:\n print(f\"Average cosine loss: {avg_valid}\")\n\n if avg_valid < best_valid_cosine:\n best_valid_cosine = avg_valid\n torch.save(model.state_dict(), os.path.join(args.save_dir, 'model.pt'))\n\n if optimizer.param_groups[0]['lr'] < args.lr_early_stop:\n print('LR early stop')\n break\n\n\ndef maml_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):\n model = model.to(device)\n meta_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_meta_lr_init)\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,\n patience=args.patience, threshold=args.threshold)\n best_score = 3\n\n for meta_epoch in np.arange(args.n_meta_epochs):\n gc.collect()\n source_valid_cosine = []\n target_valid_cosine = []\n\n model.train()\n with torch.backends.cudnn.flags(benchmark=True):\n for meta_batch in np.arange(args.n_meta_batch):\n inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_inner_lr_init)\n meta_optimizer.zero_grad()\n\n with higher.innerloop_ctx(model, inner_optimizer, copy_initial_weights=False) as (fmodel, diffopt):\n for inner_batch in np.arange(args.n_inner_batch):\n source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)\n pred_emb = fmodel.forward(source_train_contexts, source_train_vocabs)\n loss = 
-nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()\n diffopt.step(loss)\n\n target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,\n repeat_ctxs=args.meta_repeat_ctxs)\n pred_emb = fmodel.forward(target_train_contexts, target_train_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()\n loss.backward()\n\n meta_optimizer.step()\n\n model.eval()\n with torch.no_grad():\n for batch in np.arange(args.n_batch):\n source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)\n pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()\n source_valid_cosine += [loss.cpu().numpy()]\n\n target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,\n repeat_ctxs=args.meta_repeat_ctxs)\n pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()\n target_valid_cosine += [loss.cpu().numpy()]\n\n avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)\n score = avg_target_valid\n lr_scheduler.step(score)\n print(f\"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}\")\n\n if score < best_score:\n best_score = score\n torch.save(model.state_dict(), os.path.join(args.save_dir, 'maml_model.pt'))\n\n if meta_optimizer.param_groups[0]['lr'] < args.maml_lr_early_stop:\n print('LR early stop')\n break\n\n\ndef leap_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):\n model = model.to(device)\n leap = Leap(model)\n meta_optimizer = torch.optim.Adam(leap.parameters(), lr=args.leap_meta_lr_init)\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,\n patience=args.patience, threshold=args.threshold)\n best_score = 3\n\n for meta_epoch in np.arange(args.n_meta_epochs):\n source_valid_cosine = []\n target_valid_cosine = []\n\n model.train()\n for meta_batch in np.arange(args.n_meta_batch):\n meta_optimizer.zero_grad()\n\n leap.init_task()\n leap.to(model)\n inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)\n for inner_batch in np.arange(args.n_task_steps):\n inner_optimizer.zero_grad()\n source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)\n pred_emb = model.forward(source_train_contexts, source_train_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()\n loss.backward()\n leap.update(loss, model)\n inner_optimizer.step()\n\n leap.init_task()\n leap.to(model)\n inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)\n for inner_batch in np.arange(args.n_task_steps):\n inner_optimizer.zero_grad()\n target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,\n repeat_ctxs=args.meta_repeat_ctxs)\n pred_emb = 
model.forward(target_train_contexts, target_train_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()\n loss.backward()\n leap.update(loss, model)\n inner_optimizer.step()\n\n leap.normalize()\n meta_optimizer.step()\n\n leap.to(model)\n model.eval()\n with torch.no_grad():\n for batch in np.arange(args.n_batch):\n source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)\n pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()\n source_valid_cosine += [loss.cpu().numpy()]\n\n target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,\n repeat_ctxs=args.meta_repeat_ctxs)\n pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()\n target_valid_cosine += [loss.cpu().numpy()]\n\n avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)\n score = avg_target_valid\n lr_scheduler.step(score)\n print(f\"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}\")\n\n if score < best_score:\n best_score = score\n torch.save(model.state_dict(), os.path.join(args.save_dir, 'leap_model.pt'))\n\n if meta_optimizer.param_groups[0]['lr'] < args.leap_lr_early_stop:\n print('LR early stop')\n break\n" ]
[ [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "numpy.arange", "torch.nn.functional.cross_entropy", "torch.no_grad", "torch.nn.functional.cosine_similarity", "torch.backends.cudnn.flags", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
krisbuote/Reinforcement-Learning-Trader
[ "ae8c3af0856a480c88546c2a7e478a735585e0af", "ae8c3af0856a480c88546c2a7e478a735585e0af" ]
[ "Reinforcement-Learning-Trader/Agent.py", "vanilla-LSTM/run.py" ]
[ "import keras\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense, LSTM, Dropout\nfrom keras.optimizers import Adam\n\nimport numpy as np\nimport random\nfrom collections import deque\n\nclass Agent:\n def __init__(self, state_size, is_eval=False, model_name=\"\"):\n self.state_size = state_size # normalized previous days\n self.action_size = 2 # buy, sell\n self.memory = deque(maxlen=1000)\n self.inventory = []\n self.net_worth = []\n self.model_name = model_name\n self.is_eval = is_eval\n\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_min = 0.08\n self.epsilon_decay = 0.995\n\n self.model = load_model(\"models/\" + model_name) if is_eval else self._model()\n\n def _model(self):\n model = Sequential()\n model.add(Dense(units=64, input_dim=self.state_size, activation=\"relu\"))\n model.add(Dense(units=32, activation=\"relu\"))\n model.add(Dense(units=8, activation=\"relu\"))\n model.add(Dense(self.action_size, activation=\"linear\"))\n model.compile(loss=\"mse\", optimizer=Adam(lr=0.001))\n\n return model\n\n def act(self, state):\n if not self.is_eval and random.random() <= self.epsilon:\n return random.randrange(self.action_size)\n\n options = self.model.predict(state)\n return np.argmax(options[0])\n\n def expReplay(self, batch_size):\n mini_batch = []\n l = len(self.memory)\n for i in range(l - batch_size + 1, l):\n mini_batch.append(self.memory[i])\n\n for state, action, reward, next_state, done in mini_batch:\n target = reward\n if not done:\n target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])\n\n target_f = self.model.predict(state)\n target_f[0][action] = target\n self.model.fit(state, target_f, epochs=1, verbose=0)\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n", "from lstm import model,scale,timeseries_to_supervised,split_and_reshape,data_raw\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom keras.models import load_model\r\nimport numpy\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom math import sqrt\r\n\r\n### Parameters\r\nepochs = 1\r\nbatch_size = 4\r\ntimesteps = 8 #how many timesteps RNN includes\r\nneurons = 1 # Number of neurons in the LSTM\r\ndata_dim = data_raw.shape[1] # n_cols in data: only Bitcoin price currently\r\nsplit_pct = 0.90 #percent of data in training\r\nloading = False #Set to True if loading a saved model\r\nsaving = True #Set to True if you wish to save new model\r\n\r\n### Preprocess Data\r\nscaler, raw_data_scaled = scale(data_raw, split_pct) #Scale all of the data\r\ndata_scaled = timeseries_to_supervised(raw_data_scaled, timesteps) #turn it into timeshifted data\r\nx_train, y_train, x_test, y_test = split_and_reshape(data_scaled, raw_data_scaled, split_pct, timesteps, data_dim) #Uses shifted data for x and original data for y\r\n\r\n### Load previous Model or Fit new one\r\nif loading == True:\r\n model = load_model('./model/my_model.h5')\r\n\r\nelse:\r\n model = model(neurons, timesteps, data_dim)\r\n model_history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, shuffle=False)\r\n\r\nif saving == True:\r\n model.save('./model/my_model.h5')\r\n\r\n\r\npredictions = model.predict(x_test)\r\nevaluate = model.evaluate(x_test,y_test,batch_size=batch_size)\r\nprint(\"Test Loss is \", evaluate[0])\r\nrmse = sqrt(mean_squared_error(y_test, predictions))\r\n# print(model_history.history['loss'][0])\r\n\r\n### Plot Results\r\nplt.plot(predictions, 
label='Predicted')\r\nplt.plot(y_test, label='Actual')\r\nplt.legend()\r\nplt.show()\r\n" ]
[ [ "numpy.argmax" ], [ "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "sklearn.metrics.mean_squared_error" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mussard/share_data_benchmark
[ "c02bfa4017b9008800cabe47d7c7959f82c26060" ]
[ "MRPT/vdz/atoms/V_0/mrpt.py" ]
[ "import json\nfrom pyscf import gto,scf,mcscf, fci, lo, ci, cc\nfrom pyscf.scf import ROHF, UHF,ROKS\nimport numpy as np\nimport pandas as pd\n\n# THIS IS WERE IT STARTS ====================================\n\ndf=json.load(open(\"../../../trail.json\"))\n\nspins={'Sc':1, 'Ti':2, 'V':3, 'Cr':6, 'Mn':5, 'Fe':4, 'Cu':1}\n\nnd={'Sc':(1,0), 'Ti':(2,0), 'V':(3,0), 'Cr':(5,0), 'Mn':(5,0), 'Fe':(5,1), 'Cu':(5,5)}\n\ncas={'Sc':3, 'Ti':4, 'V':5, 'Cr':6, 'Mn':7, 'Fe':8, 'Cu':11}\n\ndatacsv={}\nfor nm in ['atom','charge','method','basis','pseudopotential',\n 'totalenergy','totalenergy-stocherr','totalenergy-syserr']:\n datacsv[nm]=[]\n\nbasis='vdz'\nel='V'\ncharge=0\n\nmol=gto.Mole()\nmol.ecp={}\nmol.basis={}\nmol.ecp[el]=gto.basis.parse_ecp(df[el]['ecp'])\nmol.basis[el]=gto.basis.parse(df[el][basis])\nmol.charge=charge\nif el == 'Cr' or el == 'Cu':\n mol.spin=spins[el]-charge\nelse:\n mol.spin=spins[el]+charge\nmol.build(atom=\"%s 0. 0. 0.\"%el,verbose=4)\n\nm=ROHF(mol)\nm.level_shift=1000.0\ndm=m.from_chk(\"../../../../HF/atoms/\"+el+basis+str(charge)+\".chk\")\nhf=m.kernel(dm)\nm.analyze()\n\nfrom pyscf.shciscf import shci\nmc = shci.SHCISCF(m, 6, cas[el]-charge)\n#mc.fcisolver.conv_tol = 1e-14\nmc.fcisolver.mpiprefix=\"srun -n20\"\nmc.fcisolver.num_thrds=12\nmc.verbose = 4\ncas=mc.kernel()[0]\n \nfrom pyscf.icmpspt import icmpspt\npt=icmpspt.icmpspt(mc,rdmM=500, PTM=1000,\\\n pttype=\"MRLCC\",\\\n third_order=True,\\\n fully_ic=True,\\\n do_dm4=True)\n\ndatacsv['atom'].append(el)\ndatacsv['charge'].append(charge)\ndatacsv['method'].append('MRPT')\ndatacsv['basis'].append(basis)\ndatacsv['pseudopotential'].append('trail')\ndatacsv['totalenergy'].append(cas+pt)\ndatacsv['totalenergy-stocherr'].append(0.0)\ndatacsv['totalenergy-syserr'].append(0.0)\npd.DataFrame(datacsv).to_csv(el+\".csv\",index=False)\n\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
PariseC/osm2rail
[ "dfc373aedba4a82fd144192cb6a855e8a11b0601" ]
[ "osm2rail/plotter.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection,PolyCollection\n\ndef showNetwork(network,savefig=None):\n node_x_coords=[]\n node_y_coords=[]\n link_coords=[]\n poi_coords=[]\n\n for _,node in network.node_dict.items():\n node_x_coords.append(node.x_coord)\n node_y_coords.append(node.y_coord)\n\n for _,link in network.link_dict.items():\n coords = list(link.geometry.coords)\n link_coords.append(np.array(coords))\n\n if len(network.POI_list):\n for poi in network.POI_list:\n coords = list(poi.geometry.exterior.coords)\n poi_coords.append(np.array(coords))\n\n fig, ax = plt.subplots(figsize=(12, 8))\n # plot network nodes\n ax.scatter(node_x_coords, node_y_coords, marker='o', c='red', s=10, zorder=1)\n # plot network links\n ax.add_collection(LineCollection(link_coords, colors='orange', linewidths=1, zorder=2))\n # plot network pois\n if len(poi_coords):\n coll = PolyCollection(poi_coords, alpha=0.7, zorder=0)\n ax.add_collection(coll)\n # set axis\n ax.autoscale_view()\n plt.xlabel('x_coord')\n plt.ylabel('y_coord')\n plt.tight_layout()\n # show fig\n plt.show()\n # save fig\n if savefig:\n try:\n figname = savefig['filename'] if 'filename' in savefig.keys() else 'network.png'\n dpi = savefig['dpi'] if 'dpi' in savefig else 300\n fig.savefig(figname, dpi=dpi, bbox_inches='tight')\n except Exception as e:\n print(e)" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.collections.LineCollection", "matplotlib.pyplot.subplots", "matplotlib.collections.PolyCollection", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mikee385/fbsrankings
[ "2b50e26a302b53c21cd8f5c965943d6fbf0680a1" ]
[ "src/fbsrankings/domain/service/srs_ranking_service.py" ]
[ "from typing import Dict\nfrom typing import List\n\nimport numpy\n\nfrom fbsrankings.domain.model.affiliation import Subdivision\nfrom fbsrankings.domain.model.game import Game\nfrom fbsrankings.domain.model.game import GameStatus\nfrom fbsrankings.domain.model.ranking import Ranking\nfrom fbsrankings.domain.model.ranking import SeasonData\nfrom fbsrankings.domain.model.ranking import TeamRankingRepository\nfrom fbsrankings.domain.model.ranking import TeamRankingService\nfrom fbsrankings.domain.model.team import TeamID\n\n\nclass TeamData:\n def __init__(self, index: int) -> None:\n self.index = index\n self.game_total = 0\n self.point_margin = 0\n\n def add_game(self, point_margin: int) -> None:\n self.game_total += 1\n self.point_margin += point_margin\n\n\nclass SRSRankingService(TeamRankingService):\n name: str = \"SRS\"\n\n def __init__(self, repository: TeamRankingRepository) -> None:\n self._repository = repository\n\n def calculate_for_season(self, season_data: SeasonData) -> List[Ranking[TeamID]]:\n team_data: Dict[TeamID, TeamData] = {}\n for affiliation in season_data.affiliation_map.values():\n if affiliation.subdivision == Subdivision.FBS:\n team_data[affiliation.team_id] = TeamData(len(team_data))\n\n season_is_complete = True\n games_by_week: Dict[int, List[Game]] = {}\n for game in season_data.game_map.values():\n winning_data = None\n if game.winning_team_id is not None:\n winning_data = team_data.get(game.winning_team_id)\n\n losing_data = None\n if game.losing_team_id is not None:\n losing_data = team_data.get(game.losing_team_id)\n\n if winning_data is not None and losing_data is not None:\n week_games = games_by_week.setdefault(game.week, [])\n week_games.append(game)\n\n elif game.status == GameStatus.SCHEDULED:\n season_is_complete = False\n\n n = len(team_data)\n a = numpy.zeros((n + 1, n))\n b = numpy.zeros(n + 1)\n\n rankings = []\n for week in sorted(games_by_week.keys()):\n for game in games_by_week[week]:\n if (\n game.home_team_score is not None\n and game.away_team_score is not None\n ):\n home_data = team_data[game.home_team_id]\n away_data = team_data[game.away_team_id]\n\n home_margin = self._adjust_margin(\n game.home_team_score - game.away_team_score,\n )\n home_data.add_game(home_margin)\n away_data.add_game(-home_margin)\n\n a[home_data.index, away_data.index] -= 1.0\n a[away_data.index, home_data.index] -= 1.0\n\n for data in team_data.values():\n a[data.index, data.index] = data.game_total\n b[data.index] = data.point_margin\n a[n, data.index] = 1.0\n b[n] = 0.0\n\n x = numpy.linalg.lstsq(a, b, rcond=-1)[0]\n\n result = {id_: x[data.index] for id_, data in team_data.items()}\n ranking_values = TeamRankingService._to_values(season_data, result)\n\n rankings.append(\n self._repository.create(\n SRSRankingService.name,\n season_data.season.id_,\n week,\n ranking_values,\n ),\n )\n\n if season_is_complete:\n rankings.append(\n self._repository.create(\n SRSRankingService.name,\n season_data.season.id_,\n None,\n ranking_values,\n ),\n )\n\n return rankings\n\n @staticmethod\n def _adjust_margin(margin: int) -> int:\n if margin > 24:\n return 24\n if margin < -24:\n return -24\n if 0 < margin < 7:\n return 7\n if 0 > margin > -7:\n return -7\n return margin\n" ]
[ [ "numpy.linalg.lstsq", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jamaps/tracc
[ "0f71b07b6560ed2f5a9a9f6f94a07e487af254c5" ]
[ "build/lib/tracc/tracc.py" ]
[ "import tracc\nimport pandas as pd\nimport numpy as np\n\n\nclass costs:\n\n def __init__(self,\n travelcosts_df,\n columns = None\n ):\n\n \"\"\"\n Inputs data and prunes columns if desired\n \"\"\"\n\n if columns is not None:\n self.data = travelcosts_df[columns]\n\n else:\n self.data = travelcosts_df\n\n\n def intrazonal(self,\n cost_column,\n origin_column,\n destination_column,\n method = \"constant\",\n value = 0,\n polygon_file = None,\n polygon_id = None\n ):\n \"\"\"\n Computes and updates intrazonal travel cost in a travel costs matrix. The output will include a travel cost between any origin or destination location in the matrix to itself.\n\n Parameters\n ----------\n cost_column : column name for travel costs\n\n origin_column : column name for origin IDs\n\n destinationn_column : column name for origin IDs\n\n method : \"constant\" applies a single @value to all intrazonal travel costs. \"radius\" applies a cost which is proportional to the radius of a circle with the same area as its input polygon\n\n value : parameters for the method\n\n polygon_file : file path to an input spatial polygon (e.g. geojson) if needed (it is for method = \"radius\")\n\n polygon_id : ID field for the polygon_file needed for joining to the cost matrix\n \"\"\"\n\n # making sure ID columns are strings for a merge later on\n self.data[origin_column] = self.data[origin_column].astype(str)\n self.data[destination_column] = self.data[destination_column].astype(str)\n\n # getting set of unique locations in the dataset\n locations = list(self.data[origin_column].unique()) + list(self.data[destination_column].unique())\n locations = list(set(locations))\n\n if method == \"constant\":\n\n new_times = [value] * len(locations)\n\n df = pd.DataFrame(\n list(zip(locations, locations, new_times)),\n columns =[origin_column, destination_column, cost_column + \"_i\"])\n\n elif method == \"radius\":\n\n from tracc.spatial import radius\n\n # compute based on the equivilant radius of each polygon\n df = radius(polygon_file,polygon_id)\n df[origin_column] = df[polygon_id]\n df[destination_column] = df[polygon_id]\n del df[polygon_id]\n df[cost_column + \"_i\"] = value * df[\"radius\"]\n del df[\"radius\"]\n\n else:\n raise Exception(\"Method can only be 'constant' or 'radius'\")\n\n df[origin_column] = df[origin_column].astype(str)\n df[destination_column] = df[destination_column].astype(str)\n\n # join in the newly created intrazonal travel times\n self.data = pd.merge(self.data, df, how='outer', left_on=[origin_column, destination_column], right_on = [origin_column, destination_column])\n\n # replace the older intrazonal travel times\n self.data[cost_column] = np.where((self.data[cost_column + \"_i\"] >= 0),self.data[cost_column + \"_i\"],self.data[cost_column])\n\n del self.data[cost_column + \"_i\"]\n\n\n\n\n def fill_missing_costs(\n self,\n cost_column,\n origin_column,\n destination_column,\n spatial_file_path,\n spatial_file_id,\n where = \"origin\",\n weight_type = \"Queen\"\n ):\n \"\"\"\n Completes an OD matrix by filling locations that were missing from the original matrix, based on a neighbourhood spatial weights matrix. 
For example if a origin zone has no travel costs, it presumes its travel costs to destinations are the average of the same costs of its neighbouring zones.\n \"\"\"\n\n from tracc.spatial import area\n\n # get list of zones which are missing from the input costs table\n dfz = area(spatial_file_path, spatial_file_id)\n dfz[spatial_file_id] = dfz[spatial_file_id].astype(str)\n self.data[origin_column] = self.data[origin_column].astype(str)\n li1 = list(self.data[origin_column].unique())\n li2 = list(dfz[spatial_file_id].unique())\n missing = [x for x in li2 if x not in li1]\n del li1,li2\n\n if len(missing) == 0:\n return None\n\n if where == \"origin\":\n\n # get neighbours for each missing zone\n from tracc.spatial import get_neighbours\n neighbours = get_neighbours(spatial_file_path, \"Queen\", spatial_file_id)\n\n new_times = []\n\n # for each zone, compute average travel times to other zones based on neighbours\n for location in missing:\n\n locneigh = neighbours[location]\n\n temp = self.data[self.data[origin_column].isin(locneigh)]\n\n temp = pd.DataFrame(temp.groupby([destination_column], as_index=False)[cost_column].mean())\n\n temp[origin_column] = location\n\n new_times.append(temp)\n\n # combine the outputs, and concat to the input times\n new_times = pd.concat(new_times)\n self.data = pd.concat([self.data, new_times])\n\n elif where == \"destination\":\n\n # get neighbours for each missing zone\n from tracc.spatial import get_neighbours\n neighbours = get_neighbours(spatial_file_path, \"Queen\", spatial_file_id)\n\n new_times = []\n\n # for each zone, compute average travel times from other zones based on neighbours\n for location in missing:\n\n locneigh = neighbours[location]\n\n temp = self.data[self.data[destination_column].isin(locneigh)]\n\n temp = pd.DataFrame(temp.groupby([origin_column], as_index=False)[cost_column].mean())\n\n temp[destination_column] = location\n\n new_times.append(temp)\n\n # combine the outputs, and concat to the input times\n new_times = pd.concat(new_times)\n self.data = pd.concat([self.data, new_times])\n\n else:\n\n raise Exception(\"Input paramater @where should either be 'origin' or 'destination'\")\n\n\n\n\n def generalized_cost(\n self,\n columns,\n coefficients,\n exponents = None,\n prune_output = True,\n output_cost_name = \"GC\"\n ):\n\n \"\"\"\n Computes generalized costs\n \"\"\"\n\n # need to add a column check warning, and make the intercept = 0 if none is provided\n\n # set all exponents as 1 if none are inputted\n if exponents is None:\n exponents = [1] * len(columns)\n\n # compute the generalized cost value\n self.data[output_cost_name] = coefficients[len(coefficients) - 1]\n i = 0\n while i < len(columns):\n self.data[output_cost_name] = self.data[output_cost_name] + coefficients[i] * self.data[columns[i]] ** exponents[i]\n i += 1\n\n # delete initital cost columns if desired\n if prune_output is True:\n for col in list(set(columns)):\n del self.data[col]\n\n\n def impedence_calc(\n self,\n cost_column,\n impedence_func,\n impedence_func_params,\n prune_output = False,\n output_col_name = \"fCij\"\n ):\n\n \"\"\"\n Measures impdence given input of travel cost and selected impedence funciton and parameters\n\n # To Do: add in more impdence function options\n \"\"\"\n\n if impedence_func == \"cumulative\":\n self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.cumulative,args = (impedence_func_params,))\n\n elif impedence_func == \"linear\":\n self.data[output_col_name] = 
self.data[cost_column].apply(tracc.decay.linear,args = (impedence_func_params,))\n\n elif impedence_func == \"exponential\":\n self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.exponential,args = (impedence_func_params,))\n\n else:\n raise Exception(\"Please select an appropriate decay function\")\n\n if prune_output is True:\n del self.data[cost_column]\n\n\n def impedence_combine(self,\n columns,\n how = \"product\",\n output_col_name = \"fCij\",\n prune_output = True\n ):\n\n \"\"\"\n If there are multiple impedences, and we want to combine them into a single impedence value. This is similar to genearlized cost.\n\n For example, if we have an impedence value for transit travel time, and we also want to remove any trips based on a fare criteria, it can be applied in this way.\n \"\"\"\n\n if how == \"product\":\n self.data[output_col_name] = 1\n i = 0\n while i < len(columns):\n self.data[output_col_name] = self.data[output_col_name] * self.data[columns[i]]\n i += 1\n\n elif how == \"sum\":\n self.data[output_col_name] = 0\n i = 0\n while i < len(columns):\n self.data[output_col_name] = self.data[output_col_name] + self.data[columns[i]]\n i += 1\n\n else:\n raise Exception('the input @how must be one of \"product\" or \"sum\"')\n\n\n\n def max_impedence(self,\n columns,\n imp_col_name = \"fCij\"\n ):\n \"\"\"\n Reduces the cost table to only include rows with the maximum impedence value for the set of input columns.\n\n For example, if there 3 transit trips from i to j, each with a different computed generalized_cost resulting from different route choices, this function will return the row with the one resulting in the greatest impedence value (i.e. lowest generalized cost)\n \"\"\"\n\n self.data = self.data.groupby(columns)[imp_col_name].max().reset_index()\n\n\n\nclass supply:\n\n def __init__(self,\n supply_df,\n columns = None\n ):\n \"\"\"\n intitializing can include pruning the dataset to a list of @column names\n \"\"\"\n\n if columns is not None:\n self.data = supply_df[columns]\n\n else:\n self.data = supply_df\n\n\n\n def weight(self,\n columns,\n weights,\n weight_col_name = \"Oj\",\n prune_output = True\n ):\n \"\"\"\n Creating a value based on a weighted linear combination other values. 
Can be used to weight by destinations by their desirability.\n\n Parameters\n ----------------\n columns : columns in which to input into the weights function\n\n weights : linear multipliers, the same length as the weights\n\n weight_col_name : output column name\n\n prune_output : if True, delete all input columns used in the weight function\n \"\"\"\n\n if len(columns) != len(weights):\n raise Exception(\"Please make sure columns and weights are lists of the same length\")\n\n if len(columns) < 2:\n raise Exception(\"Can only weight opportunities if 2 or more are inputted\")\n\n if sum(weights) < 0.999 or sum(weights) > 1.001:\n print(\"WARNING: the inputted weights do not sum to 1.\")\n\n\n self.data[weight_col_name] = 0\n i = 0\n while i < len(columns):\n self.data[weight_col_name] = self.data[weight_col_name] + weights[i] * self.data[columns[i]]\n i += 1\n\n if prune_output is True:\n for col in list(set(columns)):\n del self.data[col]\n\n\n\nclass demand:\n\n def __init__(self,\n demand_df,\n columns = None\n ):\n \"\"\"\n intitializing can include pruning the dataset to a list of @column names\n \"\"\"\n\n if columns is not None:\n self.data = demand_df[columns]\n\n else:\n self.data = demand_df\n\n\n def weight(self,\n columns,\n weights,\n weight_col_name = \"Pi\",\n prune_output = True\n ):\n \"\"\"\n Creating a value based on a weighted linear combination other values. Can be used to weight by population groups by their propensity to travel to certain activity types.\n\n Parameters\n ----------------\n columns : columns in which to input into the weights function\n\n weights : linear multipliers, the same length as the weights\n\n weight_col_name : output column name\n\n prune_output : if True, delete all input columns used in the weight function\n \"\"\"\n\n if len(columns) != len(weights):\n raise Exception(\"Please make sure columns and weights are lists of the same length\")\n\n if len(columns) < 2:\n raise Exception(\"Can only weight opportunities if 2 or more are inputted\")\n\n if sum(weights) < 0.999 or sum(weights) > 1.001:\n print(\"WARNING: the inputted weights do not sum to 1.\")\n\n self.data[weight_col_name] = 0\n i = 0\n while i < len(columns):\n self.data[weight_col_name] = self.data[weight_col_name] + weights[i] * self.data[columns[i]]\n i += 1\n\n if prune_output is True:\n for col in list(set(columns)):\n del self.data[col]\n\n\n\nclass accessibility:\n\n def __init__(self,\n travelcosts_df,\n supply_df,\n demand_df = None,\n travelcosts_ids = [\"origin_id\",\"destination_id\"],\n supply_ids = \"destination_id\",\n demand_ids = None\n ):\n \"\"\"\n Parameters\n ----------\n travelcosts_df : a pandas dataframe containing travel costs from a set of locations (e.g. orignis) to another set of locations (e.g. destinations). Data should be in a long table format:\n\n origin_id | destination_id | travel_cost_1 | travel_cost_2 (optional) | etc (optional)\n\n supply_df : a pandas dataframe containing the number of opportunities (e.g. supply), relational to the destination IDs in travelcosts_df\n\n demand_df : a pandas dataframe containing the number of agents competiting for opportunities (e.g. demand), relational to the origin IDs in travelcosts_df. 
This is optional since several accessibility measures do not account for demand\n\n travelcosts_ids : a two item list of the column names for the origin and destination IDs in the travelcosts_df table\n\n supply_ids : a single variable string for the destination ID in the supply_df table\n\n demand_ids : a single variable string for the origin ID in the demand_df table. This is optional since several accessibility measures do not account for demand\n\n \"\"\"\n\n self.travelcosts_ids = travelcosts_ids\n self.supply_ids = supply_ids\n self.demand_ids = demand_ids\n\n if demand_df is None and supply_df is None:\n raise Exception(\"Please input a supply_df or a demand_df\")\n\n # setting ID columns to strings to aid merging\n travelcosts_df[travelcosts_ids[0]] = travelcosts_df[travelcosts_ids[0]].astype(str)\n travelcosts_df[travelcosts_ids[1]] = travelcosts_df[travelcosts_ids[1]].astype(str)\n\n # join supply data to the travel costs\n if supply_df is not None and demand_df is None:\n supply_df[supply_ids] = supply_df[supply_ids].astype(str)\n self.data = pd.merge(\n travelcosts_df,\n supply_df,\n left_on=travelcosts_ids[1],\n right_on=self.supply_ids,\n how = 'left'\n )\n\n # join demand data as well, if inputted\n elif demand_df is not None and supply_df is None:\n demand_df[demand_ids] = demand_df[demand_ids].astype(str)\n self.data = pd.merge(\n travelcosts_df,\n demand_df,\n left_on=travelcosts_ids[0],\n right_on=self.demand_ids,\n how = 'left'\n )\n\n else:\n supply_df[supply_ids] = supply_df[supply_ids].astype(str)\n demand_df[demand_ids] = demand_df[demand_ids].astype(str)\n self.data = pd.merge(\n travelcosts_df,\n supply_df,\n left_on=travelcosts_ids[1],\n right_on=self.supply_ids,\n how = 'left'\n )\n self.data = pd.merge(\n self.data,\n demand_df,\n left_on=travelcosts_ids[0],\n right_on=self.demand_ids,\n how = 'left'\n )\n\n\n def potential(self, opportunity, impedence, output_col_name = None):\n \"\"\"\n Measures potential accessibility to destinations\n\n Parameters\n ----------\n opportunity : a string indicating the column name for which opportunity we are measuring access to (e.g. jobs, grocery stores, etc.). This column should be in the supply_df dataframe\n\n impedence : column from the travel costs object to weight opportunities by\n\n output_col_name : a string for the column name of the output accessibility measure\n\n\n Output\n ----------\n A pandas dataframe with the first column with the IDs of the origin point (self.travelcosts_ids[0]), and the second column accessibility measures based on the input parameters.\n\n \"\"\"\n\n # set the output name for the accessibility measure\n if output_col_name is None:\n A_col_name = \"A_\" + opportunity + \"_\" + impedence\n else:\n A_col_name = output_col_name\n\n # multiply the opportunity by the impedence\n self.data[A_col_name] = self.data[opportunity] * self.data[impedence]\n\n # sum by the origin locations\n Ai = self.data.groupby(self.travelcosts_ids[0])[[A_col_name]].sum().reset_index()\n\n del self.data[A_col_name]\n\n return Ai\n\n\n\n\n def passive(self, population, impedence, output_col_name = None):\n\n \"\"\"\n Measures passive accessibility to destinations\n\n Parameters\n ----------\n population : a string indicating the column name for which population we are measuring access to (e.g. overall population, employed population, etc.). 
This column should be in the demand_df dataframe\n\n impedence : column from the travel costs object to weight opportunities by\n\n output_col_name : a string for the column name of the output accessibility measure\n\n\n Output\n ----------\n A pandas dataframe with the first column with the IDs of the origin point (self.travelcosts_ids[0]), and the second column accessibility measures based on the input parameters.\n\n \"\"\"\n\n # set the output name for the accessibility measure\n if output_col_name is None:\n A_col_name = \"A_\" + population + \"_\" + impedence\n else:\n A_col_name = output_col_name\n\n # multiply the opportunity by the impedence\n self.data[A_col_name] = self.data[population] * self.data[impedence]\n\n # sum by the origin locations\n Ai = self.data.groupby(self.travelcosts_ids[1])[[A_col_name]].sum().reset_index()\n\n del self.data[A_col_name]\n\n return Ai\n\n\n\n\n def mintravelcost(self, travelcost, opportunity, min_n, output_col_name = None):\n \"\"\"\n Parameters\n ----------\n opportunity : a string indicating the column name for which opportunity we are measuring access to (e.g. jobs, grocery stores, etc.). This column should be in the supply_df dataframe\n\n travelcost : a string indicating the column name for which travel cost shall be used (e.g. travel time, monetary cost, etc.). This column should be in the travelcosts_df dataframe\n\n min_n : an int indicating the number of desired reachable opportunities (e.g. 1 library, 3 grocery stores, 10k jobs, etc.)\n\n output_col_name : a string for the column name of the output accessibility measure\n\n\n\n Output\n ---------\n A pandas dataframe with the first column with the IDs of the origin point (self.travelcosts_ids[0]), and the second column are the accessibility measures based on the input parameters.\n \"\"\"\n\n # set the output name for the accessibility measure\n if output_col_name is None:\n A_col_name = \"A_mintravelcost_\" + str(travelcost) + \"_\" + str(opportunity) + \"_\" + str(min_n)\n else:\n A_col_name = output_col_name\n\n # internal function of returning the min travel time for n opportunities\n def get_min(df, tc, o, n):\n df = df.sort_values(by=[tc], ascending=True)\n df[\"cumsum\"] = df[o].cumsum()\n df = df[df[\"cumsum\"] >= n]\n return df[travelcost].min()\n\n # generating the accessibility measure\n out = pd.DataFrame(self.data.groupby(self.travelcosts_ids[0]).apply(get_min, tc = travelcost, o = opportunity, n = min_n))\n\n # setting the column name of the output\n out.columns = [A_col_name]\n\n return out\n\n\n\nclass summary:\n \"\"\"\n Computing various summary statistics of accessibility, usually with respect to different population groups\n\n Some of these can be used to assess distributions and equity of transport networks.\n \"\"\"\n\n def __init__(\n self,\n accessibility_df,\n summary_vars,\n accessibility_id = \"id\",\n summary_vars_id = \"id\"\n ):\n\n # join the data\n self.data = pd.merge(\n accessibility_df,\n summary_vars,\n left_on=accessibility_id,\n right_on=summary_vars_id,\n how = 'left'\n )\n\n def weighted_mean(self, access_var, group_var):\n\n return tracc.statistics.weighted_mean(self.data, access_var, group_var)\n\n def weighted_var(self, access_var, group_var):\n\n return tracc.statistics.weighted_var(self.data, access_var, group_var)\n\n def weighted_sd(self, access_var, group_var):\n\n return tracc.statistics.weighted_sd(self.data, access_var, group_var)\n\n def weighted_CV(self, access_var, group_var):\n\n return tracc.statistics.weighted_CV(self.data, 
access_var, group_var)\n\n def weighted_Gini(self, access_var, group_var):\n\n return tracc.statistics.weighted_Gini(self.data, access_var, group_var)\n\n def quantiles(self, access_var, group_vars, nbins = 10, result = \"percent\"):\n\n # assign each observation a bin, based on nbins\n dfq = pd.DataFrame( tracc.statistics.weighted_qcut(self.data[access_var], self.data[group_vars[0]], nbins))\n\n # create a specific name for the quantile column\n q_col_name = 'q' + str(nbins) + \"_\" + (group_vars[0])\n dfq.columns = [q_col_name]\n self.data = self.data.join(dfq, how='outer')\n\n # group by each bin, susmmarize\n dfq = self.data.groupby([q_col_name])[group_vars].sum()\n\n # return as counts or percent\n if result == \"count\":\n return dfq\n elif result == \"percent\":\n for var in group_vars:\n dfq[var] = dfq[var] / dfq[var].sum()\n return dfq\n" ]
[ [ "pandas.merge", "numpy.where", "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ShashankBice/pygeotools
[ "5bc74f96cf79f3089572cab7e4f3632ca36b22bc" ]
[ "pygeotools/lib/iolib.py" ]
[ "#! /usr/bin/env python\n\"\"\"\nFunctions for IO, mostly wrapped around GDAL\n\nNote: This was all written before RasterIO existed, which might be a better choice. \n\"\"\"\n\nimport os\nimport subprocess\n\nimport numpy as np\nfrom osgeo import gdal, gdal_array, osr\n\n#Define drivers\nmem_drv = gdal.GetDriverByName('MEM')\ngtif_drv = gdal.GetDriverByName('GTiff')\nvrt_drv = gdal.GetDriverByName(\"VRT\")\n\n#Default GDAL creation options\ngdal_opt = ['COMPRESS=LZW', 'TILED=YES', 'BIGTIFF=IF_SAFER']\n#gdal_opt += ['BLOCKXSIZE=1024', 'BLOCKYSIZE=1024']\n#List that can be used for building commands\ngdal_opt_co = []\n[gdal_opt_co.extend(('-co', i)) for i in gdal_opt]\n\n#Add methods to load ma from OpenCV, PIL, etc.\n#These formats should be directly readable as np arrays\n\n#Note: want to modify to import all bands as separate arrays in ndarray\n#Unless the user requests a single band, or range of bands\n\n#Check for file existence\ndef fn_check(fn):\n \"\"\"Wrapper to check for file existence\n \n Parameters\n ----------\n fn : str\n Input filename string.\n \n Returns\n -------\n bool\n True if file exists, False otherwise.\n \"\"\"\n return os.path.exists(fn)\n\ndef fn_check_full(fn):\n \"\"\"Check for file existence\n\n Avoids race condition, but slower than os.path.exists.\n \n Parameters\n ----------\n fn : str\n Input filename string.\n \n Returns\n -------\n status \n True if file exists, False otherwise.\n \"\"\"\n status = True \n if not os.path.isfile(fn): \n status = False\n else:\n try: \n open(fn) \n except IOError:\n status = False\n return status\n\ndef fn_list_check(fn_list):\n status = True\n for fn in fn_list:\n if not fn_check(fn):\n print('Unable to find: %s' % fn)\n status = False\n return status\n\ndef fn_list_valid(fn_list):\n print('%i input fn' % len(fn_list))\n out_list = []\n for fn in fn_list:\n if not fn_check(fn):\n print('Unable to find: %s' % fn)\n else:\n out_list.append(fn)\n print('%i output fn' % len(out_list))\n return out_list \n\n#Wrapper around gdal.Open\ndef fn_getds(fn):\n \"\"\"Wrapper around gdal.Open()\n \"\"\"\n ds = None\n if fn_check(fn):\n ds = gdal.Open(fn, gdal.GA_ReadOnly)\n else:\n print(\"Unable to find %s\" % fn)\n return ds\n\ndef fn_getma(fn, bnum=1):\n \"\"\"Get masked array from input filename\n\n Parameters\n ----------\n fn : str\n Input filename string\n bnum : int, optional\n Band number\n \n Returns\n -------\n np.ma.array \n Masked array containing raster values\n \"\"\"\n #Add check for filename existence\n ds = fn_getds(fn)\n return ds_getma(ds, bnum=bnum)\n\n#Given input dataset, return a masked array for the input band\ndef ds_getma(ds, bnum=1):\n \"\"\"Get masked array from input GDAL Dataset\n\n Parameters\n ----------\n ds : gdal.Dataset \n Input GDAL Datset\n bnum : int, optional\n Band number\n \n Returns\n -------\n np.ma.array \n Masked array containing raster values\n \"\"\"\n b = ds.GetRasterBand(bnum)\n return b_getma(b)\n\n#Given input band, return a masked array\ndef b_getma(b):\n \"\"\"Get masked array from input GDAL Band\n\n Parameters\n ----------\n b : gdal.Band \n Input GDAL Band \n \n Returns\n -------\n np.ma.array \n Masked array containing raster values\n \"\"\"\n b_ndv = get_ndv_b(b)\n #bma = np.ma.masked_equal(b.ReadAsArray(), b_ndv)\n #This is more appropriate for float, handles precision issues\n bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)\n return bma\n\ndef get_sub_dim(src_ds, scale=None, maxdim=1024):\n \"\"\"Compute dimensions of subsampled dataset \n\n Parameters\n ----------\n ds : 
gdal.Dataset \n Input GDAL Datset\n scale : int, optional\n Scaling factor\n maxdim : int, optional \n Maximum dimension along either axis, in pixels\n \n Returns\n -------\n ns\n Numper of samples in subsampled output\n nl\n Numper of lines in subsampled output\n scale \n Final scaling factor\n \"\"\"\n ns = src_ds.RasterXSize\n nl = src_ds.RasterYSize\n maxdim = float(maxdim)\n if scale is None:\n scale_ns = ns/maxdim\n scale_nl = nl/maxdim\n scale = max(scale_ns, scale_nl)\n #Need to check to make sure scale is positive real \n if scale > 1:\n ns = int(round(ns/scale))\n nl = int(round(nl/scale))\n return ns, nl, scale\n\ndef fn_getma_sub(fn, bnum=1, scale=None, maxdim=1024., return_ds=False): \n ds = gdal.Open(fn)\n return ds_getma_sub(ds, bnum=bnum, scale=scale, maxdim=maxdim, return_ds=return_ds)\n\n#Load a subsampled array\n#Can specify scale factor or max dimension\n#No need to load the entire dataset for stats computation\ndef ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False): \n \"\"\"Load a subsampled array, rather than full resolution\n\n This is useful when working with large rasters\n\n Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method.\n\n Parameters\n ----------\n ds : gdal.Dataset \n Input GDAL Datset\n bnum : int, optional\n Band number\n scale : int, optional\n Scaling factor\n maxdim : int, optional \n Maximum dimension along either axis, in pixels\n \n Returns\n -------\n np.ma.array \n Masked array containing raster values\n \"\"\"\n #print src_ds.GetFileList()[0]\n b = src_ds.GetRasterBand(bnum)\n b_ndv = get_ndv_b(b)\n ns, nl, scale = get_sub_dim(src_ds, scale, maxdim)\n #The buf_size parameters determine the final array dimensions\n b_array = b.ReadAsArray(buf_xsize=ns, buf_ysize=nl)\n bma = np.ma.masked_values(b_array, b_ndv)\n out = bma\n if return_ds:\n dtype = src_ds.GetRasterBand(1).DataType\n src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype)\n gt = np.array(src_ds.GetGeoTransform())\n gt[[1,5]] = gt[[1,5]]*scale\n src_ds_sub.SetGeoTransform(list(gt))\n src_ds_sub.SetProjection(src_ds.GetProjection())\n b = src_ds_sub.GetRasterBand(1)\n b.WriteArray(bma)\n b.SetNoDataValue(b_ndv)\n out = (bma, src_ds_sub)\n return out\n\n#Note: need to consolidate with warplib.writeout (takes ds, not ma)\n#Add option to build overviews when writing GTiff\n#Input proj must be WKT\ndef writeGTiff(a, dst_fn, src_ds=None, bnum=1, ndv=None, gt=None, proj=None, create=False, sparse=False):\n \"\"\"Write input array to disk as GeoTiff\n\n Parameters\n ----------\n a : np.array or np.ma.array\n Input array\n dst_fn : str\n Output filename\n src_ds: GDAL Dataset, optional\n Source Dataset to use for creating copy\n bnum : int, optional \n Output band\n ndv : float, optional \n Output NoData Value\n gt : list, optional\n Output GeoTransform\n proj : str, optional\n Output Projection (OGC WKT or PROJ.4 format)\n create : bool, optional\n Create new dataset\n sparse : bool, optional\n Output should be created with sparse options\n \"\"\"\n #If input is not np.ma, this creates a new ma, which has default filL_value of 1E20\n #Must manually override with ndv\n #Also consumes a lot of memory\n #Should bypass if input is bool\n from pygeotools.lib.malib import checkma \n a = checkma(a, fix=False)\n #Want to preserve fill_value if already specified\n if ndv is not None:\n a.set_fill_value(ndv)\n driver = gtif_drv\n #Currently only support writing singleband rasters\n #if a.ndim > 2:\n # np_nbands = a.shape[2]\n # if src_ds.RasterCount 
np_nbands: \n # for bnum in np_nbands:\n nbands = 1\n np_dt = a.dtype.name\n if src_ds is not None:\n #If this is a fn, get a ds\n #Note: this saves a lot of unnecessary iolib.fn_getds calls\n if isinstance(src_ds, str):\n src_ds = fn_getds(src_ds)\n #if isinstance(src_ds, gdal.Dataset):\n src_dt = gdal.GetDataTypeName(src_ds.GetRasterBand(bnum).DataType)\n src_gt = src_ds.GetGeoTransform()\n #This is WKT\n src_proj = src_ds.GetProjection()\n #src_srs = osr.SpatialReference() \n #src_srs.ImportFromWkt(src_ds.GetProjectionRef())\n\n #Probably a cleaner way to handle this\n if gt is None:\n gt = src_gt\n if proj is None:\n proj = src_proj\n\n #Need to create a new copy of the default options\n opt = list(gdal_opt)\n \n #Note: packbits is better for sparse data\n if sparse:\n opt.remove('COMPRESS=LZW')\n opt.append('COMPRESS=PACKBITS')\n #Not sure if VW can handle sparse tif\n #opt.append('SPARSE_OK=TRUE')\n\n #Use predictor=3 for floating point data\n if 'float' in np_dt.lower() and 'COMPRESS=LZW' in opt: \n opt.append('PREDICTOR=3')\n\n #If input ma is same as src_ds, write out array using CreateCopy from existing dataset\n #if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())): \n #Should compare srs.IsSame(src_srs)\n if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())) and (src_gt == gt) and (src_proj == proj):\n #Note: third option is strict flag, set to false\n dst_ds = driver.CreateCopy(dst_fn, src_ds, 0, options=opt)\n #Otherwise, use Create\n else:\n a_dtype = a.dtype\n gdal_dtype = np2gdal_dtype(a_dtype)\n if a_dtype.name == 'bool':\n #Set ndv to 0\n a.fill_value = False\n opt.remove('COMPRESS=LZW')\n opt.append('COMPRESS=DEFLATE')\n #opt.append('NBITS=1')\n #Create(fn, nx, ny, nbands, dtype, opt)\n dst_ds = driver.Create(dst_fn, a.shape[1], a.shape[0], nbands, gdal_dtype, options=opt)\n #Note: Need GeoMA here to make this work, or accept gt as argument\n #Could also do ds creation in calling script\n if gt is not None:\n dst_ds.SetGeoTransform(gt)\n if proj is not None:\n dst_ds.SetProjection(proj)\n \n dst_ds.GetRasterBand(bnum).WriteArray(a.filled())\n dst_ds.GetRasterBand(bnum).SetNoDataValue(float(a.fill_value))\n dst_ds = None\n\ndef writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):\n \"\"\"\n Write out a vrt to accompany a csv of points\n \"\"\"\n out_vrt = os.path.splitext(out_csv)[0]+'.vrt'\n out_csv = os.path.split(out_csv)[-1]\n f = open(out_vrt, 'w')\n f.write('<OGRVRTDataSource>\\n')\n f.write(' <OGRVRTLayer name=\"%s\">\\n' % os.path.splitext(out_csv)[0])\n f.write(' <SrcDataSource>%s</SrcDataSource>\\n' % out_csv)\n f.write(' <GeometryType>wkbPoint</GeometryType>\\n')\n f.write(' <LayerSRS>%s</LayerSRS>\\n' % srs)\n f.write(' <GeometryField encoding=\"PointFromColumns\" x=\"%s\" y=\"%s\"/>\\n' % (x, y))\n f.write(' </OGRVRTLayer>\\n')\n f.write('</OGRVRTDataSource>\\n')\n f.close()\n\n#Move to geolib?\n#Look up equivalent GDAL data type\ndef np2gdal_dtype(d):\n \"\"\"\n Get GDAL RasterBand datatype that corresponds with NumPy datatype\n Input should be numpy array or numpy dtype\n \"\"\"\n dt_dict = gdal_array.codes \n if isinstance(d, (np.ndarray, np.generic)):\n d = d.dtype\n #This creates dtype from another built-in type\n #d = np.dtype(d)\n if isinstance(d, np.dtype):\n if d.name == 'int8':\n gdal_dt = 1\n elif d.name == 'bool':\n #Write out as Byte\n 
gdal_dt = 1 \n else:\n gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)]\n else:\n print(\"Input must be NumPy array or NumPy dtype\")\n gdal_dt = None\n return gdal_dt\n\ndef gdal2np_dtype(b):\n \"\"\"\n Get NumPy datatype that corresponds with GDAL RasterBand datatype\n Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype\n \"\"\"\n dt_dict = gdal_array.codes\n if isinstance(b, str):\n b = gdal.Open(b)\n if isinstance(b, gdal.Dataset):\n b = b.GetRasterBand(1)\n if isinstance(b, gdal.Band):\n b = b.DataType\n if isinstance(b, int):\n np_dtype = dt_dict[b]\n else:\n np_dtype = None\n print(\"Input must be GDAL Dataset or RasterBand object\")\n return np_dtype\n\n#Replace nodata value in GDAL band\ndef replace_ndv(b, new_ndv):\n b_ndv = get_ndv_b(b) \n bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)\n bma.set_fill_value(new_ndv)\n b.WriteArray(bma.filled())\n b.SetNoDataValue(new_ndv)\n return b\n\ndef set_ndv(dst_fn, ndv):\n dst_ds = gdal.Open(dst_fn, gdal.GA_Update)\n for n in range(1, dst_ds.RasterCount+1):\n b = dst_ds.GetRasterBand(1)\n b.SetNoDataValue(ndv)\n dst_ds = None\n\n#Should overload these functions to handle fn, ds, or b\n#Perhaps abstract, as many functions will need this functionality\ndef get_ndv_fn(fn):\n ds = gdal.Open(fn, gdal.GA_ReadOnly)\n return get_ndv_ds(ds)\n\n#Want to modify to handle multi-band images and return list of ndv\ndef get_ndv_ds(ds, bnum=1):\n b = ds.GetRasterBand(bnum)\n return get_ndv_b(b)\n\n#Return nodata value for GDAL band\ndef get_ndv_b(b):\n \"\"\"Get NoData value for GDAL band.\n\n If NoDataValue is not set in the band, \n extract upper left and lower right pixel values.\n Otherwise assume NoDataValue is 0.\n \n Parameters\n ----------\n b : GDALRasterBand object \n This is the input band.\n \n Returns\n -------\n b_ndv : float \n NoData value \n \"\"\"\n\n b_ndv = b.GetNoDataValue()\n if b_ndv is None:\n #Check ul pixel for ndv\n ns = b.XSize\n nl = b.YSize\n ul = float(b.ReadAsArray(0, 0, 1, 1))\n #ur = float(b.ReadAsArray(ns-1, 0, 1, 1))\n lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1))\n #ll = float(b.ReadAsArray(0, nl-1, 1, 1))\n #Probably better to use 3/4 corner criterion\n #if ul == ur == lr == ll:\n if np.isnan(ul) or ul == lr:\n b_ndv = ul\n else:\n #Assume ndv is 0\n b_ndv = 0\n elif np.isnan(b_ndv):\n b_dt = gdal.GetDataTypeName(b.DataType)\n if 'Float' in b_dt:\n b_ndv = np.nan\n else:\n b_ndv = 0\n return b_ndv\n\n#Write out a recarray as a csv\ndef write_recarray(outfn, ra):\n with open(outfn,'w') as f:\n f.write(','.join([str(item) for item in ra.dtype.names])+'\\n')\n for row in ra:\n f.write(','.join([str(item) for item in row])+'\\n')\n \n#Check to make sure image doesn't contain errors\ndef image_check(fn):\n ds = gdal.Open(fn)\n status = True \n for i in range(ds.RasterCount):\n ds.GetRasterBand(i+1).Checksum()\n if gdal.GetLastErrorType() != 0:\n status = False \n return status\n\n#Return number of CPUs\n#Logical is \"virtual\" cpu count with hyperthreading\n#Set to False for physical cpu count\ndef cpu_count(logical=True):\n \"\"\"Return system CPU count\n \"\"\"\n if logical:\n from multiprocessing import cpu_count\n ncpu=cpu_count()\n else:\n import psutil\n ncpu=psutil.cpu_count(logical=False)\n return ncpu\n\ndef setstripe(dir, threads=cpu_count()):\n #import socket\n #if 'nasa' in socket.getfqdn():\n #Better to use 'df -T' to determine filesystem of directory\n #Can do this with psutil Python lib, but need to also find mount point of file\n if dir is not None:\n if 'lustre' in 
str(subprocess.check_output(['df','-T'])):\n if os.path.exists(dir): \n if threads is None:\n threads = cpu_count()\n cmd = ['lfs', 'setstripe', dir, '-c', str(threads)]\n print(' '.join(cmd))\n subprocess.call(cmd)\n\n#This is a shared directory for files like LULC, used by multiple tools \n#Default location is $HOME/data\n#Can specify in ~/.bashrc or ~/.profile\n#export DATADIR=$HOME/data\ndef get_datadir():\n default_datadir = os.path.join(os.path.expanduser('~'), 'data')\n datadir = os.environ.get('DATADIR', default_datadir)\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n return datadir\n\n#Function to get files using urllib\n#This works with ftp\ndef getfile(url, outdir=None):\n \"\"\"Function to fetch files using urllib\n\n Works with ftp\n\n \"\"\"\n fn = os.path.split(url)[-1]\n if outdir is not None:\n fn = os.path.join(outdir, fn)\n if not os.path.exists(fn):\n #Find appropriate urlretrieve for Python 2 and 3\n try:\n from urllib.request import urlretrieve\n except ImportError:\n from urllib import urlretrieve \n print(\"Retrieving: %s\" % url)\n #Add progress bar\n urlretrieve(url, fn)\n return fn\n\n#Function to get files using requests\n#Works with https authentication\ndef getfile2(url, auth=None, outdir=None):\n \"\"\"Function to fetch files using requests\n\n Works with https authentication\n\n \"\"\"\n import requests\n print(\"Retrieving: %s\" % url)\n fn = os.path.split(url)[-1]\n if outdir is not None:\n fn = os.path.join(outdir, fn)\n if auth is not None:\n r = requests.get(url, stream=True, auth=auth)\n else:\n r = requests.get(url, stream=True)\n chunk_size = 1000000\n with open(fn, 'wb') as fd:\n for chunk in r.iter_content(chunk_size):\n fd.write(chunk)\n\n#Get necessary credentials to access MODSCAG products - hopefully this will soon be archived with NSIDC \ndef get_auth():\n \"\"\"Get authorization token for https\n \"\"\"\n import getpass\n from requests.auth import HTTPDigestAuth\n #This binds raw_input to input for Python 2\n input_func = input\n try:\n input_func = raw_input\n except NameError:\n pass\n uname = input_func(\"MODSCAG Username:\")\n pw = getpass.getpass(\"MODSCAG Password:\")\n auth = HTTPDigestAuth(uname, pw)\n #wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw\n return auth\n\ndef readcsv(fn):\n \"\"\"\n Wrapper to read arbitrary csv, check for header\n\n Needs some work to be more robust, quickly added for demcoreg sampling\n \"\"\"\n import csv\n #Check first line for header\n with open(fn, 'r') as f:\n reader = csv.DictReader(f)\n hdr = reader.fieldnames\n\n #Assume there is a header on first line, check \n skiprows = 1\n if np.all(f.isdigit() for f in hdr):\n hdr = None\n skiprows = 0\n\n #Check header for lat/lon/z or x/y/z tags\n\n #Should probably do genfromtxt here if header exists and dtype of cols is variable\n pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None)\n return pts\n" ]
[ [ "numpy.isnan", "numpy.loadtxt", "numpy.ma.masked_values" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
chenxiaoyu523/RPNet-Pytorch
[ "7beceb9f39e66eba5283536b478f86523fcc96c7" ]
[ "data/utils.py" ]
[ "import os\nfrom PIL import Image\nimport numpy as np\n\n\ndef get_files(folder, name_filter=None, extension_filter=None):\n \"\"\"Helper function that returns the list of files in a specified folder\n with a specified extension.\n\n Keyword arguments:\n - folder (``string``): The path to a folder.\n - name_filter (```string``, optional): The returned files must contain\n this substring in their filename. Default: None; files are not filtered.\n - extension_filter (``string``, optional): The desired file extension.\n Default: None; files are not filtered\n\n \"\"\"\n if not os.path.isdir(folder):\n raise RuntimeError(\"\\\"{0}\\\" is not a folder.\".format(folder))\n\n # Filename filter: if not specified don't filter (condition always true);\n # otherwise, use a lambda expression to filter out files that do not\n # contain \"name_filter\"\n if name_filter is None:\n # This looks hackish...there is probably a better way\n name_cond = lambda filename: True\n else:\n name_cond = lambda filename: name_filter in filename\n\n # Extension filter: if not specified don't filter (condition always true);\n # otherwise, use a lambda expression to filter out files whose extension\n # is not \"extension_filter\"\n if extension_filter is None:\n # This looks hackish...there is probably a better way\n ext_cond = lambda filename: True\n else:\n ext_cond = lambda filename: filename.endswith(extension_filter)\n\n filtered_files = []\n\n # Explore the directory tree to get files that contain \"name_filter\" and\n # with extension \"extension_filter\"\n for path, _, files in os.walk(folder):\n files.sort()\n for file in files:\n if name_cond(file) and ext_cond(file):\n full_path = os.path.join(path, file)\n filtered_files.append(full_path)\n\n return filtered_files\n\n\ndef pil_loader(data_path, label_path):\n \"\"\"Loads a sample and label image given their path as PIL images.\n\n Keyword arguments:\n - data_path (``string``): The filepath to the image.\n - label_path (``string``): The filepath to the ground-truth image.\n\n Returns the image and the label as PIL images.\n\n \"\"\"\n data = Image.open(data_path)\n label = Image.open(label_path)\n\n return data, label\n\n\ndef remap(image, old_values, new_values):\n assert isinstance(image, Image.Image) or isinstance(\n image, np.ndarray), \"image must be of type PIL.Image or numpy.ndarray\"\n assert type(new_values) is tuple, \"new_values must be of type tuple\"\n assert type(old_values) is tuple, \"old_values must be of type tuple\"\n assert len(new_values) == len(\n old_values), \"new_values and old_values must have the same length\"\n\n # If image is a PIL.Image convert it to a numpy array\n if isinstance(image, Image.Image):\n image = np.array(image)\n\n # Replace old values by the new ones\n tmp = np.zeros_like(image)\n for old, new in zip(old_values, new_values):\n # Since tmp is already initialized as zeros we can skip new values\n # equal to 0\n if new != 0:\n tmp[image == old] = new\n\n return Image.fromarray(tmp)\n\n\ndef enet_weighing(dataloader, num_classes, c=1.02):\n \"\"\"Computes class weights as described in the ENet paper:\n\n w_class = 1 / (ln(c + p_class)),\n\n where c is usually 1.02 and p_class is the propensity score of that\n class:\n\n propensity_score = freq_class / total_pixels.\n\n References: https://arxiv.org/abs/1606.02147\n\n Keyword arguments:\n - dataloader (``data.Dataloader``): A data loader to iterate over the\n dataset.\n - num_classes (``int``): The number of classes.\n - c (``int``, optional): AN additional 
hyper-parameter which restricts\n the interval of values for the weights. Default: 1.02.\n\n \"\"\"\n class_count = 0\n total = 0\n for _, label in dataloader:\n label = label.cpu().numpy()\n\n # Flatten label\n flat_label = label.flatten()\n\n # Sum up the number of pixels of each class and the total pixel\n # counts for each label\n class_count += np.bincount(flat_label, minlength=num_classes)\n total += flat_label.size\n\n # Compute propensity score and then the weights for each class\n propensity_score = class_count / total\n class_weights = 1 / (np.log(c + propensity_score))\n\n return class_weights\n\n\ndef median_freq_balancing(dataloader, num_classes):\n \"\"\"Computes class weights using median frequency balancing as described\n in https://arxiv.org/abs/1411.4734:\n\n w_class = median_freq / freq_class,\n\n where freq_class is the number of pixels of a given class divided by\n the total number of pixels in images where that class is present, and\n median_freq is the median of freq_class.\n\n Keyword arguments:\n - dataloader (``data.Dataloader``): A data loader to iterate over the\n dataset.\n whose weights are going to be computed.\n - num_classes (``int``): The number of classes\n\n \"\"\"\n class_count = 0\n total = 0\n for _, label in dataloader:\n label = label.cpu().numpy()\n\n # Flatten label\n flat_label = label.flatten()\n\n # Sum up the class frequencies\n bincount = np.bincount(flat_label, minlength=num_classes)\n\n # Create of mask of classes that exist in the label\n mask = bincount > 0\n # Multiply the mask by the pixel count. The resulting array has\n # one element for each class. The value is either 0 (if the class\n # does not exist in the label) or equal to the pixel count (if\n # the class exists in the label)\n total += mask * flat_label.size\n\n # Sum up the number of pixels found for each class\n class_count += bincount\n\n # Compute the frequency and its median\n freq = class_count / total\n med = np.median(freq)\n\n return med / freq\n" ]
[ [ "numpy.log", "numpy.median", "numpy.zeros_like", "numpy.bincount", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rpuntaie/tensorflow_examples
[ "1958f7f0de9d96859dc3961a1695e1543fec9fd3", "1958f7f0de9d96859dc3961a1695e1543fec9fd3", "1958f7f0de9d96859dc3961a1695e1543fec9fd3" ]
[ "mask.py", "course_v2/_09nlp.py", "course_v1/sixi.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"\nChain models.\n\nMasking.\n\nShow output of layer.\n\"\"\"\n\nimport numpy as np\nfrom tensorflow.keras import Input\nfrom tensorflow.keras.layers import Masking, Dense\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.models import Sequential, Model\n\nX_train = np.random.rand(4,3,2)\nDense_unit = 1\ndense_reg = 0.01\nmdl = Sequential()\nmdl.add(Input(shape=(X_train.shape[1],X_train.shape[2]),name='input_feature'))\nmdl.add(Masking(mask_value=0,name='masking'))\nmdl.add(Dense(Dense_unit,kernel_regularizer=l2(dense_reg),activation='relu',name='output_feature'))\nmdl.summary()\n#this is the same as chaining models\nmdl2mask = Model(inputs=mdl.input,outputs=mdl.get_layer(\"masking\").output)\nmdl2mask.compile()\nmdl.compile()\nmaskoutput = mdl2mask.predict(X_train)\nmdloutput = mdl.predict(X_train)\nprint(maskoutput) # print output after/of masking\nprint(mdloutput) # print output of mdl\nprint(maskoutput.shape) #(4, 3, 2): masking has the shape of the layer before (input here)\nprint(mdloutput.shape) #(4, 3, 1): shape of the output of dense\n\n", "#!/usr/bin/env python3\n\n# Tokenizing text and creating sequences for sentences\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c01_nlp_turn_words_into_tokens.ipynb\n\n# This colab shows you how to tokenize text and create sequences for sentences as\n# the first stage of preparing text for use with TensorFlow models.\n\n## Import the Tokenizer\n\n# Import the Tokenizer\nimport io\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional\n\nimport tensorflow_datasets as tfds\n\nfrom savefit import *\n\n## Write some sentences\n# Feel free to change and add sentences as you like\n\nsentences = [\n 'My favorite food is ice cream',\n 'do you like ice cream too?',\n 'My dog likes ice cream!',\n \"your favorite flavor of icecream is chocolate\",\n \"chocolate isn't good for dogs\",\n \"your dog, your cat, and your parrot prefer broccoli\"\n]\n\n## Tokenize the words\n# The first step to preparing text to be used in a machine learning model is to\n# tokenize the text, in other words, to generate numbers for the words.\n\n# Optionally set the max number of words to tokenize.\n# The out of vocabulary (OOV) token represents words that are not in the index.\n# Call fit_on_text() on the tokenizer to generate unique numbers for each word\ntokenizer = Tokenizer(num_words = 100, oov_token=\"<OOV>\")\ntokenizer.fit_on_texts(sentences)\n\n\n## View the word index\n# After you tokenize the text, the tokenizer has a word index that contains\n# key-value pairs for all the words and their numbers.\n# The word is the key, and the number is the value.\n# Notice that the OOV token is the first entry.\n\n# Examine the word index\nword_index = tokenizer.word_index\nprint(word_index)\n\n# Get the number for a given word\nprint(word_index['favorite'])\n\n# Create sequences for the sentences\n\n# After you tokenize the words, the word index contains a unique number for each\n# word. However, the numbers in the word index are not ordered. Words in a\n# sentence have an order. 
So after tokenizing the words, the next step is to\n# generate sequences for the sentences.\n\nsequences = tokenizer.texts_to_sequences(sentences)\nprint (sequences)\n\n# Sequence sentences that contain words that are not in the word index\n\n# Let's take a look at what happens if the sentence being sequenced contains\n# words that are not in the word index.\n# The Out of Vocabluary (OOV) token is the first entry in the word index. You\n# will see it shows up in the sequences in place of any word that is not in the\n# word index.\n\nsentences2 = [\"I like hot chocolate\", \"My dogs and my hedgehog like kibble but my squirrel prefers grapes and my chickens like ice cream, preferably vanilla\"]\n\nsequences2 = tokenizer.texts_to_sequences(sentences2)\nprint(sequences2)\n\n\n# Preparing text to use with TensorFlow models\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c02_nlp_padding.ipynb\n\n# The high level steps to prepare text to be used in a machine learning model are:\n\n# 1. Tokenize the words to get numerical values for them\n# 2. Create numerical sequences of the sentences\n# 3. Adjust the sequences to all be the same length.\n\n## Make the sequences all the same length\n\n# Later, when you feed the sequences into a neural network to train a model, the\n# sequences all need to be uniform in size. Currently the sequences have varied\n# lengths, so the next step is to make them all be the same size, either by\n# padding them with zeros and/or truncating them.\n# \n# Use f.keras.preprocessing.sequence.pad_sequences to add zeros to the sequences\n# to make them all be the same length. By default, the padding goes at the start\n# of the sequences, but you can specify to pad at the end.\n# \n# You can optionally specify the maximum length to pad the sequences to.\n# Sequences that are longer than the specified max length will be truncated. By\n# default, sequences are truncated from the beginning of the sequence, but you\n# can specify to truncate from the end.\n# \n# If you don't provide the max length, then the sequences are padded to match the\n# length of the longest sentence.\n# \n# For all the options when padding and truncating sequences, see\n# https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences\n\n\npadded = pad_sequences(sequences)\nprint(\"\\nWord Index = \" , word_index)\nprint(\"\\nSequences = \" , sequences)\nprint(\"\\nPadded Sequences:\")\nprint(padded)\n\n\n# Specify a max length for the padded sequences\npadded = pad_sequences(sequences, maxlen=15)\nprint(padded)\n\n# Put the padding at the end of the sequences\npadded = pad_sequences(sequences, maxlen=15, padding=\"post\")\nprint(padded)\n\n# Limit the length of the sequences, you will see some sequences get truncated\npadded = pad_sequences(sequences, maxlen=3)\nprint(padded)\n\n## What happens if some of the sentences contain words that are not in the word index?\n\n# Here's where the \"out of vocabulary\" token is used. 
Try generating sequences\n# for some sentences that have words that are not in the word index.\n\n# Try turning sentences that contain words that \n# aren't in the word index into sequences.\n# Add your own sentences to the test_data\ntest_data = [\n \"my best friend's favorite ice cream flavor is strawberry\",\n \"my dog's best friend is a manatee\"\n]\nprint (test_data)\n\n# Remind ourselves which number corresponds to the\n# out of vocabulary token in the word index\nprint(\"<OOV> has the number\", word_index['<OOV>'], \"in the word index.\")\n\n# Convert the test sentences to sequences\ntest_seq = tokenizer.texts_to_sequences(test_data)\nprint(\"\\nTest Sequence = \", test_seq)\n\n# Pad the new sequences\npadded = pad_sequences(test_seq, maxlen=10)\nprint(\"\\nPadded Test Sequence: \")\n\n# Notice that \"1\" appears in the sequence wherever there's a word \n# that's not in the word index\nprint(padded)\n\n\n# Tokenize and sequence a bigger corpus of text\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c03_nlp_prepare_larger_text_corpus.ipynb\n\n# So far, you have written some test sentences and generated a word index and\n# then created sequences for the sentences. \n\n# Now you will tokenize and sequence a larger body of text, specifically reviews\n# from Amazon and Yelp. \n\n## About the dataset\n\n# You will use a dataset containing Amazon and Yelp reviews of products and\n# restaurants. This dataset was originally extracted from\n# [Kaggle](https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set).\n\n# The dataset includes reviews, and each review is labelled as 0 (bad) or 1\n# (good). However, in this exercise, you will only work with the reviews, not the\n# labels, to practice tokenizing and sequencing the text. \n\n### Example good reviews:\n\n# * This is hands down the best phone I've ever had.\n# * Four stars for the food & the guy in the blue shirt for his great vibe & still letting us in to eat !\n\n### Example bad reviews: \n\n# * A lady at the table next to us found a live green caterpillar In her salad\n# * If you plan to use this in a car forget about it.\n\n### See more reviews\n# Feel free to [download the\n # dataset](https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P)\n# from a drive folder belonging to Udacity and open it on your local machine to\n# see more reviews.\n\n# Get the corpus of text\n\n# The combined dataset of reviews has been saved in a Google drive belonging to\n# Udacity. You can download it from there.\n\npath = tf.keras.utils.get_file('reviews.csv', 'https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P')\nprint (path)\n\n\n# Each row in the csv file is a separate review.\n# The csv file has 2 columns:\n# \n# * **text** (the review)\n# * **sentiment** (0 or 1 indicating a bad or good review)\n\n# Read the csv file\ndataset = pd.read_csv(path)\n\n# Review the first few entries in the dataset\ndataset.head()\n\n# Get the reviews from the csv file\n\n# Get the reviews from the text column\nreviews = dataset['text'].tolist()\n\n# Tokenize the text\n# Create the tokenizer, specify the OOV token, tokenize the text, then inspect the word index.\n\ntokenizer = Tokenizer(oov_token=\"<OOV>\")\ntokenizer.fit_on_texts(reviews)\n\nword_index = tokenizer.word_index\nprint(len(word_index))\nprint(word_index)\n\n\n# Generate sequences for the reviews\n# Generate a sequence for each review. Set the max length to match the longest\n# review. 
Add the padding zeros at the end of the review for reviews that are not\n# as long as the longest one.\n\nsequences = tokenizer.texts_to_sequences(reviews)\npadded_sequences = pad_sequences(sequences, padding='post')\n\n# What is the shape of the vector containing the padded sequences?\n# The shape shows the number of sequences and the length of each one.\nprint(padded_sequences.shape)\n\n# What is the first review?\nprint (reviews[0])\n\n# Show the sequence for the first review\nprint(padded_sequences[0])\n\n# Try printing the review and padded sequence for other elements.\n\n\n# Word Embeddings and Sentiment\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c04_nlp_embeddings_and_sentiment.ipynb\n\n# In this colab, you'll work with word embeddings and train a basic neural\n# network to predict text sentiment. At the end, you'll be able to visualize how\n# the network sees the related sentiment of each word in the dataset.\n\n## Get the dataset\n\n# We're going to use a dataset containing Amazon and Yelp reviews, with their\n# related sentiment (1 for positive, 0 for negative). This dataset was originally\n# extracted from\n# [here](https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set).\n\n# !wget --no-check-certificate -O sentiment.csv https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P\n\ndataset = pd.read_csv('sentiment.csv')\n\nsentences = dataset['text'].tolist()\nlabels = dataset['sentiment'].tolist()\n\n# Separate out the sentences and labels into training and test sets\ntraining_size = int(len(sentences) * 0.8)\n\ntraining_sentences = sentences[0:training_size]\ntesting_sentences = sentences[training_size:]\ntraining_labels = labels[0:training_size]\ntesting_labels = labels[training_size:]\n\n# Make labels into numpy arrays for use with the network later\ntraining_labels_final = np.array(training_labels)\ntesting_labels_final = np.array(testing_labels)\n\n## Tokenize the dataset\n\n# Tokenize the dataset, including padding and OOV\n\nvocab_size = 1000\nembedding_dim = 16\nmax_length = 100\ntrunc_type='post'\npadding_type='post'\noov_tok = \"<OOV>\"\n\n\ntokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\nword_index = tokenizer.word_index\nsequences = tokenizer.texts_to_sequences(training_sentences)\npadded = pad_sequences(sequences,maxlen=max_length, padding=padding_type, \n truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences,maxlen=max_length, \n padding=padding_type, truncating=trunc_type)\n\n## Review a Sequence\n\n# Let's quickly take a look at one of the padded sequences to ensure everything\n# above worked appropriately.\n\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])\n\nprint(decode_review(padded[1]))\nprint(training_sentences[1])\n\n## Train a Basic Sentiment Model with Embeddings\n\n# Build a basic sentiment network\n# Note the embedding layer is first, \n# and the output is only 1 node as it is either 0 or 1 (negative or positive)\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(6, activation='relu'),\n tf.keras.layers.Dense(1, 
activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\nmodel.summary()\n\nnum_epochs = 10\nmodel,history = savefit(model, padded, training_labels_final, epochs=num_epochs,\n validation_data=(testing_padded, testing_labels_final), verbose=0)\n\n## Get files for visualizing the network\n\n# The code below will download two files for visualizing how your network \"sees\"\n# the sentiment related to each word. Head to http://projector.tensorflow.org/\n# and load these files, then click the \"Sphereize\" checkbox.\n\n# First get the weights of the embedding layer\ne = model.layers[0]\nweights = e.get_weights()[0]\nprint(weights.shape) # shape: (vocab_size, embedding_dim)\n\n\n# Write out the embedding vectors and metadata\nout_v = io.open('vecs.tsv', 'w', encoding='utf-8')\nout_m = io.open('meta.tsv', 'w', encoding='utf-8')\nfor word_num in range(1, vocab_size):\n word = reverse_word_index[word_num]\n embeddings = weights[word_num]\n out_m.write(word + \"\\n\")\n out_v.write('\\t'.join([str(x) for x in embeddings]) + \"\\n\")\nout_v.close()\nout_m.close()\n\n# Download the files\ntry:\n from google.colab import files\nexcept ImportError:\n pass\nelse:\n files.download('vecs.tsv')\n files.download('meta.tsv')\n\n## Predicting Sentiment in New Reviews\n\n# Now that you've trained and visualized your network, take a look below at how\n# we can predict sentiment in new reviews the network has never seen before.\n\n# Use the model to predict a review \nfake_reviews = ['I love this phone', 'I hate spaghetti', \n 'Everything was cold',\n 'Everything was hot exactly as I wanted', \n 'Everything was green', \n 'the host seated us immediately',\n 'they gave us free chocolate cake', \n 'not sure about the wilted flowers on the table',\n 'only works when I stand on tippy toes', \n 'does not work when I stand on my head']\n\nprint(fake_reviews) \n\n# Create the sequences\npadding_type='post'\nsample_sequences = tokenizer.texts_to_sequences(fake_reviews)\nfakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length) \n\nprint('\\nHOT OFF THE PRESS! HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\\n') \n\nclasses = model.predict(fakes_padded)\n\n# The closer the class is to 1, the more positive the review is deemed to be\nfor x in range(len(fake_reviews)):\n print(fake_reviews[x])\n print(classes[x])\n print('\\n')\n\n# Try adding reviews of your own\n# Add some negative words (such as \"not\") to the good reviews and see what happens\n# For example:\n# they gave us free chocolate cake and did not charge us\n\n\n# Tweaking the Model\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c05_nlp_tweaking_the_model.ipynb\n\n# In this colab, you'll investigate how various tweaks to data processing and the\n# model itself can impact results. 
At the end, you'll once again be able to\n# visualize how the network sees the related sentiment of each word in the\n# dataset.\n\nsentences = dataset['text'].tolist()\nlabels = dataset['sentiment'].tolist()\n\n# Separate out the sentences and labels into training and test sets\ntraining_size = int(len(sentences) * 0.8)\n\ntraining_sentences = sentences[0:training_size]\ntesting_sentences = sentences[training_size:]\ntraining_labels = labels[0:training_size]\ntesting_labels = labels[training_size:]\n\n# Make labels into numpy arrays for use with the network later\ntraining_labels_final = np.array(training_labels)\ntesting_labels_final = np.array(testing_labels)\n\n## Tokenize the dataset (with tweaks!)\n\n# Now, we'll tokenize the dataset, but we can make some changes to this from\n# before. Previously, we used: \n\nvocab_size = 1000\nembedding_dim = 16\nmax_length = 100\ntrunc_type='post'\npadding_type='post'\n\n# How might changing the `vocab_size`, `embedding_dim` or `max_length` affect how\n# the model performs?\n\nvocab_size = 500\nembedding_dim = 16\nmax_length = 50\ntrunc_type='post'\npadding_type='post'\noov_tok = \"<OOV>\"\n\ntokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\nword_index = tokenizer.word_index\ntraining_sequences = tokenizer.texts_to_sequences(training_sentences)\ntraining_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\n## Train a Sentiment Model (with tweaks!)\n\n# We'll use a slightly different model here, using `GlobalAveragePooling1D`\n# instead of `Flatten()`.\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.GlobalAveragePooling1D(),\n tf.keras.layers.Dense(6, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\nmodel.summary()\n\nnum_epochs = 30\nmodel,history = savefit(model, training_padded, training_labels_final, epochs=num_epochs,\n validation_data=(testing_padded, testing_labels_final),verbose=0)\n\n## Visualize the training graph\n\n# You can use the code below to visualize the training and validation accuracy\n# while you try out different tweaks to the hyperparameters and model.\n\ndef plot_graphs(history, string):\n if not history:\n return\n plt.plot(history.history[string])\n plt.plot(history.history['val_'+string])\n plt.xlabel(\"Epochs\")\n plt.ylabel(string)\n plt.legend([string, 'val_'+string])\n plt.show()\n\nplot_graphs(history, \"accuracy\")\nplot_graphs(history, \"loss\")\n\n## Get files for visualizing the network\n\n# The code below will download two files for visualizing how your network \"sees\"\n# the sentiment related to each word. 
Head to http://projector.tensorflow.org/\n# and load these files, then click the checkbox to \"sphereize\" the data.\n\n# Note: You may run into errors with the projection if your `vocab_size` earlier\n# was larger than the actual number of words in the vocabulary, in which case\n# you'll need to decrease this variable and re-train in order to visualize.\n\n# First get the weights of the embedding layer\ne = model.layers[0]\nweights = e.get_weights()[0]\nprint(weights.shape) # shape: (vocab_size, embedding_dim)\n\nimport io\n\n# Create the reverse word index\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\n# Write out the embedding vectors and metadata\nout_v = io.open('vecs.tsv', 'w', encoding='utf-8')\nout_m = io.open('meta.tsv', 'w', encoding='utf-8')\nfor word_num in range(1, vocab_size):\n word = reverse_word_index[word_num]\n embeddings = weights[word_num]\n out_m.write(word + \"\\n\")\n out_v.write('\\t'.join([str(x) for x in embeddings]) + \"\\n\")\nout_v.close()\nout_m.close()\n\n# Download the files\ntry:\n from google.colab import files\nexcept ImportError:\n pass\nelse:\n files.download('vecs.tsv')\n files.download('meta.tsv')\n\n## Predicting Sentiment in New Reviews\n\n# Using LSTMs, CNNs, GRUs with a larger dataset\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l10c02_nlp_multiple_models_for_predicting_sentiment.ipynb\n\n# In this colab, you use different kinds of layers to see how they affect the\n# model.\n# You will use the glue/sst2 dataset, which is available through tensorflow_datasets. \n# The General Language Understanding Evaluation (GLUE) benchmark\n# (https://gluebenchmark.com/) is a collection of resources for training,\n# evaluating, and analyzing natural language understanding systems.\n# These resources include the Stanford Sentiment Treebank (SST) dataset that\n# consists of sentences from movie reviews and human annotations of their\n# sentiment. 
This colab uses version 2 of the SST dataset.\n# The splits are:\n# \n# * train\t67,349\n# * validation\t872\n# \n# and the column headings are:\n# \n# * sentence\n# * label\n\n# For more information about the dataset, see\n# [https://www.tensorflow.org/datasets/catalog/glue#gluesst2](https://www.tensorflow.org/datasets/catalog/glue#gluesst2)\n\n# Get the dataset.\n# It has 70000 items, so might take a while to download\ndataset, info = tfds.load('glue/sst2', with_info=True)\nprint(info.features)\nprint(info.features[\"label\"].num_classes)\nprint(info.features[\"label\"].names)\n\n# Get the training and validation datasets\ndataset_train, dataset_validation = dataset['train'], dataset['validation']\ndataset_train\n\n# Print some of the entries\nfor example in dataset_train.take(2):\n review, label = example[\"sentence\"], example[\"label\"]\n print(\"Review:\", review)\n print(\"Label: %d \\n\" % label.numpy())\n\n# Get the sentences and the labels\n# for both the training and the validation sets\ntraining_reviews = []\ntraining_labels = []\n\nvalidation_reviews = []\nvalidation_labels = []\n\n# The dataset has 67,000 training entries, but that's a lot to process here!\n\n# If you want to take the entire dataset: WARNING: takes longer!!\n# for item in dataset_train.take(-1):\n\n# Take 10,000 reviews\nfor item in dataset_train.take(10000):\n review, label = item[\"sentence\"], item[\"label\"]\n training_reviews.append(str(review.numpy()))\n training_labels.append(label.numpy())\n\nprint (\"\\nNumber of training reviews is: \", len(training_reviews))\n\n# print some of the reviews and labels\nfor i in range(0, 2):\n print (training_reviews[i])\n print (training_labels[i])\n\n# Get the validation data\n# there's only about 800 items, so take them all\nfor item in dataset_validation.take(-1): \n review, label = item[\"sentence\"], item[\"label\"]\n validation_reviews.append(str(review.numpy()))\n validation_labels.append(label.numpy())\n\nprint (\"\\nNumber of validation reviews is: \", len(validation_reviews))\n\n# Print some of the validation reviews and labels\nfor i in range(0, 2):\n print (validation_reviews[i])\n print (validation_labels[i])\n\n\n# Tokenize the words and sequence the sentences\n\n\n# There's a total of 21224 words in the reviews\n# but many of them are irrelevant like with, it, of, on.\n# If we take a subset of the training data, then the vocab\n# will be smaller.\n\n# A reasonable review might have about 50 words or so,\n# so we can set max_length to 50 (but feel free to change it as you like)\n\nvocab_size = 4000\nembedding_dim = 16\nmax_length = 50\ntrunc_type='post'\npad_type='post'\noov_tok = \"<OOV>\"\n\ntokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_reviews)\nword_index = tokenizer.word_index\n\n\n# Pad the sequences\n\n# Pad the sequences so that they are all the same length\ntraining_sequences = tokenizer.texts_to_sequences(training_reviews)\ntraining_padded = pad_sequences(training_sequences,maxlen=max_length, \n truncating=trunc_type, padding=pad_type)\n\nvalidation_sequences = tokenizer.texts_to_sequences(validation_reviews)\nvalidation_padded = pad_sequences(validation_sequences,maxlen=max_length)\n\ntraining_labels_final = np.array(training_labels)\nvalidation_labels_final = np.array(validation_labels)\n\n# Create the model using an Embedding\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.GlobalAveragePooling1D(), \n 
tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\nmodel.summary()\n\n# Train the model\n\nnum_epochs = 20\nmodel,history = savefit(model, training_padded, training_labels_final, epochs=num_epochs, \n validation_data=(validation_padded, validation_labels_final),verbose=0)\n\n\n# Plot the accurracy and loss\n\nplot_graphs(history, \"accuracy\")\nplot_graphs(history, \"loss\")\n\n# Write a function to predict the sentiment of reviews\n\n# Write some new reviews \n\nreview1 = \"\"\"I loved this movie\"\"\"\n\nreview2 = \"\"\"that was the worst movie I've ever seen\"\"\"\n\nreview3 = \"\"\"too much violence even for a Bond film\"\"\"\n\nreview4 = \"\"\"a captivating recounting of a cherished myth\"\"\"\n\nnew_reviews = [review1, review2, review3, review4]\n\n\n# Define a function to prepare the new reviews for use with a model\n# and then use the model to predict the sentiment of the new reviews \n\ndef predict_review(model, reviews):\n # Create the sequences\n padding_type='post'\n sample_sequences = tokenizer.texts_to_sequences(reviews)\n reviews_padded = pad_sequences(sample_sequences, padding=padding_type, \n maxlen=max_length) \n classes = model.predict(reviews_padded)\n for x in range(len(reviews_padded)):\n print(reviews[x])\n print(classes[x])\n print('\\n')\n\npredict_review(model, new_reviews)\n\n\n\n# Define a function to train and show the results of models with different layers\n\ndef fit_model_and_show_results (model, reviews):\n model.summary()\n model, history = savefit(model, training_padded, training_labels_final, epochs=num_epochs, \n validation_data=(validation_padded, validation_labels_final),verbose=0)\n plot_graphs(history, \"accuracy\")\n plot_graphs(history, \"loss\")\n predict_review(model, reviews)\n\n# Use a CNN\n\nnum_epochs = 30\n\nmodel_cnn = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Conv1D(16, 5, activation='relu'),\n tf.keras.layers.GlobalMaxPooling1D(),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\n# Default learning rate for the Adam optimizer is 0.001\n# Let's slow down the learning rate by 10.\nlearning_rate = 0.0001\nmodel_cnn.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate), \n metrics=['accuracy'])\n\nfit_model_and_show_results(model_cnn, new_reviews)\n\n# Use a GRU\n\nnum_epochs = 30\n\nmodel_gru = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32)),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nlearning_rate = 0.00003 # slower than the default learning rate\nmodel_gru.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate),\n metrics=['accuracy'])\n\nfit_model_and_show_results(model_gru, new_reviews)\n\n# Add a bidirectional LSTM\n\nnum_epochs = 30\n\nmodel_bidi_lstm = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim)), \n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nlearning_rate = 0.00003\nmodel_bidi_lstm.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate),\n metrics=['accuracy'])\nfit_model_and_show_results(model_bidi_lstm, new_reviews)\n\n# Use multiple bidirectional LSTMs\n\nnum_epochs = 30\n\nmodel_multiple_bidi_lstm = 
tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim, \n return_sequences=True)),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim)),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nlearning_rate = 0.0003\nmodel_multiple_bidi_lstm.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate),\n metrics=['accuracy'])\nfit_model_and_show_results(model_multiple_bidi_lstm, new_reviews)\n\n# Try some more reviews\n\n# Write some new reviews \n\nreview1 = \"\"\"I loved this movie\"\"\"\n\nreview2 = \"\"\"that was the worst movie I've ever seen\"\"\"\n\nreview3 = \"\"\"too much violence even for a Bond film\"\"\"\n\nreview4 = \"\"\"a captivating recounting of a cherished myth\"\"\"\n\nreview5 = \"\"\"I saw this movie yesterday and I was feeling low to start with,\n but it was such a wonderful movie that it lifted my spirits and brightened \n my day, you can\\'t go wrong with a movie with Whoopi Goldberg in it.\"\"\"\n\nreview6 = \"\"\"I don\\'t understand why it received an oscar recommendation\n for best movie, it was long and boring\"\"\"\n\nreview7 = \"\"\"the scenery was magnificent, the CGI of the dogs was so realistic I\n thought they were played by real dogs even though they talked!\"\"\"\n\nreview8 = \"\"\"The ending was so sad and yet so uplifting at the same time. \n I'm looking for an excuse to see it again\"\"\"\n\nreview9 = \"\"\"I had expected so much more from a movie made by the director \n who made my most favorite movie ever, I was very disappointed in the tedious \n story\"\"\"\n\nreview10 = \"I wish I could watch this movie every day for the rest of my life\"\n\nmore_reviews = [review1, review2, review3, review4, review5, review6, review7, \n review8, review9, review10]\n\n\nprint(\"============================\\n\",\"Embeddings only:\\n\", \"============================\")\npredict_review(model, more_reviews)\n\nprint(\"============================\\n\",\"With CNN\\n\", \"============================\")\npredict_review(model_cnn, more_reviews)\n\nprint(\"===========================\\n\",\"With bidirectional GRU\\n\", \"============================\")\npredict_review(model_gru, more_reviews)\n\nprint(\"===========================\\n\", \"With a single bidirectional LSTM:\\n\", \"===========================\")\npredict_review(model_bidi_lstm, more_reviews)\n\nprint(\"===========================\\n\", \"With multiple bidirectional LSTM:\\n\", \"==========================\")\npredict_review(model_multiple_bidi_lstm, more_reviews)\n\n\n# Constructing a Text Generation Model\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l10c03_nlp_constructing_text_generation_model.ipynb\n\n# Using most of the techniques you've already learned, it's now possible to\n# generate new text by predicting the next word that follows a given seed word.\n# To practice this method, we'll use the [Kaggle Song Lyrics\n# Dataset](https://www.kaggle.com/mousehead/songlyrics).\n\n## Import TensorFlow and related functions\n\n## Get the Dataset\n\n# As noted above, we'll utilize the [Song Lyrics\n# dataset](https://www.kaggle.com/mousehead/songlyrics) on Kaggle.\n\n# !wget --no-check-certificate https://drive.google.com/uc?id=1LiJFZd41ofrWoBtW-pMYsfz1w8Ny0Bj8 -O songdata.csv\n\n## **First 10 Songs**\n\n# Let's first look at just 10 songs from the dataset, and see how things perform.\n\n### Preprocessing\n\n# Let's perform some basic 
preprocessing to get rid of punctuation and make\n# everything lowercase. We'll then split the lyrics up by line and tokenize the\n# lyrics.\n\ndef tokenize_corpus(corpus, num_words=-1):\n # Fit a Tokenizer on the corpus\n if num_words > -1:\n tokenizer = Tokenizer(num_words=num_words)\n else:\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(corpus)\n return tokenizer\n\nimport string\n\ndef create_lyrics_corpus(dataset, field):\n # Remove all other punctuation\n dataset[field] = dataset[field].str.replace('[{}]'.format(string.punctuation), '')\n # Make it lowercase\n dataset[field] = dataset[field].str.lower()\n # Make it one long string to split by line\n lyrics = dataset[field].str.cat()\n corpus = lyrics.split('\\n')\n # Remove any trailing whitespace\n for l in range(len(corpus)):\n corpus[l] = corpus[l].rstrip()\n # Remove any empty lines\n corpus = [l for l in corpus if l != '']\n\n return corpus\n\n# Read the dataset from csv - just first 10 songs for now\ndataset = pd.read_csv('songdata.csv', dtype=str)[:10]\n# Create the corpus using the 'text' column containing lyrics\ncorpus = create_lyrics_corpus(dataset, 'text')\n# Tokenize the corpus\ntokenizer = tokenize_corpus(corpus)\n\ntotal_words = len(tokenizer.word_index) + 1\n\nprint(tokenizer.word_index)\nprint(total_words)\n\n### Create Sequences and Labels\n\n# After preprocessing, we next need to create sequences and labels. Creating the\n# sequences themselves is similar to before with `texts_to_sequences`, but also\n# including the use of\n# [N-Grams](https://towardsdatascience.com/introduction-to-language-models-n-gram-e323081503d9);\n# creating the labels will now utilize those sequences as well as utilize one-hot\n# encoding over all potential output words.\n\nsequences = []\nfor line in corpus:\n\ttoken_list = tokenizer.texts_to_sequences([line])[0]\n\tfor i in range(1, len(token_list)):\n\t\tn_gram_sequence = token_list[:i+1]\n\t\tsequences.append(n_gram_sequence)\n\n# Pad sequences for equal input length \nmax_sequence_len = max([len(seq) for seq in sequences])\nsequences = np.array(pad_sequences(sequences, maxlen=max_sequence_len, padding='pre'))\n\n# Split sequences between the \"input\" sequence and \"output\" predicted word\ninput_sequences, labels = sequences[:,:-1], sequences[:,-1]\n# One-hot encode the labels\none_hot_labels = tf.keras.utils.to_categorical(labels, num_classes=total_words)\n\n# Check out how some of our data is being stored\n# The Tokenizer has just a single index per word\nprint(tokenizer.word_index['know'])\nprint(tokenizer.word_index['feeling'])\n# Input sequences will have multiple indexes\nprint(input_sequences[5])\nprint(input_sequences[6])\n# And the one hot labels will be as long as the full spread of tokenized words\nprint(one_hot_labels[5])\nprint(one_hot_labels[6])\n\n### Train a Text Generation Model\n\n# Building an RNN to train our text generation model will be very similar to the\n# sentiment models you've built previously. The only real change necessary is to\n# make sure to use Categorical instead of Binary Cross Entropy as the loss\n# function - we could use Binary before since the sentiment was only 0 or 1, but\n# now there are hundreds of categories.\n\n# From there, we should also consider using *more* epochs than before, as text\n# generation can take a little longer to converge than sentiment analysis, *and*\n# we aren't working with all that much data yet. 
I'll set it at 200 epochs here\n# since we're only use part of the dataset, and training will tail off quite a\n# bit over that many epochs.\n\n\nmodel = Sequential()\nmodel.add(Embedding(total_words, 64, input_length=max_sequence_len-1))\nmodel.add(Bidirectional(LSTM(20)))\nmodel.add(Dense(total_words, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel,history = savefit(model, input_sequences, one_hot_labels, epochs=200, verbose=0)\n\n### View the Training Graph\n\nimport matplotlib.pyplot as plt\n\ndef plot_graphs(history, string):\n if not history:\n return\n plt.plot(history.history[string])\n plt.xlabel(\"Epochs\")\n plt.ylabel(string)\n plt.show()\n\nplot_graphs(history, 'accuracy')\n\n### Generate new lyrics!\n\n# It's finally time to generate some new lyrics from the trained model, and see\n# what we get. To do so, we'll provide some \"seed text\", or an input sequence for\n# the model to start with. We'll also decide just how long of an output sequence\n# we want - this could essentially be infinite, as the input plus the previous\n# output will be continuously fed in for a new output word (at least up to our\n # max sequence length).\n\nseed_text = \"im feeling chills\"\nnext_words = 100\n\nfor _ in range(next_words):\n\ttoken_list = tokenizer.texts_to_sequences([seed_text])[0]\n\ttoken_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\n\tpredicted = np.argmax(model.predict(token_list), axis=-1)\n\toutput_word = \"\"\n\tfor word, index in tokenizer.word_index.items():\n\t\tif index == predicted:\n\t\t\toutput_word = word\n\t\t\tbreak\n\tseed_text += \" \" + output_word\nprint(seed_text)\n\n\n# Optimizing the Text Generation Model\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l10c04_nlp_optimizing_the_text_generation_model.ipynb\n\n## 250 Songs\n\n# Now we've seen a model trained on just a small sample of songs, and how this\n# often leads to repetition as you get further along in trying to generate new\n# text. Let's switch to using the 250 songs instead, and see if our output\n# improves. 
This will actually be nearly 10K lines of lyrics, which should be\n# sufficient.\n\n# Note that we won't use the full dataset here as it will take up quite a bit of\n# RAM and processing time, but you're welcome to try doing so on your own later.\n# If interested, you'll likely want to use only some of the more common words for\n# the Tokenizer, which will help shrink processing time and memory needed \n# (or else you'd have an output array hundreds of thousands of words long).\n\n### Preprocessing\n\ndef tokenize_corpus(corpus, num_words=-1):\n # Fit a Tokenizer on the corpus\n if num_words > -1:\n tokenizer = Tokenizer(num_words=num_words)\n else:\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(corpus)\n return tokenizer\n\ndef create_lyrics_corpus(dataset, field):\n # Remove all other punctuation\n dataset[field] = dataset[field].str.replace('[{}]'.format(string.punctuation), '')\n # Make it lowercase\n dataset[field] = dataset[field].str.lower()\n # Make it one long string to split by line\n lyrics = dataset[field].str.cat()\n corpus = lyrics.split('\\n')\n # Remove any trailing whitespace\n for l in range(len(corpus)):\n corpus[l] = corpus[l].rstrip()\n # Remove any empty lines\n corpus = [l for l in corpus if l != '']\n\n return corpus\n\ndef tokenize_corpus(corpus, num_words=-1):\n # Fit a Tokenizer on the corpus\n if num_words > -1:\n tokenizer = Tokenizer(num_words=num_words)\n else:\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(corpus)\n return tokenizer\n\n# Read the dataset from csv - this time with 250 songs\ndataset = pd.read_csv('songdata.csv', dtype=str)[:250]\n# Create the corpus using the 'text' column containing lyrics\ncorpus = create_lyrics_corpus(dataset, 'text')\n# Tokenize the corpus\ntokenizer = tokenize_corpus(corpus, num_words=2000)\ntotal_words = tokenizer.num_words\n\n# There should be a lot more words now\nprint(total_words)\n\n### Create Sequences and Labels\n\nsequences = []\nfor line in corpus:\n\ttoken_list = tokenizer.texts_to_sequences([line])[0]\n\tfor i in range(1, len(token_list)):\n\t\tn_gram_sequence = token_list[:i+1]\n\t\tsequences.append(n_gram_sequence)\n\n# Pad sequences for equal input length \nmax_sequence_len = max([len(seq) for seq in sequences])\nsequences = np.array(pad_sequences(sequences, maxlen=max_sequence_len, padding='pre'))\n\n# Split sequences between the \"input\" sequence and \"output\" predicted word\ninput_sequences, labels = sequences[:,:-1], sequences[:,-1]\n# One-hot encode the labels\none_hot_labels = tf.keras.utils.to_categorical(labels, num_classes=total_words)\n\n### Train a (Better) Text Generation Model\n\n# With more data, we'll cut off after 100 epochs to avoid keeping you here all\n# day. 
You'll also want to change your runtime type to GPU if you haven't already\n# (you'll need to re-run the above cells if you change runtimes).\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional\n\nmodel = Sequential()\nmodel.add(Embedding(total_words, 64, input_length=max_sequence_len-1))\nmodel.add(Bidirectional(LSTM(20)))\nmodel.add(Dense(total_words, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel,history = savefit(model, input_sequences, one_hot_labels, epochs=100, verbose=0)\n\n### View the Training Graph\n\nplot_graphs(history, 'accuracy')\n\n### Generate better lyrics!\n\n# This time around, we should be able to get a more interesting output with less\n# repetition.\n\nseed_text = \"im feeling chills\"\nnext_words = 100\n \nfor _ in range(next_words):\n\ttoken_list = tokenizer.texts_to_sequences([seed_text])[0]\n\ttoken_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\n\tpredicted = np.argmax(model.predict(token_list), axis=-1)\n\toutput_word = \"\"\n\tfor word, index in tokenizer.word_index.items():\n\t\tif index == predicted:\n\t\t\toutput_word = word\n\t\t\tbreak\n\tseed_text += \" \" + output_word\nprint(seed_text)\n\n### Varying the Possible Outputs\n\n# In running the above, you may notice that the same seed text will generate\n# similar outputs. This is because the code is currently always choosing the top\n# predicted class as the next word. What if you wanted more variance in the\n# output? \n\n# Switching from `model.predict_classes` to `model.predict_proba` will get us all\n# of the class probabilities. We can combine this with `np.random.choice` to\n# select a given predicted output based on a probability, thereby giving a bit\n# more randomness to our outputs.\n\n# Test the method with just the first word after the seed text\nseed_text = \"im feeling chills\"\nnext_words = 100\n \ntoken_list = tokenizer.texts_to_sequences([seed_text])[0]\ntoken_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\npredicted_probs = model.predict(token_list)[0]\npredicted = np.random.choice([x for x in range(len(predicted_probs))], \n p=predicted_probs)\n# Running this cell multiple times should get you some variance in output\nprint(predicted)\n\n# Use this process for the full output generation\nseed_text = \"im feeling chills\"\nnext_words = 100\n \nfor _ in range(next_words):\n token_list = tokenizer.texts_to_sequences([seed_text])[0]\n token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\n predicted_probs = model.predict(token_list)[0]\n predicted = np.random.choice([x for x in range(len(predicted_probs))],\n p=predicted_probs)\n output_word = \"\"\n for word, index in tokenizer.word_index.items():\n if index == predicted:\n output_word = word\n break\n seed_text += \" \" + output_word\nprint(seed_text)\n", "#!/usr/bin/env python3\n\n#tensorflow_examples/courses/udacity_deep_learning/6_lstm.ipynb\n\nimport os\nimport numpy as np\nimport random\nimport string\nimport tensorflow as tf\nimport zipfile\nfrom six.moves import range\nfrom six.moves.urllib.request import urlretrieve\nfrom tensorflow.python.training import gradient_descent\n\nurl = 'http://mattmahoney.net/dc/'\n\ndef maybe_download(filename, expected_bytes):\n \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url 
+ filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename\n\nfilename = maybe_download('../data/text8.zip', 31344016)\n\ndef read_data(filename):\n with zipfile.ZipFile(filename) as f:\n name = f.namelist()[0]\n data = tf.compat.as_str(f.read(name))\n return data\n \ntext = read_data(filename)\nprint('Data size %d' % len(text))\n\n#Create a small validation set.\n\nvalid_size = 1000\nvalid_text = text[:valid_size]\ntrain_text = text[valid_size:]\ntrain_size = len(train_text)\nprint(train_size, train_text[:64])\nprint(valid_size, valid_text[:64])\n\n#Utility functions to map characters to vocabulary IDs and back.\n\nvocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '\nfirst_letter = ord(string.ascii_lowercase[0])\n\ndef char2id(char):\n if char in string.ascii_lowercase:\n return ord(char) - first_letter + 1\n elif char == ' ':\n return 0\n else:\n print('Unexpected character: %s' % char)\n return 0\n \ndef id2char(dictid):\n if dictid > 0:\n return chr(dictid + first_letter - 1)\n else:\n return ' '\n\nprint(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))\nprint(id2char(1), id2char(26), id2char(0))\n\n#Function to generate a training batch for the LSTM model.\n\nbatch_size=64\nnum_unrollings=10\n\nclass BatchGenerator(object):\n def __init__(self, text, batch_size, num_unrollings):\n self._text = text\n self._text_size = len(text)\n self._batch_size = batch_size\n self._num_unrollings = num_unrollings\n segment = self._text_size // batch_size\n self._cursor = [ offset * segment for offset in range(batch_size)]\n self._last_batch = self._next_batch()\n \n def _next_batch(self):\n \"\"\"Generate a single batch from the current cursor position in the data.\"\"\"\n batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)\n for b in range(self._batch_size):\n batch[b, char2id(self._text[self._cursor[b]])] = 1.0\n self._cursor[b] = (self._cursor[b] + 1) % self._text_size\n return batch\n \n def next(self):\n \"\"\"Generate the next array of batches from the data. 
The array consists of\n the last batch of the previous array, followed by num_unrollings new ones.\n \"\"\"\n batches = [self._last_batch]\n for step in range(self._num_unrollings):\n batches.append(self._next_batch())\n self._last_batch = batches[-1]\n return batches\n\ndef characters(probabilities):\n \"\"\"Turn a 1-hot encoding or a probability distribution over the possible\n characters back into its (most likely) character representation.\"\"\"\n return [id2char(c) for c in np.argmax(probabilities, 1)]\n\ndef batches2string(batches):\n \"\"\"Convert a sequence of batches back into their (most likely) string\n representation.\"\"\"\n s = [''] * batches[0].shape[0]\n for b in batches:\n s = [''.join(x) for x in zip(s, characters(b))]\n return s\n\ntrain_batches = BatchGenerator(train_text, batch_size, num_unrollings)\nvalid_batches = BatchGenerator(valid_text, 1, 1)\n\nprint(batches2string(train_batches.next()))\nprint(batches2string(train_batches.next()))\nprint(batches2string(valid_batches.next()))\nprint(batches2string(valid_batches.next()))\n\ndef logprob(predictions, labels):\n \"\"\"Log-probability of the true labels in a predicted batch.\"\"\"\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]\n\ndef sample_distribution(distribution):\n \"\"\"Sample one element from a distribution assumed to be an array of normalized\n probabilities.\n \"\"\"\n r = random.uniform(0, 1)\n s = 0\n for i in range(len(distribution)):\n s += distribution[i]\n if s >= r:\n return i\n return len(distribution) - 1\n\ndef sample(prediction):\n \"\"\"Turn a (column) prediction into 1-hot encoded samples.\"\"\"\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p\n\ndef random_distribution():\n \"\"\"Generate a random column of probabilities.\"\"\"\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]\n\n#Simple LSTM Model.\nnum_nodes = 64\n\ngraph = tf.Graph()\nwith graph.as_default():\n \n # Parameters:\n # Input gate: input, previous output, and bias.\n ix = tf.Variable(tf.compat.v1.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n im = tf.Variable(tf.compat.v1.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n ib = tf.Variable(tf.zeros([1, num_nodes]))\n # Forget gate: input, previous output, and bias.\n fx = tf.Variable(tf.compat.v1.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n fm = tf.Variable(tf.compat.v1.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n fb = tf.Variable(tf.zeros([1, num_nodes]))\n # Memory cell: input, state and bias. 
\n cx = tf.Variable(tf.compat.v1.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n cm = tf.Variable(tf.compat.v1.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n cb = tf.Variable(tf.zeros([1, num_nodes]))\n # Output gate: input, previous output, and bias.\n ox = tf.Variable(tf.compat.v1.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n om = tf.Variable(tf.compat.v1.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))\n ob = tf.Variable(tf.zeros([1, num_nodes]))\n # Variables saving state across unrollings.\n saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)\n # Classifier weights and biases.\n w = tf.Variable(tf.compat.v1.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))\n b = tf.Variable(tf.zeros([vocabulary_size]))\n \n # Definition of the cell computation.\n def lstm_cell(i, o, state):\n \"\"\"Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf\n Note that in this formulation, we omit the various connections between the\n previous state and the gates.\"\"\"\n input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)\n forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)\n update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb\n state = forget_gate * state + input_gate * tf.tanh(update)\n output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)\n return output_gate * tf.tanh(state), state\n\n # Input data.\n train_data = list()\n for _ in range(num_unrollings + 1):\n train_data.append(\n tf.compat.v1.placeholder(tf.float32, shape=[batch_size,vocabulary_size]))\n train_inputs = train_data[:num_unrollings]\n train_labels = train_data[1:] # labels are inputs shifted by one time step.\n\n # Unrolled LSTM loop.\n outputs = list()\n output = saved_output\n state = saved_state\n for i in train_inputs:\n output, state = lstm_cell(i, output, state)\n outputs.append(output)\n\n # State saving across unrollings.\n with tf.control_dependencies([saved_output.assign(output),\n saved_state.assign(state)]):\n # Classifier.\n logits = tf.compat.v1.nn.xw_plus_b(tf.concat(outputs, 0), w, b)\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=tf.concat(train_labels, 0), logits=logits))\n\n # Optimizer.\n global_step = tf.Variable(0)\n learning_rate = tf.compat.v1.train.exponential_decay(\n 10.0, global_step, 5000, 0.1, staircase=True)\n optimizer = gradient_descent.GradientDescentOptimizer(learning_rate)\n gradients, v = zip(*optimizer.compute_gradients(loss))\n gradients, _ = tf.clip_by_global_norm(gradients, 1.25)\n optimizer = optimizer.apply_gradients(\n zip(gradients, v), global_step=global_step)\n\n # Predictions.\n train_prediction = tf.nn.softmax(logits)\n \n # Sampling and validation eval: batch 1, no unrolling.\n sample_input = tf.compat.v1.placeholder(tf.float32, shape=[1, vocabulary_size])\n saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))\n saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))\n reset_sample_state = tf.group(\n saved_sample_output.assign(tf.zeros([1, num_nodes])),\n saved_sample_state.assign(tf.zeros([1, num_nodes])))\n sample_output, sample_state = lstm_cell(\n sample_input, saved_sample_output, saved_sample_state)\n with tf.control_dependencies([saved_sample_output.assign(sample_output),\n saved_sample_state.assign(sample_state)]):\n sample_prediction = tf.nn.softmax(tf.compat.v1.nn.xw_plus_b(sample_output, w, b))\n\nnum_steps = 
7001\nsummary_frequency = 100\n\nwith tf.compat.v1.Session(graph=graph) as session:\n tf.compat.v1.global_variables_initializer().run()\n print('Initialized')\n mean_loss = 0\n for step in range(num_steps):\n batches = train_batches.next()\n feed_dict = dict()\n for i in range(num_unrollings + 1):\n feed_dict[train_data[i]] = batches[i]\n _, l, predictions, lr = session.run(\n [optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)\n mean_loss += l\n if step % summary_frequency == 0:\n if step > 0:\n mean_loss = mean_loss / summary_frequency\n # The mean loss is an estimate of the loss over the last few batches.\n print(\n 'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))\n mean_loss = 0\n labels = np.concatenate(list(batches)[1:])\n print('Minibatch perplexity: %.2f' % float(\n np.exp(logprob(predictions, labels))))\n if step % (summary_frequency * 10) == 0:\n # Generate some samples.\n print('=' * 80)\n for _ in range(5):\n feed = sample(random_distribution())\n sentence = characters(feed)[0]\n reset_sample_state.run()\n for _ in range(79):\n prediction = sample_prediction.eval({sample_input: feed})\n feed = sample(prediction)\n sentence += characters(feed)[0]\n print(sentence)\n print('=' * 80)\n # Measure validation set perplexity.\n reset_sample_state.run()\n valid_logprob = 0\n for _ in range(valid_size):\n b = valid_batches.next()\n predictions = sample_prediction.eval({sample_input: b[0]})\n valid_logprob = valid_logprob + logprob(predictions, b[1])\n print('Validation set perplexity: %.2f' % float(np.exp(\n valid_logprob / valid_size)))\n\n" ]
[ [ "tensorflow.keras.layers.Masking", "tensorflow.keras.Input", "tensorflow.keras.regularizers.l2", "numpy.random.rand", "tensorflow.keras.models.Sequential" ], [ "matplotlib.pyplot.legend", "tensorflow.keras.layers.GlobalAveragePooling1D", "matplotlib.pyplot.plot", "pandas.read_csv", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.GRU", "tensorflow.keras.layers.Flatten", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.GlobalMaxPooling1D", "tensorflow.keras.layers.Dense", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.ylabel", "tensorflow.keras.preprocessing.text.Tokenizer", "matplotlib.use", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.utils.get_file", "tensorflow.keras.layers.LSTM", "matplotlib.pyplot.xlabel", "tensorflow.keras.utils.to_categorical", "tensorflow.keras.preprocessing.sequence.pad_sequences" ], [ "tensorflow.concat", "tensorflow.zeros", "tensorflow.tanh", "tensorflow.compat.v1.truncated_normal", "numpy.exp", "tensorflow.Graph", "tensorflow.Variable", "numpy.argmax", "numpy.zeros", "tensorflow.matmul", "numpy.log", "tensorflow.compat.v1.train.exponential_decay", "numpy.sum", "tensorflow.python.training.gradient_descent.GradientDescentOptimizer", "tensorflow.nn.softmax", "tensorflow.compat.v1.nn.xw_plus_b", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.Session", "tensorflow.clip_by_global_norm", "tensorflow.compat.v1.placeholder", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pyomeca/BiorbdOptim
[ "f07094668788d3e1b5e8cd1c65fbf0c7dc7cc978" ]
[ "bioptim/limits/penalty_option.py" ]
[ "from typing import Any, Union, Callable\n\nimport biorbd_casadi as biorbd\nfrom casadi import horzcat, vertcat, Function, MX, SX\nimport numpy as np\n\nfrom .penalty_node import PenaltyNodeList\nfrom ..misc.enums import Node, PlotType, ControlType, ConstraintType, IntegralApproximation\nfrom ..misc.mapping import Mapping, BiMapping\nfrom ..misc.options import OptionGeneric\n\n\nclass PenaltyOption(OptionGeneric):\n \"\"\"\n A placeholder for a penalty\n\n Attributes\n ----------\n node: Node\n The node within a phase on which the penalty is acting on\n quadratic: bool\n If the penalty is quadratic\n rows: Union[list, tuple, range, np.ndarray]\n The index of the rows in the penalty to keep\n cols: Union[list, tuple, range, np.ndarray]\n The index of the columns in the penalty to keep\n expand: bool\n If the penalty should be expanded or not\n target: np.array(target)\n A target to track for the penalty\n target_plot_name: str\n The plot name of the target\n target_to_plot: np.ndarray\n The subset of the target to plot\n plot_target: bool\n If the target should be plotted\n custom_function: Callable\n A user defined function to call to get the penalty\n node_idx: Union[list, tuple, Node]\n The index in nlp to apply the penalty to\n dt: float\n The delta time\n function: Function\n The casadi function of the penalty\n weighted_function: Function\n The casadi function of the penalty weighted\n derivative: bool\n If the minimization is applied on the numerical derivative of the state [f(t+1) - f(t)]\n explicit_derivative: bool\n If the minimization is applied to derivative of the penalty [f(t, t+1)]\n integration_rule: IntegralApproximation\n The integration rule to use for the penalty\n transition: bool\n If the penalty is a transition\n phase_pre_idx: int\n The index of the nlp of pre when penalty is transition\n phase_post_idx: int\n The index of the nlp of post when penalty is transition\n constraint_type: ConstraintType\n If the penalty is from the user or from bioptim (implicit or internal)\n multi_thread: bool\n If the penalty is multithreaded\n\n Methods\n -------\n set_penalty(self, penalty: Union[MX, SX], all_pn: PenaltyNodeList)\n Prepare the dimension and index of the penalty (including the target)\n _set_dim_idx(self, dim: Union[list, tuple, range, np.ndarray], n_rows: int)\n Checks if the variable index is consistent with the requested variable.\n _check_target_dimensions(self, all_pn: PenaltyNodeList, n_time_expected: int)\n Checks if the variable index is consistent with the requested variable.\n If the function returns, all is okay\n _set_penalty_function(self, all_pn: Union[PenaltyNodeList, list, tuple], fcn: Union[MX, SX])\n Finalize the preparation of the penalty (setting function and weighted_function)\n add_target_to_plot(self, all_pn: PenaltyNodeList, combine_to: str)\n Interface to the plot so it can be properly added to the proper plot\n _finish_add_target_to_plot(self, all_pn: PenaltyNodeList)\n Internal interface to add (after having check the target dimensions) the target to the plot if needed\n add_or_replace_to_penalty_pool(self, ocp, nlp)\n Doing some configuration on the penalty and add it to the list of penalty\n _add_penalty_to_pool(self, all_pn: PenaltyNodeList)\n Return the penalty pool for the specified penalty (abstract)\n clear_penalty(self, ocp, nlp)\n Resets a penalty. 
A negative penalty index creates a new empty penalty (abstract)\n _get_penalty_node_list(self, ocp, nlp) -> PenaltyNodeList\n Get the actual node (time, X and U) specified in the penalty\n \"\"\"\n\n def __init__(\n self,\n penalty: Any,\n phase: int = 0,\n node: Union[Node, list, tuple] = Node.DEFAULT,\n target: Union[int, float, np.array, list[int], list[float], list[np.array]] = None,\n quadratic: bool = None,\n weight: float = 1,\n derivative: bool = False,\n explicit_derivative: bool = False,\n integrate: bool = False,\n integration_rule: IntegralApproximation = IntegralApproximation.DEFAULT,\n index: list = None,\n rows: Union[list, tuple, range, np.ndarray] = None,\n cols: Union[list, tuple, range, np.ndarray] = None,\n states_mapping: BiMapping = None,\n custom_function: Callable = None,\n constraint_type: ConstraintType = ConstraintType.USER,\n multi_thread: bool = None,\n expand: bool = False,\n **params: Any,\n ):\n \"\"\"\n Parameters\n ----------\n penalty: PenaltyType\n The actual penalty\n phase: int\n The phase the penalty is acting on\n node: Union[Node, list, tuple]\n The node within a phase on which the penalty is acting on\n target: Union[int, float, np.array, list[int], list[float], list[np.array]]\n A target to track for the penalty\n quadratic: bool\n If the penalty is quadratic\n weight: float\n The weighting applied to this specific penalty\n derivative: bool\n If the function should be evaluated at X and X+1\n explicit_derivative: bool\n If the function should be evaluated at [X, X+1]\n integrate: bool\n If the function should be integrated\n integration_rule: IntegralApproximation\n The rule to use for the integration\n index: int\n The component index the penalty is acting on\n custom_function: Callable\n A user defined function to call to get the penalty\n constraint_type: ConstraintType\n If the penalty is from the user or from bioptim (implicit or internal)\n **params: dict\n Generic parameters for the penalty\n \"\"\"\n\n super(PenaltyOption, self).__init__(phase=phase, type=penalty, **params)\n self.node: Union[Node, list, tuple] = node\n self.quadratic = quadratic\n self.integration_rule = integration_rule\n\n if index is not None and rows is not None:\n raise ValueError(\"rows and index cannot be defined simultaneously since they are the same variable\")\n self.rows = rows if rows is not None else index\n self.cols = cols\n self.expand = expand\n\n self.target = None\n if target is not None:\n target = np.array(target)\n if isinstance(target, int) or isinstance(target, float) or isinstance(target, np.ndarray):\n target = [target]\n self.target = []\n for t in target:\n self.target.append(np.array(t))\n if len(self.target[-1].shape) == 0:\n self.target[-1] = self.target[-1][np.newaxis]\n if len(self.target[-1].shape) == 1:\n self.target[-1] = self.target[-1][:, np.newaxis]\n if len(self.target) == 1 and (\n self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL\n ):\n if self.node == Node.ALL or self.node == Node.DEFAULT:\n self.target = [self.target[0][:, :-1], self.target[0][:, 1:]]\n else:\n raise NotImplementedError(\n f\"A list of 2 elements is required with {self.node} and TRAPEZOIDAL Integration\"\n f\"except for Node.NODE_ALL and Node.NODE_DEFAULT\"\n \"which can be automatically generated\"\n )\n\n self.target_plot_name = None\n self.target_to_plot = None\n # todo: not implemented yet for trapezoidal integration\n self.plot_target = (\n False\n if (\n self.integration_rule == 
IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL\n )\n else True\n )\n\n self.states_mapping = states_mapping\n\n self.custom_function = custom_function\n\n self.node_idx = []\n self.dt = 0\n self.weight = weight\n self.function: Union[Function, None] = None\n self.function_non_threaded: Union[Function, None] = None\n self.weighted_function: Union[Function, None] = None\n self.weighted_function_non_threaded: Union[Function, None] = None\n self.derivative = derivative\n self.explicit_derivative = explicit_derivative\n self.integrate = integrate\n self.transition = False\n self.multinode_constraint = False\n self.phase_pre_idx = None\n self.phase_post_idx = None\n if self.derivative and self.explicit_derivative:\n raise ValueError(\"derivative and explicit_derivative cannot be both True\")\n self.constraint_type = constraint_type\n\n self.multi_thread = multi_thread\n\n def set_penalty(self, penalty: Union[MX, SX], all_pn: PenaltyNodeList):\n \"\"\"\n Prepare the dimension and index of the penalty (including the target)\n\n Parameters\n ----------\n penalty: Union[MX, SX],\n The actual penalty function\n all_pn: PenaltyNodeList\n The penalty node elements\n \"\"\"\n\n self.rows = self._set_dim_idx(self.rows, penalty.rows())\n self.cols = self._set_dim_idx(self.cols, penalty.columns())\n if self.target is not None:\n self._check_target_dimensions(all_pn, len(all_pn.t))\n if self.plot_target:\n self._finish_add_target_to_plot(all_pn)\n self._set_penalty_function(all_pn, penalty)\n self._add_penalty_to_pool(all_pn)\n\n def _set_dim_idx(self, dim: Union[list, tuple, range, np.ndarray], n_rows: int):\n \"\"\"\n Checks if the variable index is consistent with the requested variable.\n\n Parameters\n ----------\n dim: Union[list, tuple, range]\n The dimension to set\n n_rows: int\n The expected row shape\n\n Returns\n -------\n The formatted indices\n \"\"\"\n\n if dim is None:\n dim = range(n_rows)\n else:\n if isinstance(dim, int):\n dim = [dim]\n if max(dim) > n_rows:\n raise RuntimeError(f\"{self.name} index cannot be higher than nx ({n_rows})\")\n dim = np.array(dim)\n if not np.issubdtype(dim.dtype, np.integer):\n raise RuntimeError(f\"{self.name} index must be a list of integer\")\n return dim\n\n def _check_target_dimensions(self, all_pn: PenaltyNodeList, n_time_expected: int):\n \"\"\"\n Checks if the variable index is consistent with the requested variable.\n If the function returns, all is okay\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The penalty node elements\n n_time_expected: Union[list, tuple]\n The expected shape (n_rows, ns) of the data to track\n \"\"\"\n\n if self.integration_rule == IntegralApproximation.RECTANGLE:\n n_dim = len(self.target[0].shape)\n if n_dim != 2 and n_dim != 3:\n raise RuntimeError(\n f\"target cannot be a vector (it can be a matrix with time dimension equals to 1 though)\"\n )\n if self.target[0].shape[-1] == 1:\n self.target = np.repeat(self.target, n_time_expected, axis=-1)\n\n shape = (\n (len(self.rows), n_time_expected) if n_dim == 2 else (len(self.rows), len(self.cols), n_time_expected)\n )\n if self.target[0].shape != shape:\n raise RuntimeError(\n f\"target {self.target[0].shape} does not correspond to expected size {shape} for penalty {self.name}\"\n )\n\n # If the target is on controls and control is constant, there will be one value missing\n if all_pn is not None:\n if (\n all_pn.nlp.control_type == ControlType.CONSTANT\n and all_pn.nlp.ns in all_pn.t\n and 
self.target[0].shape[-1] == all_pn.nlp.ns\n ):\n if all_pn.t[-1] != all_pn.nlp.ns:\n raise NotImplementedError(\"Modifying target for END not being last is not implemented yet\")\n self.target[0] = np.concatenate(\n (self.target[0], np.nan * np.zeros((self.target[0].shape[0], 1))), axis=1\n )\n elif (\n self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n ):\n\n target_dim = len(self.target)\n if target_dim != 2:\n raise RuntimeError(f\"targets with trapezoidal integration rule need to get a list of two elements.\")\n\n for target in self.target:\n n_dim = len(target.shape)\n if n_dim != 2 and n_dim != 3:\n raise RuntimeError(\n f\"target cannot be a vector (it can be a matrix with time dimension equals to 1 though)\"\n )\n if target.shape[-1] == 1:\n target = np.repeat(target, n_time_expected, axis=-1)\n\n shape = (\n (len(self.rows), n_time_expected - 1)\n if n_dim == 2\n else (len(self.rows), len(self.cols), n_time_expected - 1)\n )\n\n for target in self.target:\n if target.shape != shape:\n raise RuntimeError(\n f\"target {target.shape} does not correspond to expected size {shape} for penalty {self.name}\"\n )\n\n # If the target is on controls and control is constant, there will be one value missing\n if all_pn is not None:\n if (\n all_pn.nlp.control_type == ControlType.CONSTANT\n and all_pn.nlp.ns in all_pn.t\n and self.target[0].shape[-1] == all_pn.nlp.ns - 1\n and self.target[1].shape[-1] == all_pn.nlp.ns - 1\n ):\n if all_pn.t[-1] != all_pn.nlp.ns:\n raise NotImplementedError(\"Modifying target for END not being last is not implemented yet\")\n self.target = np.concatenate((self.target, np.nan * np.zeros((self.target.shape[0], 1))), axis=1)\n\n def _set_penalty_function(self, all_pn: Union[PenaltyNodeList, list, tuple], fcn: Union[MX, SX]):\n \"\"\"\n Finalize the preparation of the penalty (setting function and weighted_function)\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The nodes\n fcn: Union[MX, SX]\n The value of the penalty function\n \"\"\"\n\n # Sanity checks\n if self.transition and self.explicit_derivative:\n raise ValueError(\"transition and explicit_derivative cannot be true simultaneously\")\n if self.transition and self.derivative:\n raise ValueError(\"transition and derivative cannot be true simultaneously\")\n if self.derivative and self.explicit_derivative:\n raise ValueError(\"derivative and explicit_derivative cannot be true simultaneously\")\n\n def get_u(nlp, u: Union[MX, SX], dt: Union[MX, SX]):\n \"\"\"\n Get the control at a given time\n\n Parameters\n ----------\n nlp: NonlinearProgram\n The nonlinear program\n u: Union[MX, SX]\n The control matrix\n dt: Union[MX, SX]\n The time a which control should be computed\n\n Returns\n -------\n The control at a given time\n \"\"\"\n\n if nlp.control_type == ControlType.CONSTANT:\n return u\n elif nlp.control_type == ControlType.LINEAR_CONTINUOUS:\n return u[:, 0] + (u[:, 1] - u[:, 0]) * dt\n else:\n raise RuntimeError(f\"{nlp.control_type} ControlType not implemented yet\")\n\n return u\n\n if self.multinode_constraint or self.transition:\n ocp = all_pn[0].ocp\n nlp = all_pn[0].nlp\n nlp_post = all_pn[1].nlp\n name = self.name.replace(\"->\", \"_\").replace(\" \", \"_\").replace(\",\", \"_\")\n states_pre = nlp.states.cx_end\n states_post = nlp_post.states.cx\n controls_pre = nlp.controls.cx_end\n controls_post = nlp_post.controls.cx\n state_cx = vertcat(states_pre, states_post)\n control_cx = vertcat(controls_pre, 
controls_post)\n\n else:\n ocp = all_pn.ocp\n nlp = all_pn.nlp\n name = self.name\n if self.integrate:\n state_cx = horzcat(*([all_pn.nlp.states.cx] + all_pn.nlp.states.cx_intermediates_list))\n control_cx = all_pn.nlp.controls.cx\n else:\n state_cx = all_pn.nlp.states.cx\n control_cx = all_pn.nlp.controls.cx\n if self.explicit_derivative:\n if self.derivative:\n raise RuntimeError(\"derivative and explicit_derivative cannot be simultaneously true\")\n state_cx = horzcat(state_cx, all_pn.nlp.states.cx_end)\n control_cx = horzcat(control_cx, all_pn.nlp.controls.cx_end)\n\n param_cx = nlp.cx(nlp.parameters.cx)\n\n # Do not use nlp.add_casadi_func because all functions must be registered\n sub_fcn = fcn[self.rows, self.cols]\n self.function = biorbd.to_casadi_func(name, sub_fcn, state_cx, control_cx, param_cx, expand=self.expand)\n self.function_non_threaded = self.function\n\n if self.derivative:\n state_cx = horzcat(all_pn.nlp.states.cx_end, all_pn.nlp.states.cx)\n control_cx = horzcat(all_pn.nlp.controls.cx_end, all_pn.nlp.controls.cx)\n self.function = biorbd.to_casadi_func(\n f\"{name}\",\n self.function(all_pn.nlp.states.cx_end, all_pn.nlp.controls.cx_end, param_cx)\n - self.function(all_pn.nlp.states.cx, all_pn.nlp.controls.cx, param_cx),\n state_cx,\n control_cx,\n param_cx,\n )\n\n dt_cx = nlp.cx.sym(\"dt\", 1, 1)\n is_trapezoidal = (\n self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL\n )\n target_shape = tuple(\n [\n len(self.rows),\n len(self.cols) + 1 if is_trapezoidal else len(self.cols),\n ]\n )\n target_cx = nlp.cx.sym(\"target\", target_shape)\n weight_cx = nlp.cx.sym(\"weight\", 1, 1)\n exponent = 2 if self.quadratic and self.weight else 1\n\n if is_trapezoidal:\n # Hypothesis: the function is continuous on states\n # it neglects the discontinuities at the beginning of the optimization\n state_cx = (\n horzcat(all_pn.nlp.states.cx, all_pn.nlp.states.cx_end)\n if self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n else all_pn.nlp.states.cx\n )\n # to handle piecewise constant in controls we have to compute the value for the end of the interval\n # which only relies on the value of the control at the beginning of the interval\n control_cx = (\n horzcat(all_pn.nlp.controls.cx)\n if nlp.control_type == ControlType.CONSTANT\n else horzcat(all_pn.nlp.controls.cx, all_pn.nlp.controls.cx_end)\n )\n control_cx_end = get_u(nlp, control_cx, dt_cx)\n state_cx_end = (\n all_pn.nlp.states.cx_end\n if self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n else nlp.dynamics[0](x0=state_cx, p=control_cx_end, params=nlp.parameters.cx)[\"xf\"]\n )\n self.modified_function = biorbd.to_casadi_func(\n f\"{name}\",\n (\n (self.function(all_pn.nlp.states.cx, all_pn.nlp.controls.cx, param_cx) - target_cx[:, 0])\n ** exponent\n + (self.function(state_cx_end, control_cx_end, param_cx) - target_cx[:, 1]) ** exponent\n )\n / 2,\n state_cx,\n control_cx,\n param_cx,\n target_cx,\n dt_cx,\n )\n modified_fcn = self.modified_function(state_cx, control_cx, param_cx, target_cx, dt_cx)\n else:\n modified_fcn = (self.function(state_cx, control_cx, param_cx) - target_cx) ** exponent\n\n modified_fcn = weight_cx * modified_fcn * dt_cx if self.weight else modified_fcn * dt_cx\n\n # Do not use nlp.add_casadi_func because all of them must be registered\n self.weighted_function = Function(\n name, [state_cx, control_cx, param_cx, weight_cx, target_cx, dt_cx], [modified_fcn]\n )\n self.weighted_function_non_threaded = 
self.weighted_function\n\n if ocp.n_threads > 1 and self.multi_thread and len(self.node_idx) > 1:\n self.function = self.function.map(len(self.node_idx), \"thread\", ocp.n_threads)\n self.weighted_function = self.weighted_function.map(len(self.node_idx), \"thread\", ocp.n_threads)\n else:\n self.multi_thread = False # Override the multi_threading, since only one node is optimized\n\n if self.expand:\n self.function = self.function.expand()\n self.weighted_function = self.weighted_function.expand()\n\n def add_target_to_plot(self, all_pn: PenaltyNodeList, combine_to: str):\n \"\"\"\n Interface to the plot so it can be properly added to the proper plot\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The penalty node elements\n combine_to: str\n The name of the underlying plot to combine the tracking data to\n \"\"\"\n\n if self.target is None or combine_to is None:\n return\n\n self.target_plot_name = combine_to\n # if the target is n x ns, we need to add a dimension (n x ns + 1) to make it compatible with the plot\n if self.target[0].shape[1] == all_pn.nlp.ns:\n self.target_to_plot = np.concatenate(\n (self.target[0], np.nan * np.ndarray((self.target[0].shape[0], 1))), axis=1\n )\n else:\n self.target_to_plot = self.target[0]\n\n def _finish_add_target_to_plot(self, all_pn: PenaltyNodeList):\n \"\"\"\n Internal interface to add (after having check the target dimensions) the target to the plot if needed\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The penalty node elements\n\n \"\"\"\n\n def plot_function(t, x, u, p):\n if isinstance(t, (list, tuple)):\n return self.target_to_plot[:, [self.node_idx.index(_t) for _t in t]]\n else:\n return self.target_to_plot[:, self.node_idx.index(t)]\n\n if self.target_to_plot is not None:\n if self.target_to_plot.shape[1] > 1:\n plot_type = PlotType.STEP\n else:\n plot_type = PlotType.POINT\n\n all_pn.ocp.add_plot(\n self.target_plot_name,\n plot_function,\n color=\"tab:red\",\n plot_type=plot_type,\n phase=all_pn.nlp.phase_idx,\n axes_idx=Mapping(self.rows),\n node_idx=self.node_idx,\n )\n\n def add_or_replace_to_penalty_pool(self, ocp, nlp):\n \"\"\"\n Doing some configuration on the penalty and add it to the list of penalty\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n nlp: NonLinearProgram\n A reference to the current phase of the ocp\n \"\"\"\n if not self.name:\n if self.type.name == \"CUSTOM\":\n self.name = self.custom_function.__name__\n else:\n self.name = self.type.name\n\n penalty_type = self.type.get_type()\n if self.node == Node.TRANSITION:\n all_pn = []\n\n # Make sure the penalty behave like a PhaseTransition, even though it may be an Objective or Constraint\n self.node = Node.END\n self.node_idx = [0]\n self.transition = True\n self.dt = 1\n self.phase_pre_idx = nlp.phase_idx\n self.phase_post_idx = (nlp.phase_idx + 1) % ocp.n_phases\n if not self.states_mapping:\n self.states_mapping = BiMapping(range(nlp.states.shape), range(nlp.states.shape))\n\n all_pn.append(self._get_penalty_node_list(ocp, nlp))\n all_pn[0].u = [nlp.U[-1]] # Make an exception to the fact that U is not available for the last node\n\n nlp = ocp.nlp[(nlp.phase_idx + 1) % ocp.n_phases]\n self.node = Node.START\n all_pn.append(self._get_penalty_node_list(ocp, nlp))\n\n self.node = Node.TRANSITION\n\n penalty_type.validate_penalty_time_index(self, all_pn[0])\n penalty_type.validate_penalty_time_index(self, all_pn[1])\n self.clear_penalty(ocp, all_pn[0].nlp)\n\n elif isinstance(self.node, tuple) and 
self.multinode_constraint:\n all_pn = []\n self.node_list = self.node\n # Make sure the penalty behave like a MultinodeConstraint, even though it may be an Objective or Constraint\n # self.transition = True\n self.dt = 1\n # self.phase_pre_idx\n # self.phase_post_idx = (nlp.phase_idx + 1) % ocp.n_phases\n if not self.states_mapping:\n self.states_mapping = BiMapping(range(nlp.states.shape), range(nlp.states.shape))\n self.node = self.node_list[0]\n nlp = ocp.nlp[self.phase_first_idx]\n all_pn.append(self._get_penalty_node_list(ocp, nlp))\n if self.node == Node.END:\n all_pn[0].u = [nlp.U[-1]]\n # Make an exception to the fact that U is not available for the last node\n\n self.node = self.node_list[1]\n nlp = ocp.nlp[self.phase_second_idx]\n all_pn.append(self._get_penalty_node_list(ocp, nlp))\n if self.node == Node.END:\n all_pn[1].u = [nlp.U[-1]]\n # Make an exception to the fact that U is not available for the last node\n\n # reset the node list\n self.node = self.node_list\n\n penalty_type.validate_penalty_time_index(self, all_pn[0])\n penalty_type.validate_penalty_time_index(self, all_pn[1])\n self.node_idx = [all_pn[0].t[0], all_pn[1].t[0]]\n self.clear_penalty(ocp, all_pn[0].nlp)\n else:\n all_pn = self._get_penalty_node_list(ocp, nlp)\n penalty_type.validate_penalty_time_index(self, all_pn)\n self.clear_penalty(all_pn.ocp, all_pn.nlp)\n self.dt = penalty_type.get_dt(all_pn.nlp)\n self.node_idx = (\n all_pn.t[:-1]\n if (\n self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL\n )\n and self.target is not None\n else all_pn.t\n )\n\n penalty_function = self.type.value[0](self, all_pn, **self.params)\n self.set_penalty(penalty_function, all_pn)\n\n def _add_penalty_to_pool(self, all_pn: PenaltyNodeList):\n \"\"\"\n Return the penalty pool for the specified penalty (abstract)\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The penalty node elements\n \"\"\"\n\n raise RuntimeError(\"get_dt cannot be called from an abstract class\")\n\n def clear_penalty(self, ocp, nlp):\n \"\"\"\n Resets a penalty. 
A negative penalty index creates a new empty penalty (abstract)\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n nlp: NonLinearProgram\n A reference to the current phase of the ocp\n \"\"\"\n\n raise RuntimeError(\"_reset_penalty cannot be called from an abstract class\")\n\n def _get_penalty_node_list(self, ocp, nlp) -> PenaltyNodeList:\n \"\"\"\n Get the actual node (time, X and U) specified in the penalty\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n nlp: NonLinearProgram\n A reference to the current phase of the ocp\n\n Returns\n -------\n The actual node (time, X and U) specified in the penalty\n \"\"\"\n\n if not isinstance(self.node, (list, tuple)):\n self.node = (self.node,)\n\n t = []\n for node in self.node:\n if isinstance(node, int):\n if node < 0 or node > nlp.ns:\n raise RuntimeError(f\"Invalid node, {node} must be between 0 and {nlp.ns}\")\n t.append(node)\n elif node == Node.START:\n t.append(0)\n elif node == Node.MID:\n if nlp.ns % 2 == 1:\n raise (ValueError(\"Number of shooting points must be even to use MID\"))\n t.append(nlp.ns // 2)\n elif node == Node.INTERMEDIATES:\n t.extend(list(i for i in range(1, nlp.ns - 1)))\n elif node == Node.PENULTIMATE:\n if nlp.ns < 2:\n raise (ValueError(\"Number of shooting points must be greater than 1\"))\n t.append(nlp.ns - 1)\n elif node == Node.END:\n t.append(nlp.ns)\n elif node == Node.ALL_SHOOTING:\n t.extend(range(nlp.ns))\n elif node == Node.ALL:\n t.extend(range(nlp.ns + 1))\n else:\n raise RuntimeError(\" is not a valid node\")\n\n x = [nlp.X[idx] for idx in t]\n u = [nlp.U[idx] for idx in t if idx != nlp.ns]\n return PenaltyNodeList(ocp, nlp, t, x, u, nlp.parameters.cx)\n" ]
[ [ "numpy.issubdtype", "numpy.ndarray", "numpy.repeat", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nerdslab/SwapVAE
[ "f43e59c93d0b9f7f1de51a63e25b17b7be1da2d9" ]
[ "vae_kits/classification.py" ]
[ "import torch\r\nfrom torch.utils.data import DataLoader, Dataset\r\nfrom tqdm import tqdm\r\n\r\nclass Simple_Trans(Dataset):\r\n def __init__(self, data, transform=None):\r\n # [reps, labels]\r\n self.reps = data[0]\r\n self.labels = data[1]\r\n # print(self.reps.shape, self.labels.shape) # torch.Size([60000, 64]) torch.Size([60000])\r\n\r\n def __len__(self):\r\n return self.labels.shape[0]\r\n\r\n def __getitem__(self, idx):\r\n return self.reps[idx, :], self.labels[idx]\r\n\r\n\r\nclass linear_clf(object):\r\n def __init__(self, net, classifier, optimizer, train_dataloader, test_dataloader, device = \"cpu\", batch_size=1024,\r\n num_epochs = 10, disable_tqdm = False, writer=None, writer_tag = \"\", pair=False):\r\n self.net = net\r\n #self.net.eval()\r\n\r\n self.classifier = classifier\r\n self.optimizer = optimizer\r\n self.writer = writer\r\n self.tag = writer_tag\r\n\r\n self.disable_tqdm = disable_tqdm\r\n self.device = device\r\n self.batch_size = batch_size\r\n self.num_epochs = num_epochs\r\n\r\n self.data_train = Simple_Trans(self.compute_representations(train_dataloader))\r\n self.data_test = Simple_Trans(self.compute_representations(test_dataloader))\r\n\r\n self.best_number = 0\r\n self.train_linear_layer()\r\n\r\n self.train_acc = self.compute_accuracy(DataLoader(self.data_train, batch_size=batch_size))\r\n self.test_acc = self.compute_accuracy(DataLoader(self.data_test, batch_size=batch_size))\r\n #self.net.train()\r\n\r\n def compute_representations(self, dataloader):\r\n \"\"\" store the representations\r\n :param net: ResNet or smth\r\n :param dataloader: train_loader and test_loader\r\n \"\"\"\r\n #self.net.eval()\r\n reps, labels = [], []\r\n\r\n for i, (x, label) in enumerate(dataloader):\r\n # load data\r\n x = x.to(self.device)\r\n labels.append(label)\r\n\r\n # forward\r\n with torch.no_grad():\r\n representation = self.net(x)\r\n reps.append(representation.detach().cpu())\r\n\r\n if i % 100 == 0:\r\n reps = [torch.cat(reps, dim=0)]\r\n labels = [torch.cat(labels, dim=0)]\r\n\r\n reps = torch.cat(reps, dim=0)\r\n labels = torch.cat(labels, dim=0)\r\n #self.net.train()\r\n return [reps, labels]\r\n\r\n def compute_accuracy(self, dataloader):\r\n #self.net.eval()\r\n self.classifier.eval()\r\n right = []\r\n total = []\r\n for x, label in dataloader:\r\n x, label = x.to(self.device), label.to(self.device)\r\n # feed to network and classifier\r\n with torch.no_grad():\r\n pred_logits = self.classifier(x)\r\n # compute accuracy\r\n _, pred_class = torch.max(pred_logits, 1)\r\n right.append((pred_class == label).sum().item())\r\n total.append(label.size(0))\r\n self.classifier.train()\r\n #self.net.train()\r\n return sum(right) / sum(total)\r\n\r\n def train_linear_layer(self):\r\n #self.net.eval()\r\n class_criterion = torch.nn.CrossEntropyLoss()\r\n progress_bar = tqdm(range(self.num_epochs), disable=self.disable_tqdm, position=0, leave=True)\r\n for epoch in progress_bar:\r\n for x, label in DataLoader(self.data_train, batch_size=self.batch_size):\r\n self.classifier.train()\r\n x, label = x.to(self.device), label.to(self.device)\r\n pred_class = self.classifier(x)\r\n loss = class_criterion(pred_class, label)\r\n\r\n # backward\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n curr_number = self.compute_accuracy(DataLoader(self.data_test, batch_size=self.batch_size))\r\n if curr_number >= self.best_number:\r\n self.best_number = curr_number\r\n\r\n if self.writer is not None:\r\n 
self.writer.log_metrics({'CLFtraining/val-tag{}'.format(self.tag): curr_number}, step = epoch)\r\n\r\n progress_bar.set_description('Linear_CLF Epoch: [{}/{}] Acc@1:{:.3f}% BestAcc@1:{:.3f}%'\r\n .format(epoch, self.num_epochs, curr_number, self.best_number))\r\n #self.net.train()\r\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.cat", "torch.utils.data.DataLoader", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
opennlp/Large-Scale-Text-Classification
[ "a803c8d89357e5ec897031a41dda807d91f00431" ]
[ "interpretation/instance_explanation.py" ]
[ "from factory import vectorizer_factory\nfrom sklearn.base import TransformerMixin\nfrom sklearn.pipeline import make_pipeline\nfrom lime.lime_text import LimeTextExplainer\n\n\nclass VectorTransformer(TransformerMixin):\n def __init__(self, vectorizer_name):\n self.vectorizer_name = vectorizer_name\n\n def fit(self,X, y=None):\n pass\n\n def transform(self, sentence_list, y=None):\n return vectorizer_factory.get_vectorized_text(sentence_list,self.vectorizer_name)\n\n\ndef get_pipeline_for_classification(feature_transformer, trained_model):\n return make_pipeline(feature_transformer, trained_model)\n\n\ndef get_explanation_for_instance(text_string,classifier_function, class_list, max_num_features_to_show=10, file_to_save='explain.html'):\n explainer = LimeTextExplainer(class_names=class_list,random_state=42)\n explained_instance = explainer.explain_instance(text_string, classifier_function.predict_proba,\n num_features=max_num_features_to_show, top_labels=len(class_list))\n explained_instance.save_to_file(file_to_save)\n return explained_instance.as_list()\n" ]
[ [ "sklearn.pipeline.make_pipeline" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lefevre-fraser/openmeta-mms
[ "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "08f3115e76498df1f8d70641d71f5c52cab4ce5f" ]
[ "bin/Python27/Lib/site-packages/numpy/core/tests/test_regression.py", "bin/Python27/Lib/site-packages/scipy/linalg/tests/test_decomp.py", "bin/Python27/Lib/site-packages/scipy/sparse/base.py", "bin/Python27/Lib/site-packages/scipy/stats/_multivariate.py", "bin/Python27/Lib/site-packages/scipy/sparse/linalg/dsolve/__init__.py", "bin/Python27/Lib/site-packages/scipy/optimize/tests/test__differential_evolution.py", "bin/Python27/Lib/site-packages/scipy/stats/tests/test_mstats_basic.py", "bin/Python27/Lib/site-packages/scipy/io/__init__.py", "analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/voxel_methods.py", "bin/Python27/Lib/site-packages/scipy/sparse/linalg/isolve/utils.py", "bin/Python27/Lib/site-packages/scipy/optimize/tests/test__root.py", "bin/Python27/Lib/site-packages/scipy/special/tests/test_basic.py", "bin/Python27/Lib/site-packages/numpy/polynomial/chebyshev.py", "bin/Python27/Lib/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py", "bin/Python27/Lib/site-packages/scipy/optimize/minpack.py", "bin/Python27/Lib/site-packages/scipy/linalg/tests/test_special_matrices.py", "bin/Python27/Lib/site-packages/scipy/linalg/_solvers.py", "bin/Python27/Lib/site-packages/numpy/ma/mrecords.py", "bin/Python27/Lib/site-packages/scipy/cluster/hierarchy.py", "bin/Python27/Lib/site-packages/numpy/polynomial/polynomial.py", "bin/Python27/Lib/site-packages/scipy/constants/__init__.py", "bin/Python27/Lib/site-packages/scipy/weave/examples/vq.py", "bin/Python27/Lib/site-packages/scipy/signal/fir_filter_design.py", "bin/Python27/Lib/site-packages/scipy/special/tests/test_loggamma.py", "bin/Python27/Lib/site-packages/scipy/sparse/linalg/eigen/__init__.py", "bin/Python27/Lib/site-packages/scipy/stats/tests/test_kdeoth.py", "bin/Python27/Lib/site-packages/numpy/lib/arraysetops.py", "bin/Python27/Lib/site-packages/scipy/linalg/tests/test_cython_lapack.py", "bin/Python27/Lib/site-packages/scipy/io/harwell_boeing/hb.py", "bin/Python27/Lib/site-packages/numpy/distutils/command/install_clib.py", "bin/Python27/Lib/site-packages/scipy/optimize/tests/test_lsq_linear.py", "bin/Python27/Lib/site-packages/pyqtgraph/examples/FlowchartCustomNode.py" ]
[ "from __future__ import division, absolute_import, print_function\r\n\r\nimport copy\r\nimport pickle\r\nimport sys\r\nimport platform\r\nimport gc\r\nimport warnings\r\nimport tempfile\r\nfrom os import path\r\nfrom io import BytesIO\r\nfrom itertools import chain\r\n\r\nimport numpy as np\r\nfrom numpy.testing import (\r\n run_module_suite, TestCase, assert_, assert_equal,\r\n assert_almost_equal, assert_array_equal, assert_array_almost_equal,\r\n assert_raises, assert_warns, dec\r\n )\r\nfrom numpy.testing.utils import _assert_valid_refcount\r\nfrom numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu\r\n\r\nrlevel = 1\r\n\r\nclass TestRegression(TestCase):\r\n def test_invalid_round(self,level=rlevel):\r\n # Ticket #3\r\n v = 4.7599999999999998\r\n assert_array_equal(np.array([v]), np.array(v))\r\n\r\n def test_mem_empty(self,level=rlevel):\r\n # Ticket #7\r\n np.empty((1,), dtype=[('x', np.int64)])\r\n\r\n def test_pickle_transposed(self,level=rlevel):\r\n # Ticket #16\r\n a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))\r\n f = BytesIO()\r\n pickle.dump(a, f)\r\n f.seek(0)\r\n b = pickle.load(f)\r\n f.close()\r\n assert_array_equal(a, b)\r\n\r\n def test_typeNA(self,level=rlevel):\r\n # Ticket #31\r\n assert_equal(np.typeNA[np.int64], 'Int64')\r\n assert_equal(np.typeNA[np.uint64], 'UInt64')\r\n\r\n def test_dtype_names(self,level=rlevel):\r\n # Ticket #35\r\n # Should succeed\r\n np.dtype([(('name', 'label'), np.int32, 3)])\r\n\r\n def test_reduce(self,level=rlevel):\r\n # Ticket #40\r\n assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)\r\n\r\n def test_zeros_order(self,level=rlevel):\r\n # Ticket #43\r\n np.zeros([3], int, 'C')\r\n np.zeros([3], order='C')\r\n np.zeros([3], int, order='C')\r\n\r\n def test_asarray_with_order(self,level=rlevel):\r\n # Check that nothing is done when order='F' and array C/F-contiguous\r\n a = np.ones(2)\r\n assert_(a is np.asarray(a, order='F'))\r\n\r\n def test_ravel_with_order(self,level=rlevel):\r\n # Check that ravel works when order='F' and array C/F-contiguous\r\n a = np.ones(2)\r\n assert_(not a.ravel('F').flags.owndata)\r\n\r\n def test_sort_bigendian(self,level=rlevel):\r\n # Ticket #47\r\n a = np.linspace(0, 10, 11)\r\n c = a.astype(np.dtype('<f8'))\r\n c.sort()\r\n assert_array_almost_equal(c, a)\r\n\r\n def test_negative_nd_indexing(self,level=rlevel):\r\n # Ticket #49\r\n c = np.arange(125).reshape((5, 5, 5))\r\n origidx = np.array([-1, 0, 1])\r\n idx = np.array(origidx)\r\n c[idx]\r\n assert_array_equal(idx, origidx)\r\n\r\n def test_char_dump(self,level=rlevel):\r\n # Ticket #50\r\n f = BytesIO()\r\n ca = np.char.array(np.arange(1000, 1010), itemsize=4)\r\n ca.dump(f)\r\n f.seek(0)\r\n ca = np.load(f)\r\n f.close()\r\n\r\n def test_noncontiguous_fill(self,level=rlevel):\r\n # Ticket #58.\r\n a = np.zeros((5, 3))\r\n b = a[:, :2,]\r\n\r\n def rs():\r\n b.shape = (10,)\r\n\r\n self.assertRaises(AttributeError, rs)\r\n\r\n def test_bool(self,level=rlevel):\r\n # Ticket #60\r\n np.bool_(1) # Should succeed\r\n\r\n def test_indexing1(self,level=rlevel):\r\n # Ticket #64\r\n descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]\r\n buffer = ((([6j, 4j],),),)\r\n h = np.array(buffer, dtype=descr)\r\n h['x']['y']['z']\r\n\r\n def test_indexing2(self,level=rlevel):\r\n # Ticket #65\r\n descr = [('x', 'i4', (2,))]\r\n buffer = ([3, 2],)\r\n h = np.array(buffer, dtype=descr)\r\n h['x']\r\n\r\n def test_round(self,level=rlevel):\r\n # Ticket #67\r\n x = np.array([1+2j])\r\n assert_almost_equal(x**(-1), 
[1/(1+2j)])\r\n\r\n def test_scalar_compare(self,level=rlevel):\r\n # Trac Ticket #72\r\n # https://github.com/numpy/numpy/issues/565\r\n a = np.array(['test', 'auto'])\r\n assert_array_equal(a == 'auto', np.array([False, True]))\r\n self.assertTrue(a[1] == 'auto')\r\n self.assertTrue(a[0] != 'auto')\r\n b = np.linspace(0, 10, 11)\r\n # This should return true for now, but will eventually raise an error:\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\r\n self.assertTrue(b != 'auto')\r\n self.assertTrue(b[0] != 'auto')\r\n\r\n def test_unicode_swapping(self,level=rlevel):\r\n # Ticket #79\r\n ulen = 1\r\n ucs_value = sixu('\\U0010FFFF')\r\n ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)\r\n ua.newbyteorder() # Should succeed.\r\n\r\n def test_object_array_fill(self,level=rlevel):\r\n # Ticket #86\r\n x = np.zeros(1, 'O')\r\n x.fill([])\r\n\r\n def test_mem_dtype_align(self,level=rlevel):\r\n # Ticket #93\r\n self.assertRaises(TypeError, np.dtype,\r\n {'names':['a'],'formats':['foo']}, align=1)\r\n\r\n @dec.knownfailureif((sys.version_info[0] >= 3) or\r\n (sys.platform == \"win32\" and\r\n platform.architecture()[0] == \"64bit\"),\r\n \"numpy.intp('0xff', 16) not supported on Py3, \"\r\n \"as it does not inherit from Python int\")\r\n def test_intp(self,level=rlevel):\r\n # Ticket #99\r\n i_width = np.int_(0).nbytes*2 - 1\r\n np.intp('0x' + 'f'*i_width, 16)\r\n self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)\r\n self.assertRaises(ValueError, np.intp, '0x1', 32)\r\n assert_equal(255, np.intp('0xFF', 16))\r\n assert_equal(1024, np.intp(1024))\r\n\r\n def test_endian_bool_indexing(self,level=rlevel):\r\n # Ticket #105\r\n a = np.arange(10., dtype='>f8')\r\n b = np.arange(10., dtype='<f8')\r\n xa = np.where((a > 2) & (a < 6))\r\n xb = np.where((b > 2) & (b < 6))\r\n ya = ((a > 2) & (a < 6))\r\n yb = ((b > 2) & (b < 6))\r\n assert_array_almost_equal(xa, ya.nonzero())\r\n assert_array_almost_equal(xb, yb.nonzero())\r\n assert_(np.all(a[ya] > 0.5))\r\n assert_(np.all(b[yb] > 0.5))\r\n\r\n def test_endian_where(self,level=rlevel):\r\n # GitHub issue #369\r\n net = np.zeros(3, dtype='>f4')\r\n net[1] = 0.00458849\r\n net[2] = 0.605202\r\n max_net = net.max()\r\n test = np.where(net <= 0., max_net, net)\r\n correct = np.array([ 0.60520202, 0.00458849, 0.60520202])\r\n assert_array_almost_equal(test, correct)\r\n\r\n def test_endian_recarray(self,level=rlevel):\r\n # Ticket #2185\r\n dt = np.dtype([\r\n ('head', '>u4'),\r\n ('data', '>u4', 2),\r\n ])\r\n buf = np.recarray(1, dtype=dt)\r\n buf[0]['head'] = 1\r\n buf[0]['data'][:] = [1, 1]\r\n\r\n h = buf[0]['head']\r\n d = buf[0]['data'][0]\r\n buf[0]['head'] = h\r\n buf[0]['data'][0] = d\r\n assert_(buf[0]['head'] == 1)\r\n\r\n def test_mem_dot(self,level=rlevel):\r\n # Ticket #106\r\n x = np.random.randn(0, 1)\r\n y = np.random.randn(10, 1)\r\n # Dummy array to detect bad memory access:\r\n _z = np.ones(10)\r\n _dummy = np.empty((0, 10))\r\n z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)\r\n np.dot(x, np.transpose(y), out=z)\r\n assert_equal(_z, np.ones(10))\r\n # Do the same for the built-in dot:\r\n np.core.multiarray.dot(x, np.transpose(y), out=z)\r\n assert_equal(_z, np.ones(10))\r\n\r\n def test_arange_endian(self,level=rlevel):\r\n # Ticket #111\r\n ref = np.arange(10)\r\n x = np.arange(10, dtype='<f8')\r\n assert_array_equal(ref, x)\r\n x = np.arange(10, dtype='>f8')\r\n assert_array_equal(ref, x)\r\n\r\n def 
test_argmax(self,level=rlevel):\r\n # Ticket #119\r\n a = np.random.normal(0, 1, (4, 5, 6, 7, 8))\r\n for i in range(a.ndim):\r\n a.argmax(i) # Should succeed\r\n\r\n def test_mem_divmod(self,level=rlevel):\r\n # Ticket #126\r\n for i in range(10):\r\n divmod(np.array([i])[0], 10)\r\n\r\n def test_hstack_invalid_dims(self,level=rlevel):\r\n # Ticket #128\r\n x = np.arange(9).reshape((3, 3))\r\n y = np.array([0, 0, 0])\r\n self.assertRaises(ValueError, np.hstack, (x, y))\r\n\r\n def test_squeeze_type(self,level=rlevel):\r\n # Ticket #133\r\n a = np.array([3])\r\n b = np.array(3)\r\n assert_(type(a.squeeze()) is np.ndarray)\r\n assert_(type(b.squeeze()) is np.ndarray)\r\n\r\n def test_add_identity(self,level=rlevel):\r\n # Ticket #143\r\n assert_equal(0, np.add.identity)\r\n\r\n def test_numpy_float_python_long_addition(self):\r\n # Check that numpy float and python longs can be added correctly.\r\n a = np.float_(23.) + 2**135\r\n assert_equal(a, 23. + 2**135)\r\n\r\n def test_binary_repr_0(self,level=rlevel):\r\n # Ticket #151\r\n assert_equal('0', np.binary_repr(0))\r\n\r\n def test_rec_iterate(self,level=rlevel):\r\n # Ticket #160\r\n descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])\r\n x = np.rec.array([(1, 1.1, '1.0'),\r\n (2, 2.2, '2.0')], dtype=descr)\r\n x[0].tolist()\r\n [i for i in x[0]]\r\n\r\n def test_unicode_string_comparison(self,level=rlevel):\r\n # Ticket #190\r\n a = np.array('hello', np.unicode_)\r\n b = np.array('world')\r\n a == b\r\n\r\n def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel):\r\n # Fix in r2836\r\n # Create non-contiguous Fortran ordered array\r\n x = np.array(np.random.rand(3, 3), order='F')[:, :2]\r\n assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))\r\n\r\n def test_flat_assignment(self,level=rlevel):\r\n # Correct behaviour of ticket #194\r\n x = np.empty((3, 1))\r\n x.flat = np.arange(3)\r\n assert_array_almost_equal(x, [[0], [1], [2]])\r\n x.flat = np.arange(3, dtype=float)\r\n assert_array_almost_equal(x, [[0], [1], [2]])\r\n\r\n def test_broadcast_flat_assignment(self,level=rlevel):\r\n # Ticket #194\r\n x = np.empty((3, 1))\r\n\r\n def bfa():\r\n x[:] = np.arange(3)\r\n\r\n def bfb():\r\n x[:] = np.arange(3, dtype=float)\r\n\r\n self.assertRaises(ValueError, bfa)\r\n self.assertRaises(ValueError, bfb)\r\n\r\n def test_nonarray_assignment(self):\r\n # See also Issue gh-2870, test for non-array assignment\r\n # and equivalent unsafe casted array assignment\r\n a = np.arange(10)\r\n b = np.ones(10, dtype=bool)\r\n r = np.arange(10)\r\n\r\n def assign(a, b, c):\r\n a[b] = c\r\n\r\n assert_raises(ValueError, assign, a, b, np.nan)\r\n a[b] = np.array(np.nan) # but not this.\r\n assert_raises(ValueError, assign, a, r, np.nan)\r\n a[r] = np.array(np.nan)\r\n\r\n def test_unpickle_dtype_with_object(self,level=rlevel):\r\n # Implemented in r2840\r\n dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])\r\n f = BytesIO()\r\n pickle.dump(dt, f)\r\n f.seek(0)\r\n dt_ = pickle.load(f)\r\n f.close()\r\n assert_equal(dt, dt_)\r\n\r\n def test_mem_array_creation_invalid_specification(self,level=rlevel):\r\n # Ticket #196\r\n dt = np.dtype([('x', int), ('y', np.object_)])\r\n # Wrong way\r\n self.assertRaises(ValueError, np.array, [1, 'object'], dt)\r\n # Correct way\r\n np.array([(1, 'object')], dt)\r\n\r\n def test_recarray_single_element(self,level=rlevel):\r\n # Ticket #202\r\n a = np.array([1, 2, 3], dtype=np.int32)\r\n b = a.copy()\r\n r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])\r\n assert_array_equal(a, 
b)\r\n assert_equal(a, r[0][0])\r\n\r\n def test_zero_sized_array_indexing(self,level=rlevel):\r\n # Ticket #205\r\n tmp = np.array([])\r\n\r\n def index_tmp():\r\n tmp[np.array(10)]\r\n\r\n self.assertRaises(IndexError, index_tmp)\r\n\r\n def test_chararray_rstrip(self,level=rlevel):\r\n # Ticket #222\r\n x = np.chararray((1,), 5)\r\n x[0] = asbytes('a ')\r\n x = x.rstrip()\r\n assert_equal(x[0], asbytes('a'))\r\n\r\n def test_object_array_shape(self,level=rlevel):\r\n # Ticket #239\r\n assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))\r\n assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))\r\n assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))\r\n assert_equal(np.array([], dtype=object).shape, (0,))\r\n assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))\r\n assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))\r\n\r\n def test_mem_around(self,level=rlevel):\r\n # Ticket #243\r\n x = np.zeros((1,))\r\n y = [0]\r\n decimal = 6\r\n np.around(abs(x-y), decimal) <= 10.0**(-decimal)\r\n\r\n def test_character_array_strip(self,level=rlevel):\r\n # Ticket #246\r\n x = np.char.array((\"x\", \"x \", \"x \"))\r\n for c in x:\r\n assert_equal(c, \"x\")\r\n\r\n def test_lexsort(self,level=rlevel):\r\n # Lexsort memory error\r\n v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\r\n assert_equal(np.lexsort(v), 0)\r\n\r\n def test_lexsort_invalid_sequence(self):\r\n # Issue gh-4123\r\n class BuggySequence(object):\r\n def __len__(self):\r\n return 4\r\n\r\n def __getitem__(self, key):\r\n raise KeyError\r\n\r\n assert_raises(KeyError, np.lexsort, BuggySequence())\r\n\r\n def test_pickle_py2_bytes_encoding(self):\r\n # Check that arrays and scalars pickled on Py2 are\r\n # unpickleable on Py3 using encoding='bytes'\r\n\r\n test_data = [\r\n # (original, py2_pickle)\r\n (np.unicode_('\\u6f2c'),\r\n asbytes(\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\r\n \"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\n\"\r\n \"I0\\ntp6\\nbS',o\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\")),\r\n\r\n (np.array([9e123], dtype=np.float64),\r\n asbytes(\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\n\"\r\n \"p1\\n(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\n\"\r\n \"p7\\n(S'f8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'<'\\np11\\nNNNI-1\\nI-1\\n\"\r\n \"I0\\ntp12\\nbI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np13\\ntp14\\nb.\")),\r\n\r\n (np.array([(9e123,)], dtype=[('name', float)]),\r\n asbytes(\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n\"\r\n \"(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n\"\r\n \"(S'V8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nN(S'name'\\np12\\ntp13\\n\"\r\n \"(dp14\\ng12\\n(g7\\n(S'f8'\\np15\\nI0\\nI1\\ntp16\\nRp17\\n(I3\\nS'<'\\np18\\nNNNI-1\\n\"\r\n \"I-1\\nI0\\ntp19\\nbI0\\ntp20\\nsI8\\nI1\\nI0\\ntp21\\n\"\r\n \"bI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np22\\ntp23\\nb.\")),\r\n ]\r\n\r\n if sys.version_info[:2] >= (3, 4):\r\n # encoding='bytes' was added in Py3.4\r\n for original, data in test_data:\r\n result = pickle.loads(data, encoding='bytes')\r\n assert_equal(result, original)\r\n\r\n if isinstance(result, np.ndarray) and result.dtype.names:\r\n for name in result.dtype.names:\r\n assert_(isinstance(name, str))\r\n\r\n def test_pickle_dtype(self,level=rlevel):\r\n # Ticket #251\r\n pickle.dumps(np.float)\r\n\r\n def test_swap_real(self, level=rlevel):\r\n # Ticket #265\r\n 
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)\r\n assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)\r\n assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)\r\n assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)\r\n\r\n def test_object_array_from_list(self, level=rlevel):\r\n # Ticket #270\r\n np.array([1, 'A', None]) # Should succeed\r\n\r\n def test_multiple_assign(self, level=rlevel):\r\n # Ticket #273\r\n a = np.zeros((3, 1), int)\r\n a[[1, 2]] = 1\r\n\r\n def test_empty_array_type(self, level=rlevel):\r\n assert_equal(np.array([]).dtype, np.zeros(0).dtype)\r\n\r\n def test_void_copyswap(self, level=rlevel):\r\n dt = np.dtype([('one', '<i4'), ('two', '<i4')])\r\n x = np.array((1, 2), dtype=dt)\r\n x = x.byteswap()\r\n assert_(x['one'] > 1 and x['two'] > 2)\r\n\r\n def test_method_args(self, level=rlevel):\r\n # Make sure methods and functions have same default axis\r\n # keyword and arguments\r\n funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),\r\n ('sometrue', 'any'),\r\n ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),\r\n 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',\r\n 'round', 'min', 'max', 'argsort', 'sort']\r\n funcs2 = ['compress', 'take', 'repeat']\r\n\r\n for func in funcs1:\r\n arr = np.random.rand(8, 7)\r\n arr2 = arr.copy()\r\n if isinstance(func, tuple):\r\n func_meth = func[1]\r\n func = func[0]\r\n else:\r\n func_meth = func\r\n res1 = getattr(arr, func_meth)()\r\n res2 = getattr(np, func)(arr2)\r\n if res1 is None:\r\n res1 = arr\r\n\r\n if res1.dtype.kind in 'uib':\r\n assert_((res1 == res2).all(), func)\r\n else:\r\n assert_(abs(res1-res2).max() < 1e-8, func)\r\n\r\n for func in funcs2:\r\n arr1 = np.random.rand(8, 7)\r\n arr2 = np.random.rand(8, 7)\r\n res1 = None\r\n if func == 'compress':\r\n arr1 = arr1.ravel()\r\n res1 = getattr(arr2, func)(arr1)\r\n else:\r\n arr2 = (15*arr2).astype(int).ravel()\r\n if res1 is None:\r\n res1 = getattr(arr1, func)(arr2)\r\n res2 = getattr(np, func)(arr1, arr2)\r\n assert_(abs(res1-res2).max() < 1e-8, func)\r\n\r\n def test_mem_lexsort_strings(self, level=rlevel):\r\n # Ticket #298\r\n lst = ['abc', 'cde', 'fgh']\r\n np.lexsort((lst,))\r\n\r\n def test_fancy_index(self, level=rlevel):\r\n # Ticket #302\r\n x = np.array([1, 2])[np.array([0])]\r\n assert_equal(x.shape, (1,))\r\n\r\n def test_recarray_copy(self, level=rlevel):\r\n # Ticket #312\r\n dt = [('x', np.int16), ('y', np.float64)]\r\n ra = np.array([(1, 2.3)], dtype=dt)\r\n rb = np.rec.array(ra, dtype=dt)\r\n rb['x'] = 2.\r\n assert_(ra['x'] != rb['x'])\r\n\r\n def test_rec_fromarray(self, level=rlevel):\r\n # Ticket #322\r\n x1 = np.array([[1, 2], [3, 4], [5, 6]])\r\n x2 = np.array(['a', 'dd', 'xyz'])\r\n x3 = np.array([1.1, 2, 3])\r\n np.rec.fromarrays([x1, x2, x3], formats=\"(2,)i4,a3,f8\")\r\n\r\n def test_object_array_assign(self, level=rlevel):\r\n x = np.empty((2, 2), object)\r\n x.flat[2] = (1, 2, 3)\r\n assert_equal(x.flat[2], (1, 2, 3))\r\n\r\n def test_ndmin_float64(self, level=rlevel):\r\n # Ticket #324\r\n x = np.array([1, 2, 3], dtype=np.float64)\r\n assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)\r\n assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)\r\n\r\n def test_ndmin_order(self, level=rlevel):\r\n # Issue #465 and related checks\r\n assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)\r\n assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)\r\n assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)\r\n assert_(np.array(np.ones((2, 2), 
order='C'), ndmin=3).flags.c_contiguous)\r\n\r\n def test_mem_axis_minimization(self, level=rlevel):\r\n # Ticket #327\r\n data = np.arange(5)\r\n data = np.add.outer(data, data)\r\n\r\n def test_mem_float_imag(self, level=rlevel):\r\n # Ticket #330\r\n np.float64(1.0).imag\r\n\r\n def test_dtype_tuple(self, level=rlevel):\r\n # Ticket #334\r\n assert_(np.dtype('i4') == np.dtype(('i4', ())))\r\n\r\n def test_dtype_posttuple(self, level=rlevel):\r\n # Ticket #335\r\n np.dtype([('col1', '()i4')])\r\n\r\n def test_numeric_carray_compare(self, level=rlevel):\r\n # Ticket #341\r\n assert_equal(np.array(['X'], 'c'), asbytes('X'))\r\n\r\n def test_string_array_size(self, level=rlevel):\r\n # Ticket #342\r\n self.assertRaises(ValueError,\r\n np.array, [['X'], ['X', 'X', 'X']], '|S1')\r\n\r\n def test_dtype_repr(self, level=rlevel):\r\n # Ticket #344\r\n dt1 = np.dtype(('uint32', 2))\r\n dt2 = np.dtype(('uint32', (2,)))\r\n assert_equal(dt1.__repr__(), dt2.__repr__())\r\n\r\n def test_reshape_order(self, level=rlevel):\r\n # Make sure reshape order works.\r\n a = np.arange(6).reshape(2, 3, order='F')\r\n assert_equal(a, [[0, 2, 4], [1, 3, 5]])\r\n a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\r\n b = a[:, 1]\r\n assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])\r\n\r\n def test_reshape_zero_strides(self, level=rlevel):\r\n # Issue #380, test reshaping of zero strided arrays\r\n a = np.ones(1)\r\n a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))\r\n assert_(a.reshape(5, 1).strides[0] == 0)\r\n\r\n def test_reshape_zero_size(self, level=rlevel):\r\n # GitHub Issue #2700, setting shape failed for 0-sized arrays\r\n a = np.ones((0, 2))\r\n a.shape = (-1, 2)\r\n\r\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\r\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.\r\n @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)\r\n def test_reshape_trailing_ones_strides(self):\r\n # GitHub issue gh-2949, bad strides for trailing ones of new shape\r\n a = np.zeros(12, dtype=np.int32)[::2] # not contiguous\r\n strides_c = (16, 8, 8, 8)\r\n strides_f = (8, 24, 48, 48)\r\n assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)\r\n assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)\r\n assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))\r\n\r\n def test_repeat_discont(self, level=rlevel):\r\n # Ticket #352\r\n a = np.arange(12).reshape(4, 3)[:, 2]\r\n assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])\r\n\r\n def test_array_index(self, level=rlevel):\r\n # Make sure optimization is not called in this case.\r\n a = np.array([1, 2, 3])\r\n a2 = np.array([[1, 2, 3]])\r\n assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])\r\n\r\n def test_object_argmax(self, level=rlevel):\r\n a = np.array([1, 2, 3], dtype=object)\r\n assert_(a.argmax() == 2)\r\n\r\n def test_recarray_fields(self, level=rlevel):\r\n # Ticket #372\r\n dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])\r\n dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])\r\n for a in [np.array([(1, 2), (3, 4)], \"i4,i4\"),\r\n np.rec.array([(1, 2), (3, 4)], \"i4,i4\"),\r\n np.rec.array([(1, 2), (3, 4)]),\r\n np.rec.fromarrays([(1, 2), (3, 4)], \"i4,i4\"),\r\n np.rec.fromarrays([(1, 2), (3, 4)])]:\r\n assert_(a.dtype in [dt0, dt1])\r\n\r\n def test_random_shuffle(self, level=rlevel):\r\n # Ticket #374\r\n a = np.arange(5).reshape((5, 1))\r\n b = a.copy()\r\n np.random.shuffle(b)\r\n assert_equal(np.sort(b, axis=0), a)\r\n\r\n def test_refcount_vdot(self, 
level=rlevel):\r\n # Changeset #3443\r\n _assert_valid_refcount(np.vdot)\r\n\r\n def test_startswith(self, level=rlevel):\r\n ca = np.char.array(['Hi', 'There'])\r\n assert_equal(ca.startswith('H'), [True, False])\r\n\r\n def test_noncommutative_reduce_accumulate(self, level=rlevel):\r\n # Ticket #413\r\n tosubtract = np.arange(5)\r\n todivide = np.array([2.0, 0.5, 0.25])\r\n assert_equal(np.subtract.reduce(tosubtract), -10)\r\n assert_equal(np.divide.reduce(todivide), 16.0)\r\n assert_array_equal(np.subtract.accumulate(tosubtract),\r\n np.array([0, -1, -3, -6, -10]))\r\n assert_array_equal(np.divide.accumulate(todivide),\r\n np.array([2., 4., 16.]))\r\n\r\n def test_convolve_empty(self, level=rlevel):\r\n # Convolve should raise an error for empty input array.\r\n self.assertRaises(ValueError, np.convolve, [], [1])\r\n self.assertRaises(ValueError, np.convolve, [1], [])\r\n\r\n def test_multidim_byteswap(self, level=rlevel):\r\n # Ticket #449\r\n r = np.array([(1, (0, 1, 2))], dtype=\"i2,3i2\")\r\n assert_array_equal(r.byteswap(),\r\n np.array([(256, (0, 256, 512))], r.dtype))\r\n\r\n def test_string_NULL(self, level=rlevel):\r\n # Changeset 3557\r\n assert_equal(np.array(\"a\\x00\\x0b\\x0c\\x00\").item(),\r\n 'a\\x00\\x0b\\x0c')\r\n\r\n def test_junk_in_string_fields_of_recarray(self, level=rlevel):\r\n # Ticket #483\r\n r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])\r\n assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))\r\n\r\n def test_take_output(self, level=rlevel):\r\n # Ensure that 'take' honours output parameter.\r\n x = np.arange(12).reshape((3, 4))\r\n a = np.take(x, [0, 2], axis=1)\r\n b = np.zeros_like(a)\r\n np.take(x, [0, 2], axis=1, out=b)\r\n assert_array_equal(a, b)\r\n\r\n def test_take_object_fail(self):\r\n # Issue gh-3001\r\n d = 123.\r\n a = np.array([d, 1], dtype=object)\r\n ref_d = sys.getrefcount(d)\r\n try:\r\n a.take([0, 100])\r\n except IndexError:\r\n pass\r\n assert_(ref_d == sys.getrefcount(d))\r\n\r\n def test_array_str_64bit(self, level=rlevel):\r\n # Ticket #501\r\n s = np.array([1, np.nan], dtype=np.float64)\r\n with np.errstate(all='raise'):\r\n np.array_str(s) # Should succeed\r\n\r\n def test_frompyfunc_endian(self, level=rlevel):\r\n # Ticket #503\r\n from math import radians\r\n uradians = np.frompyfunc(radians, 1, 1)\r\n big_endian = np.array([83.4, 83.5], dtype='>f8')\r\n little_endian = np.array([83.4, 83.5], dtype='<f8')\r\n assert_almost_equal(uradians(big_endian).astype(float),\r\n uradians(little_endian).astype(float))\r\n\r\n def test_mem_string_arr(self, level=rlevel):\r\n # Ticket #514\r\n s = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\r\n t = []\r\n np.hstack((t, s))\r\n\r\n def test_arr_transpose(self, level=rlevel):\r\n # Ticket #516\r\n x = np.random.rand(*(2,)*16)\r\n x.transpose(list(range(16))) # Should succeed\r\n\r\n def test_string_mergesort(self, level=rlevel):\r\n # Ticket #540\r\n x = np.array(['a']*32)\r\n assert_array_equal(x.argsort(kind='m'), np.arange(32))\r\n\r\n def test_argmax_byteorder(self, level=rlevel):\r\n # Ticket #546\r\n a = np.arange(3, dtype='>f')\r\n assert_(a[a.argmax()] == a.max())\r\n\r\n def test_rand_seed(self, level=rlevel):\r\n # Ticket #555\r\n for l in np.arange(4):\r\n np.random.seed(l)\r\n\r\n def test_mem_deallocation_leak(self, level=rlevel):\r\n # Ticket #562\r\n a = np.zeros(5, dtype=float)\r\n b = np.array(a, dtype=float)\r\n del a, b\r\n\r\n def test_mem_on_invalid_dtype(self):\r\n \"Ticket #583\"\r\n self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], 
str)\r\n\r\n def test_dot_negative_stride(self, level=rlevel):\r\n # Ticket #588\r\n x = np.array([[1, 5, 25, 125., 625]])\r\n y = np.array([[20.], [160.], [640.], [1280.], [1024.]])\r\n z = y[::-1].copy()\r\n y2 = y[::-1]\r\n assert_equal(np.dot(x, z), np.dot(x, y2))\r\n\r\n def test_object_casting(self, level=rlevel):\r\n # This used to trigger the object-type version of\r\n # the bitwise_or operation, because float64 -> object\r\n # casting succeeds\r\n def rs():\r\n x = np.ones([484, 286])\r\n y = np.zeros([484, 286])\r\n x |= y\r\n\r\n self.assertRaises(TypeError, rs)\r\n\r\n def test_unicode_scalar(self, level=rlevel):\r\n # Ticket #600\r\n x = np.array([\"DROND\", \"DROND1\"], dtype=\"U6\")\r\n el = x[1]\r\n new = pickle.loads(pickle.dumps(el))\r\n assert_equal(new, el)\r\n\r\n def test_arange_non_native_dtype(self, level=rlevel):\r\n # Ticket #616\r\n for T in ('>f4', '<f4'):\r\n dt = np.dtype(T)\r\n assert_equal(np.arange(0, dtype=dt).dtype, dt)\r\n assert_equal(np.arange(0.5, dtype=dt).dtype, dt)\r\n assert_equal(np.arange(5, dtype=dt).dtype, dt)\r\n\r\n def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):\r\n s = np.ones(10, dtype=float)\r\n x = np.array((15,), dtype=float)\r\n\r\n def ia(x, s, v):\r\n x[(s > 0)] = v\r\n\r\n # After removing deprecation, the following are ValueErrors.\r\n # This might seem odd as compared to the value error below. This\r\n # is due to the fact that the new code always uses \"nonzero\" logic\r\n # and the boolean special case is not taken.\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore', DeprecationWarning)\r\n warnings.simplefilter('ignore', np.VisibleDeprecationWarning)\r\n self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))\r\n self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))\r\n # Old special case (different code path):\r\n self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))\r\n self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))\r\n\r\n def test_mem_scalar_indexing(self, level=rlevel):\r\n # Ticket #603\r\n x = np.array([0], dtype=float)\r\n index = np.array(0, dtype=np.int32)\r\n x[index]\r\n\r\n def test_binary_repr_0_width(self, level=rlevel):\r\n assert_equal(np.binary_repr(0, width=3), '000')\r\n\r\n def test_fromstring(self, level=rlevel):\r\n assert_equal(np.fromstring(\"12:09:09\", dtype=int, sep=\":\"),\r\n [12, 9, 9])\r\n\r\n def test_searchsorted_variable_length(self, level=rlevel):\r\n x = np.array(['a', 'aa', 'b'])\r\n y = np.array(['d', 'e'])\r\n assert_equal(x.searchsorted(y), [3, 3])\r\n\r\n def test_string_argsort_with_zeros(self, level=rlevel):\r\n # Check argsort for strings containing zeros.\r\n x = np.fromstring(\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\r\n assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))\r\n assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))\r\n\r\n def test_string_sort_with_zeros(self, level=rlevel):\r\n # Check sort for strings containing zeros.\r\n x = np.fromstring(\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\r\n y = np.fromstring(\"\\x00\\x01\\x00\\x02\", dtype=\"|S2\")\r\n assert_array_equal(np.sort(x, kind=\"q\"), y)\r\n\r\n def test_copy_detection_zero_dim(self, level=rlevel):\r\n # Ticket #658\r\n np.indices((0, 3, 4)).T.reshape(-1, 3)\r\n\r\n def test_flat_byteorder(self, level=rlevel):\r\n # Ticket #657\r\n x = np.arange(10)\r\n assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])\r\n assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))\r\n\r\n def 
test_uint64_from_negative(self, level=rlevel):\r\n assert_equal(np.uint64(-2), np.uint64(18446744073709551614))\r\n\r\n def test_sign_bit(self, level=rlevel):\r\n x = np.array([0, -0.0, 0])\r\n assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')\r\n\r\n def test_flat_index_byteswap(self, level=rlevel):\r\n for dt in (np.dtype('<i4'), np.dtype('>i4')):\r\n x = np.array([-1, 0, 1], dtype=dt)\r\n assert_equal(x.flat[0].dtype, x[0].dtype)\r\n\r\n def test_copy_detection_corner_case(self, level=rlevel):\r\n # Ticket #658\r\n np.indices((0, 3, 4)).T.reshape(-1, 3)\r\n\r\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\r\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,\r\n # 0-sized reshape itself is tested elsewhere.\r\n @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)\r\n def test_copy_detection_corner_case2(self, level=rlevel):\r\n # Ticket #771: strides are not set correctly when reshaping 0-sized\r\n # arrays\r\n b = np.indices((0, 3, 4)).T.reshape(-1, 3)\r\n assert_equal(b.strides, (3 * b.itemsize, b.itemsize))\r\n\r\n def test_object_array_refcounting(self, level=rlevel):\r\n # Ticket #633\r\n if not hasattr(sys, 'getrefcount'):\r\n return\r\n\r\n # NB. this is probably CPython-specific\r\n\r\n cnt = sys.getrefcount\r\n\r\n a = object()\r\n b = object()\r\n c = object()\r\n\r\n cnt0_a = cnt(a)\r\n cnt0_b = cnt(b)\r\n cnt0_c = cnt(c)\r\n\r\n # -- 0d -> 1-d broadcast slice assignment\r\n\r\n arr = np.zeros(5, dtype=np.object_)\r\n\r\n arr[:] = a\r\n assert_equal(cnt(a), cnt0_a + 5)\r\n\r\n arr[:] = b\r\n assert_equal(cnt(a), cnt0_a)\r\n assert_equal(cnt(b), cnt0_b + 5)\r\n\r\n arr[:2] = c\r\n assert_equal(cnt(b), cnt0_b + 3)\r\n assert_equal(cnt(c), cnt0_c + 2)\r\n\r\n del arr\r\n\r\n # -- 1-d -> 2-d broadcast slice assignment\r\n\r\n arr = np.zeros((5, 2), dtype=np.object_)\r\n arr0 = np.zeros(2, dtype=np.object_)\r\n\r\n arr0[0] = a\r\n assert_(cnt(a) == cnt0_a + 1)\r\n arr0[1] = b\r\n assert_(cnt(b) == cnt0_b + 1)\r\n\r\n arr[:,:] = arr0\r\n assert_(cnt(a) == cnt0_a + 6)\r\n assert_(cnt(b) == cnt0_b + 6)\r\n\r\n arr[:, 0] = None\r\n assert_(cnt(a) == cnt0_a + 1)\r\n\r\n del arr, arr0\r\n\r\n # -- 2-d copying + flattening\r\n\r\n arr = np.zeros((5, 2), dtype=np.object_)\r\n\r\n arr[:, 0] = a\r\n arr[:, 1] = b\r\n assert_(cnt(a) == cnt0_a + 5)\r\n assert_(cnt(b) == cnt0_b + 5)\r\n\r\n arr2 = arr.copy()\r\n assert_(cnt(a) == cnt0_a + 10)\r\n assert_(cnt(b) == cnt0_b + 10)\r\n\r\n arr2 = arr[:, 0].copy()\r\n assert_(cnt(a) == cnt0_a + 10)\r\n assert_(cnt(b) == cnt0_b + 5)\r\n\r\n arr2 = arr.flatten()\r\n assert_(cnt(a) == cnt0_a + 10)\r\n assert_(cnt(b) == cnt0_b + 10)\r\n\r\n del arr, arr2\r\n\r\n # -- concatenate, repeat, take, choose\r\n\r\n arr1 = np.zeros((5, 1), dtype=np.object_)\r\n arr2 = np.zeros((5, 1), dtype=np.object_)\r\n\r\n arr1[...] = a\r\n arr2[...] 
= b\r\n assert_(cnt(a) == cnt0_a + 5)\r\n assert_(cnt(b) == cnt0_b + 5)\r\n\r\n tmp = np.concatenate((arr1, arr2))\r\n assert_(cnt(a) == cnt0_a + 5 + 5)\r\n assert_(cnt(b) == cnt0_b + 5 + 5)\r\n\r\n tmp = arr1.repeat(3, axis=0)\r\n assert_(cnt(a) == cnt0_a + 5 + 3*5)\r\n\r\n tmp = arr1.take([1, 2, 3], axis=0)\r\n assert_(cnt(a) == cnt0_a + 5 + 3)\r\n\r\n x = np.array([[0], [1], [0], [1], [1]], int)\r\n tmp = x.choose(arr1, arr2)\r\n assert_(cnt(a) == cnt0_a + 5 + 2)\r\n assert_(cnt(b) == cnt0_b + 5 + 3)\r\n\r\n del tmp # Avoid pyflakes unused variable warning\r\n\r\n def test_mem_custom_float_to_array(self, level=rlevel):\r\n # Ticket 702\r\n class MyFloat(object):\r\n def __float__(self):\r\n return 1.0\r\n\r\n tmp = np.atleast_1d([MyFloat()])\r\n tmp.astype(float) # Should succeed\r\n\r\n def test_object_array_refcount_self_assign(self, level=rlevel):\r\n # Ticket #711\r\n class VictimObject(object):\r\n deleted = False\r\n\r\n def __del__(self):\r\n self.deleted = True\r\n\r\n d = VictimObject()\r\n arr = np.zeros(5, dtype=np.object_)\r\n arr[:] = d\r\n del d\r\n arr[:] = arr # refcount of 'd' might hit zero here\r\n assert_(not arr[0].deleted)\r\n arr[:] = arr # trying to induce a segfault by doing it again...\r\n assert_(not arr[0].deleted)\r\n\r\n def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):\r\n x = [1, 2, 3]\r\n self.assertRaises(ValueError,\r\n np.fromiter, [xi for xi in x], dtype='S')\r\n\r\n def test_reduce_big_object_array(self, level=rlevel):\r\n # Ticket #713\r\n oldsize = np.setbufsize(10*16)\r\n a = np.array([None]*161, object)\r\n assert_(not np.any(a))\r\n np.setbufsize(oldsize)\r\n\r\n def test_mem_0d_array_index(self, level=rlevel):\r\n # Ticket #714\r\n np.zeros(10)[np.array(0)]\r\n\r\n def test_floats_from_string(self, level=rlevel):\r\n # Ticket #640, floats from string\r\n fsingle = np.single('1.234')\r\n fdouble = np.double('1.234')\r\n flongdouble = np.longdouble('1.234')\r\n assert_almost_equal(fsingle, 1.234)\r\n assert_almost_equal(fdouble, 1.234)\r\n assert_almost_equal(flongdouble, 1.234)\r\n\r\n def test_nonnative_endian_fill(self, level=rlevel):\r\n # Non-native endian arrays were incorrectly filled with scalars\r\n # before r5034.\r\n if sys.byteorder == 'little':\r\n dtype = np.dtype('>i4')\r\n else:\r\n dtype = np.dtype('<i4')\r\n x = np.empty([1], dtype=dtype)\r\n x.fill(1)\r\n assert_equal(x, np.array([1], dtype=dtype))\r\n\r\n def test_dot_alignment_sse2(self, level=rlevel):\r\n # Test for ticket #551, changeset r5140\r\n x = np.zeros((30, 40))\r\n y = pickle.loads(pickle.dumps(x))\r\n # y is now typically not aligned on a 8-byte boundary\r\n z = np.ones((1, y.shape[0]))\r\n # This shouldn't cause a segmentation fault:\r\n np.dot(z, y)\r\n\r\n def test_astype_copy(self, level=rlevel):\r\n # Ticket #788, changeset r5155\r\n # The test data file was generated by scipy.io.savemat.\r\n # The dtype is float64, but the isbuiltin attribute is 0.\r\n data_dir = path.join(path.dirname(__file__), 'data')\r\n filename = path.join(data_dir, \"astype_copy.pkl\")\r\n if sys.version_info[0] >= 3:\r\n f = open(filename, 'rb')\r\n xp = pickle.load(f, encoding='latin1')\r\n f.close()\r\n else:\r\n f = open(filename)\r\n xp = pickle.load(f)\r\n f.close()\r\n xpd = xp.astype(np.float64)\r\n assert_((xp.__array_interface__['data'][0] !=\r\n xpd.__array_interface__['data'][0]))\r\n\r\n def test_compress_small_type(self, level=rlevel):\r\n # Ticket #789, changeset 5217.\r\n # compress with out argument segfaulted if cannot cast safely\r\n import numpy as 
np\r\n a = np.array([[1, 2], [3, 4]])\r\n b = np.zeros((2, 1), dtype=np.single)\r\n try:\r\n a.compress([True, False], axis=1, out=b)\r\n raise AssertionError(\"compress with an out which cannot be \"\r\n \"safely casted should not return \"\r\n \"successfully\")\r\n except TypeError:\r\n pass\r\n\r\n def test_attributes(self, level=rlevel):\r\n # Ticket #791\r\n class TestArray(np.ndarray):\r\n def __new__(cls, data, info):\r\n result = np.array(data)\r\n result = result.view(cls)\r\n result.info = info\r\n return result\r\n\r\n def __array_finalize__(self, obj):\r\n self.info = getattr(obj, 'info', '')\r\n\r\n dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')\r\n assert_(dat.info == 'jubba')\r\n dat.resize((4, 2))\r\n assert_(dat.info == 'jubba')\r\n dat.sort()\r\n assert_(dat.info == 'jubba')\r\n dat.fill(2)\r\n assert_(dat.info == 'jubba')\r\n dat.put([2, 3, 4], [6, 3, 4])\r\n assert_(dat.info == 'jubba')\r\n dat.setfield(4, np.int32, 0)\r\n assert_(dat.info == 'jubba')\r\n dat.setflags()\r\n assert_(dat.info == 'jubba')\r\n assert_(dat.all(1).info == 'jubba')\r\n assert_(dat.any(1).info == 'jubba')\r\n assert_(dat.argmax(1).info == 'jubba')\r\n assert_(dat.argmin(1).info == 'jubba')\r\n assert_(dat.argsort(1).info == 'jubba')\r\n assert_(dat.astype(TestArray).info == 'jubba')\r\n assert_(dat.byteswap().info == 'jubba')\r\n assert_(dat.clip(2, 7).info == 'jubba')\r\n assert_(dat.compress([0, 1, 1]).info == 'jubba')\r\n assert_(dat.conj().info == 'jubba')\r\n assert_(dat.conjugate().info == 'jubba')\r\n assert_(dat.copy().info == 'jubba')\r\n dat2 = TestArray([2, 3, 1, 0], 'jubba')\r\n choices = [[0, 1, 2, 3], [10, 11, 12, 13],\r\n [20, 21, 22, 23], [30, 31, 32, 33]]\r\n assert_(dat2.choose(choices).info == 'jubba')\r\n assert_(dat.cumprod(1).info == 'jubba')\r\n assert_(dat.cumsum(1).info == 'jubba')\r\n assert_(dat.diagonal().info == 'jubba')\r\n assert_(dat.flatten().info == 'jubba')\r\n assert_(dat.getfield(np.int32, 0).info == 'jubba')\r\n assert_(dat.imag.info == 'jubba')\r\n assert_(dat.max(1).info == 'jubba')\r\n assert_(dat.mean(1).info == 'jubba')\r\n assert_(dat.min(1).info == 'jubba')\r\n assert_(dat.newbyteorder().info == 'jubba')\r\n assert_(dat.prod(1).info == 'jubba')\r\n assert_(dat.ptp(1).info == 'jubba')\r\n assert_(dat.ravel().info == 'jubba')\r\n assert_(dat.real.info == 'jubba')\r\n assert_(dat.repeat(2).info == 'jubba')\r\n assert_(dat.reshape((2, 4)).info == 'jubba')\r\n assert_(dat.round().info == 'jubba')\r\n assert_(dat.squeeze().info == 'jubba')\r\n assert_(dat.std(1).info == 'jubba')\r\n assert_(dat.sum(1).info == 'jubba')\r\n assert_(dat.swapaxes(0, 1).info == 'jubba')\r\n assert_(dat.take([2, 3, 5]).info == 'jubba')\r\n assert_(dat.transpose().info == 'jubba')\r\n assert_(dat.T.info == 'jubba')\r\n assert_(dat.var(1).info == 'jubba')\r\n assert_(dat.view(TestArray).info == 'jubba')\r\n # These methods do not preserve subclasses\r\n assert_(type(dat.nonzero()[0]) is np.ndarray)\r\n assert_(type(dat.nonzero()[1]) is np.ndarray)\r\n\r\n def test_recarray_tolist(self, level=rlevel):\r\n # Ticket #793, changeset r5215\r\n # Comparisons fail for NaN, so we can't use random memory\r\n # for the test.\r\n buf = np.zeros(40, dtype=np.int8)\r\n a = np.recarray(2, formats=\"i4,f8,f8\", names=\"id,x,y\", buf=buf)\r\n b = a.tolist()\r\n assert_( a[0].tolist() == b[0])\r\n assert_( a[1].tolist() == b[1])\r\n\r\n def test_nonscalar_item_method(self):\r\n # Make sure that .item() fails graciously when it should\r\n a = np.arange(5)\r\n assert_raises(ValueError, 
a.item)\r\n\r\n def test_char_array_creation(self, level=rlevel):\r\n a = np.array('123', dtype='c')\r\n b = np.array(asbytes_nested(['1', '2', '3']))\r\n assert_equal(a, b)\r\n\r\n def test_unaligned_unicode_access(self, level=rlevel):\r\n # Ticket #825\r\n for i in range(1, 9):\r\n msg = 'unicode offset: %d chars' % i\r\n t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])\r\n x = np.array([(asbytes('a'), sixu('b'))], dtype=t)\r\n if sys.version_info[0] >= 3:\r\n assert_equal(str(x), \"[(b'a', 'b')]\", err_msg=msg)\r\n else:\r\n assert_equal(str(x), \"[('a', u'b')]\", err_msg=msg)\r\n\r\n def test_sign_for_complex_nan(self, level=rlevel):\r\n # Ticket 794.\r\n with np.errstate(invalid='ignore'):\r\n C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])\r\n have = np.sign(C)\r\n want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])\r\n assert_equal(have, want)\r\n\r\n def test_for_equal_names(self, level=rlevel):\r\n # Ticket #674\r\n dt = np.dtype([('foo', float), ('bar', float)])\r\n a = np.zeros(10, dt)\r\n b = list(a.dtype.names)\r\n b[0] = \"notfoo\"\r\n a.dtype.names = b\r\n assert_(a.dtype.names[0] == \"notfoo\")\r\n assert_(a.dtype.names[1] == \"bar\")\r\n\r\n def test_for_object_scalar_creation(self, level=rlevel):\r\n # Ticket #816\r\n a = np.object_()\r\n b = np.object_(3)\r\n b2 = np.object_(3.0)\r\n c = np.object_([4, 5])\r\n d = np.object_([None, {}, []])\r\n assert_(a is None)\r\n assert_(type(b) is int)\r\n assert_(type(b2) is float)\r\n assert_(type(c) is np.ndarray)\r\n assert_(c.dtype == object)\r\n assert_(d.dtype == object)\r\n\r\n def test_array_resize_method_system_error(self):\r\n # Ticket #840 - order should be an invalid keyword.\r\n x = np.array([[0, 1], [2, 3]])\r\n self.assertRaises(TypeError, x.resize, (2, 2), order='C')\r\n\r\n def test_for_zero_length_in_choose(self, level=rlevel):\r\n \"Ticket #882\"\r\n a = np.array(1)\r\n self.assertRaises(ValueError, lambda x: x.choose([]), a)\r\n\r\n def test_array_ndmin_overflow(self):\r\n \"Ticket #947.\"\r\n self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))\r\n\r\n def test_errobj_reference_leak(self, level=rlevel):\r\n # Ticket #955\r\n with np.errstate(all=\"ignore\"):\r\n z = int(0)\r\n p = np.int32(-1)\r\n\r\n gc.collect()\r\n n_before = len(gc.get_objects())\r\n z**p # this shouldn't leak a reference to errobj\r\n gc.collect()\r\n n_after = len(gc.get_objects())\r\n assert_(n_before >= n_after, (n_before, n_after))\r\n\r\n def test_void_scalar_with_titles(self, level=rlevel):\r\n # No ticket\r\n data = [('john', 4), ('mary', 5)]\r\n dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]\r\n arr = np.array(data, dtype=dtype1)\r\n assert_(arr[0][0] == 'john')\r\n assert_(arr[0][1] == 4)\r\n\r\n def test_void_scalar_constructor(self):\r\n #Issue #1550\r\n\r\n #Create test string data, construct void scalar from data and assert\r\n #that void scalar contains original data.\r\n test_string = np.array(\"test\")\r\n test_string_void_scalar = np.core.multiarray.scalar(\r\n np.dtype((\"V\", test_string.dtype.itemsize)), test_string.tobytes())\r\n\r\n assert_(test_string_void_scalar.view(test_string.dtype) == test_string)\r\n\r\n #Create record scalar, construct from data and assert that\r\n #reconstructed scalar is correct.\r\n test_record = np.ones((), \"i,i\")\r\n test_record_void_scalar = np.core.multiarray.scalar(\r\n test_record.dtype, test_record.tobytes())\r\n\r\n assert_(test_record_void_scalar == test_record)\r\n\r\n #Test pickle and unpickle of void and record scalars\r\n 
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)\r\n assert_(pickle.loads(pickle.dumps(test_record)) == test_record)\r\n\r\n def test_blasdot_uninitialized_memory(self):\r\n # Ticket #950\r\n for m in [0, 1, 2]:\r\n for n in [0, 1, 2]:\r\n for k in range(3):\r\n # Try to ensure that x->data contains non-zero floats\r\n x = np.array([123456789e199], dtype=np.float64)\r\n x.resize((m, 0))\r\n y = np.array([123456789e199], dtype=np.float64)\r\n y.resize((0, n))\r\n\r\n # `dot` should just return zero (m,n) matrix\r\n z = np.dot(x, y)\r\n assert_(np.all(z == 0))\r\n assert_(z.shape == (m, n))\r\n\r\n def test_zeros(self):\r\n # Regression test for #1061.\r\n # Set a size which cannot fit into a 64 bits signed integer\r\n sz = 2 ** 64\r\n good = 'Maximum allowed dimension exceeded'\r\n try:\r\n np.empty(sz)\r\n except ValueError as e:\r\n if not str(e) == good:\r\n self.fail(\"Got msg '%s', expected '%s'\" % (e, good))\r\n except Exception as e:\r\n self.fail(\"Got exception of type %s instead of ValueError\" % type(e))\r\n\r\n def test_huge_arange(self):\r\n # Regression test for #1062.\r\n # Set a size which cannot fit into a 64 bits signed integer\r\n sz = 2 ** 64\r\n good = 'Maximum allowed size exceeded'\r\n try:\r\n np.arange(sz)\r\n self.assertTrue(np.size == sz)\r\n except ValueError as e:\r\n if not str(e) == good:\r\n self.fail(\"Got msg '%s', expected '%s'\" % (e, good))\r\n except Exception as e:\r\n self.fail(\"Got exception of type %s instead of ValueError\" % type(e))\r\n\r\n def test_fromiter_bytes(self):\r\n # Ticket #1058\r\n a = np.fromiter(list(range(10)), dtype='b')\r\n b = np.fromiter(list(range(10)), dtype='B')\r\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\r\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\r\n\r\n def test_array_from_sequence_scalar_array(self):\r\n # Ticket #1078: segfaults when creating an array with a sequence of\r\n # 0d arrays.\r\n a = np.array((np.ones(2), np.array(2)))\r\n assert_equal(a.shape, (2,))\r\n assert_equal(a.dtype, np.dtype(object))\r\n assert_equal(a[0], np.ones(2))\r\n assert_equal(a[1], np.array(2))\r\n\r\n a = np.array(((1,), np.array(1)))\r\n assert_equal(a.shape, (2,))\r\n assert_equal(a.dtype, np.dtype(object))\r\n assert_equal(a[0], (1,))\r\n assert_equal(a[1], np.array(1))\r\n\r\n def test_array_from_sequence_scalar_array2(self):\r\n # Ticket #1081: weird array with strange input...\r\n t = np.array([np.array([]), np.array(0, object)])\r\n assert_equal(t.shape, (2,))\r\n assert_equal(t.dtype, np.dtype(object))\r\n\r\n def test_array_too_big(self):\r\n # Ticket #1080.\r\n assert_raises(ValueError, np.zeros, [975]*7, np.int8)\r\n assert_raises(ValueError, np.zeros, [26244]*5, np.int8)\r\n\r\n def test_dtype_keyerrors_(self):\r\n # Ticket #1106.\r\n dt = np.dtype([('f1', np.uint)])\r\n assert_raises(KeyError, dt.__getitem__, \"f2\")\r\n assert_raises(IndexError, dt.__getitem__, 1)\r\n assert_raises(ValueError, dt.__getitem__, 0.0)\r\n\r\n def test_lexsort_buffer_length(self):\r\n # Ticket #1217, don't segfault.\r\n a = np.ones(100, dtype=np.int8)\r\n b = np.ones(100, dtype=np.int32)\r\n i = np.lexsort((a[::-1], b))\r\n assert_equal(i, np.arange(100, dtype=np.int))\r\n\r\n def test_object_array_to_fixed_string(self):\r\n # Ticket #1235.\r\n a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)\r\n b = np.array(a, dtype=(np.str_, 8))\r\n assert_equal(a, b)\r\n c = np.array(a, dtype=(np.str_, 5))\r\n assert_equal(c, np.array(['abcde', 'ijklm']))\r\n d = np.array(a, 
dtype=(np.str_, 12))\r\n assert_equal(a, d)\r\n e = np.empty((2, ), dtype=(np.str_, 8))\r\n e[:] = a[:]\r\n assert_equal(a, e)\r\n\r\n def test_unicode_to_string_cast(self):\r\n # Ticket #1240.\r\n a = np.array([[sixu('abc'), sixu('\\u03a3')],\r\n [sixu('asdf'), sixu('erw')]],\r\n dtype='U')\r\n self.assertRaises(UnicodeEncodeError, np.array, a, 'S4')\r\n\r\n def test_mixed_string_unicode_array_creation(self):\r\n a = np.array(['1234', sixu('123')])\r\n assert_(a.itemsize == 16)\r\n a = np.array([sixu('123'), '1234'])\r\n assert_(a.itemsize == 16)\r\n a = np.array(['1234', sixu('123'), '12345'])\r\n assert_(a.itemsize == 20)\r\n a = np.array([sixu('123'), '1234', sixu('12345')])\r\n assert_(a.itemsize == 20)\r\n a = np.array([sixu('123'), '1234', sixu('1234')])\r\n assert_(a.itemsize == 16)\r\n\r\n def test_misaligned_objects_segfault(self):\r\n # Ticket #1198 and #1267\r\n a1 = np.zeros((10,), dtype='O,c')\r\n a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')\r\n a1['f0'] = a2\r\n repr(a1)\r\n np.argmax(a1['f0'])\r\n a1['f0'][1] = \"FOO\"\r\n a1['f0'] = \"FOO\"\r\n np.array(a1['f0'], dtype='S')\r\n np.nonzero(a1['f0'])\r\n a1.sort()\r\n copy.deepcopy(a1)\r\n\r\n def test_misaligned_scalars_segfault(self):\r\n # Ticket #1267\r\n s1 = np.array(('a', 'Foo'), dtype='c,O')\r\n s2 = np.array(('b', 'Bar'), dtype='c,O')\r\n s1['f1'] = s2['f1']\r\n s1['f1'] = 'Baz'\r\n\r\n def test_misaligned_dot_product_objects(self):\r\n # Ticket #1267\r\n # This didn't require a fix, but it's worth testing anyway, because\r\n # it may fail if .dot stops enforcing the arrays to be BEHAVED\r\n a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')\r\n b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')\r\n np.dot(a['f0'], b['f0'])\r\n\r\n def test_byteswap_complex_scalar(self):\r\n # Ticket #1259 and gh-441\r\n for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:\r\n z = np.array([2.2-1.1j], dtype)\r\n x = z[0] # always native-endian\r\n y = x.byteswap()\r\n if x.dtype.byteorder == z.dtype.byteorder:\r\n # little-endian machine\r\n assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))\r\n else:\r\n # big-endian machine\r\n assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))\r\n # double check real and imaginary parts:\r\n assert_equal(x.real, y.real.byteswap())\r\n assert_equal(x.imag, y.imag.byteswap())\r\n\r\n def test_structured_arrays_with_objects1(self):\r\n # Ticket #1299\r\n stra = 'aaaa'\r\n strb = 'bbbb'\r\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\r\n x[x.nonzero()] = x.ravel()[:1]\r\n assert_(x[0, 1] == x[0, 0])\r\n\r\n def test_structured_arrays_with_objects2(self):\r\n # Ticket #1299 second test\r\n stra = 'aaaa'\r\n strb = 'bbbb'\r\n numb = sys.getrefcount(strb)\r\n numa = sys.getrefcount(stra)\r\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\r\n x[x.nonzero()] = x.ravel()[:1]\r\n assert_(sys.getrefcount(strb) == numb)\r\n assert_(sys.getrefcount(stra) == numa + 2)\r\n\r\n def test_duplicate_title_and_name(self):\r\n # Ticket #1254\r\n dtspec = [(('a', 'a'), 'i'), ('b', 'i')]\r\n self.assertRaises(ValueError, np.dtype, dtspec)\r\n\r\n def test_signed_integer_division_overflow(self):\r\n # Ticket #1317.\r\n def test_type(t):\r\n min = np.array([np.iinfo(t).min])\r\n min //= -1\r\n\r\n with np.errstate(divide=\"ignore\"):\r\n for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):\r\n test_type(t)\r\n\r\n def test_buffer_hashlib(self):\r\n try:\r\n from hashlib import md5\r\n except 
ImportError:\r\n from md5 import new as md5\r\n\r\n x = np.array([1, 2, 3], dtype=np.dtype('<i4'))\r\n assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')\r\n\r\n def test_0d_string_scalar(self):\r\n # Bug #1436; the following should succeed\r\n np.asarray('x', '>c')\r\n\r\n def test_log1p_compiler_shenanigans(self):\r\n # Check if log1p is behaving on 32 bit intel systems.\r\n assert_(np.isfinite(np.log1p(np.exp2(-53))))\r\n\r\n def test_fromiter_comparison(self, level=rlevel):\r\n a = np.fromiter(list(range(10)), dtype='b')\r\n b = np.fromiter(list(range(10)), dtype='B')\r\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\r\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\r\n\r\n def test_fromstring_crash(self):\r\n # Ticket #1345: the following should not cause a crash\r\n np.fromstring(asbytes('aa, aa, 1.0'), sep=',')\r\n\r\n def test_ticket_1539(self):\r\n dtypes = [x for x in np.typeDict.values()\r\n if (issubclass(x, np.number)\r\n and not issubclass(x, np.timedelta64))]\r\n a = np.array([], dtypes[0])\r\n failures = []\r\n # ignore complex warnings\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore', np.ComplexWarning)\r\n for x in dtypes:\r\n b = a.astype(x)\r\n for y in dtypes:\r\n c = a.astype(y)\r\n try:\r\n np.dot(b, c)\r\n except TypeError:\r\n failures.append((x, y))\r\n if failures:\r\n raise AssertionError(\"Failures: %r\" % failures)\r\n\r\n def test_ticket_1538(self):\r\n x = np.finfo(np.float32)\r\n for name in 'eps epsneg max min resolution tiny'.split():\r\n assert_equal(type(getattr(x, name)), np.float32,\r\n err_msg=name)\r\n\r\n def test_ticket_1434(self):\r\n # Check that the out= argument in var and std has an effect\r\n data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))\r\n out = np.zeros((3,))\r\n\r\n ret = data.var(axis=1, out=out)\r\n assert_(ret is out)\r\n assert_array_equal(ret, data.var(axis=1))\r\n\r\n ret = data.std(axis=1, out=out)\r\n assert_(ret is out)\r\n assert_array_equal(ret, data.std(axis=1))\r\n\r\n def test_complex_nan_maximum(self):\r\n cnan = complex(0, np.nan)\r\n assert_equal(np.maximum(1, cnan), cnan)\r\n\r\n def test_subclass_int_tuple_assignment(self):\r\n # ticket #1563\r\n class Subclass(np.ndarray):\r\n def __new__(cls, i):\r\n return np.ones((i,)).view(cls)\r\n\r\n x = Subclass(5)\r\n x[(0,)] = 2 # shouldn't raise an exception\r\n assert_equal(x[0], 2)\r\n\r\n def test_ufunc_no_unnecessary_views(self):\r\n # ticket #1548\r\n class Subclass(np.ndarray):\r\n pass\r\n x = np.array([1, 2, 3]).view(Subclass)\r\n y = np.add(x, x, x)\r\n assert_equal(id(x), id(y))\r\n\r\n def test_take_refcount(self):\r\n # ticket #939\r\n a = np.arange(16, dtype=np.float)\r\n a.shape = (4, 4)\r\n lut = np.ones((5 + 3, 4), np.float)\r\n rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)\r\n c1 = sys.getrefcount(rgba)\r\n try:\r\n lut.take(a, axis=0, mode='clip', out=rgba)\r\n except TypeError:\r\n pass\r\n c2 = sys.getrefcount(rgba)\r\n assert_equal(c1, c2)\r\n\r\n def test_fromfile_tofile_seeks(self):\r\n # On Python 3, tofile/fromfile used to get (#1610) the Python\r\n # file handle out of sync\r\n f0 = tempfile.NamedTemporaryFile()\r\n f = f0.file\r\n f.write(np.arange(255, dtype='u1').tobytes())\r\n\r\n f.seek(20)\r\n ret = np.fromfile(f, count=4, dtype='u1')\r\n assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))\r\n assert_equal(f.tell(), 24)\r\n\r\n f.seek(40)\r\n np.array([1, 2, 3], dtype='u1').tofile(f)\r\n assert_equal(f.tell(), 43)\r\n\r\n f.seek(40)\r\n data = 
f.read(3)\r\n assert_equal(data, asbytes(\"\\x01\\x02\\x03\"))\r\n\r\n f.seek(80)\r\n f.read(4)\r\n data = np.fromfile(f, dtype='u1', count=4)\r\n assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))\r\n\r\n f.close()\r\n\r\n def test_complex_scalar_warning(self):\r\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\r\n x = tp(1+2j)\r\n assert_warns(np.ComplexWarning, float, x)\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore')\r\n assert_equal(float(x), float(x.real))\r\n\r\n def test_complex_scalar_complex_cast(self):\r\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\r\n x = tp(1+2j)\r\n assert_equal(complex(x), 1+2j)\r\n\r\n def test_complex_boolean_cast(self):\r\n # Ticket #2218\r\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\r\n x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)\r\n assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))\r\n assert_(np.any(x))\r\n assert_(np.all(x[1:]))\r\n\r\n def test_uint_int_conversion(self):\r\n x = 2**64 - 1\r\n assert_equal(int(np.uint64(x)), x)\r\n\r\n def test_duplicate_field_names_assign(self):\r\n ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')\r\n ra.dtype.names = ('f1', 'f2')\r\n repr(ra) # should not cause a segmentation fault\r\n assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))\r\n\r\n def test_eq_string_and_object_array(self):\r\n # From e-mail thread \"__eq__ with str and object\" (Keith Goodman)\r\n a1 = np.array(['a', 'b'], dtype=object)\r\n a2 = np.array(['a', 'c'])\r\n assert_array_equal(a1 == a2, [True, False])\r\n assert_array_equal(a2 == a1, [True, False])\r\n\r\n def test_nonzero_byteswap(self):\r\n a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)\r\n a.dtype = np.float32\r\n assert_equal(a.nonzero()[0], [1])\r\n a = a.byteswap().newbyteorder()\r\n assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap\r\n\r\n def test_find_common_type_boolean(self):\r\n # Ticket #1695\r\n assert_(np.find_common_type([], ['?', '?']) == '?')\r\n\r\n def test_empty_mul(self):\r\n a = np.array([1.])\r\n a[1:1] *= 2\r\n assert_equal(a, [1.])\r\n\r\n def test_array_side_effect(self):\r\n # The second use of itemsize was throwing an exception because in\r\n # ctors.c, discover_itemsize was calling PyObject_Length without\r\n # checking the return code. 
This failed to get the length of the\r\n # number 2, and the exception hung around until something checked\r\n # PyErr_Occurred() and returned an error.\r\n assert_equal(np.dtype('S10').itemsize, 10)\r\n np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)\r\n assert_equal(np.dtype('S10').itemsize, 10)\r\n\r\n def test_any_float(self):\r\n # all and any for floats\r\n a = np.array([0.1, 0.9])\r\n assert_(np.any(a))\r\n assert_(np.all(a))\r\n\r\n def test_large_float_sum(self):\r\n a = np.arange(10000, dtype='f')\r\n assert_equal(a.sum(dtype='d'), a.astype('d').sum())\r\n\r\n def test_ufunc_casting_out(self):\r\n a = np.array(1.0, dtype=np.float32)\r\n b = np.array(1.0, dtype=np.float64)\r\n c = np.array(1.0, dtype=np.float32)\r\n np.add(a, b, out=c)\r\n assert_equal(c, 2.0)\r\n\r\n def test_array_scalar_contiguous(self):\r\n # Array scalars are both C and Fortran contiguous\r\n assert_(np.array(1.0).flags.c_contiguous)\r\n assert_(np.array(1.0).flags.f_contiguous)\r\n assert_(np.array(np.float32(1.0)).flags.c_contiguous)\r\n assert_(np.array(np.float32(1.0)).flags.f_contiguous)\r\n\r\n def test_squeeze_contiguous(self):\r\n # Similar to GitHub issue #387\r\n a = np.zeros((1, 2)).squeeze()\r\n b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze()\r\n assert_(a.flags.c_contiguous)\r\n assert_(a.flags.f_contiguous)\r\n assert_(b.flags.f_contiguous)\r\n\r\n def test_reduce_contiguous(self):\r\n # GitHub issue #387\r\n a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))\r\n b = np.add.reduce(np.zeros((2, 1, 2)), 1)\r\n assert_(a.flags.c_contiguous)\r\n assert_(a.flags.f_contiguous)\r\n assert_(b.flags.c_contiguous)\r\n\r\n def test_object_array_self_reference(self):\r\n # Object arrays with references to themselves can cause problems\r\n a = np.array(0, dtype=object)\r\n a[()] = a\r\n assert_raises(TypeError, int, a)\r\n assert_raises(TypeError, long, a)\r\n assert_raises(TypeError, float, a)\r\n assert_raises(TypeError, oct, a)\r\n assert_raises(TypeError, hex, a)\r\n\r\n # Test the same for a circular reference.\r\n b = np.array(a, dtype=object)\r\n a[()] = b\r\n assert_raises(TypeError, int, a)\r\n # Numpy has no tp_traverse currently, so circular references\r\n # cannot be detected. So resolve it:\r\n a[()] = 0\r\n\r\n # This was causing a to become like the above\r\n a = np.array(0, dtype=object)\r\n a[...] 
+= 1\r\n assert_equal(a, 1)\r\n\r\n def test_object_array_self_copy(self):\r\n # An object array being copied into itself DECREF'ed before INCREF'ing\r\n # causing segmentation faults (gh-3787)\r\n a = np.array(object(), dtype=object)\r\n np.copyto(a, a)\r\n assert_equal(sys.getrefcount(a[()]), 2)\r\n a[()].__class__ # will segfault if object was deleted\r\n\r\n def test_zerosize_accumulate(self):\r\n \"Ticket #1733\"\r\n x = np.array([[42, 0]], dtype=np.uint32)\r\n assert_equal(np.add.accumulate(x[:-1, 0]), [])\r\n\r\n def test_objectarray_setfield(self):\r\n # Setfield should not overwrite Object fields with non-Object data\r\n x = np.array([1, 2, 3], dtype=object)\r\n assert_raises(TypeError, x.setfield, 4, np.int32, 0)\r\n\r\n def test_setting_rank0_string(self):\r\n \"Ticket #1736\"\r\n s1 = asbytes(\"hello1\")\r\n s2 = asbytes(\"hello2\")\r\n a = np.zeros((), dtype=\"S10\")\r\n a[()] = s1\r\n assert_equal(a, np.array(s1))\r\n a[()] = np.array(s2)\r\n assert_equal(a, np.array(s2))\r\n\r\n a = np.zeros((), dtype='f4')\r\n a[()] = 3\r\n assert_equal(a, np.array(3))\r\n a[()] = np.array(4)\r\n assert_equal(a, np.array(4))\r\n\r\n def test_string_astype(self):\r\n \"Ticket #1748\"\r\n s1 = asbytes('black')\r\n s2 = asbytes('white')\r\n s3 = asbytes('other')\r\n a = np.array([[s1], [s2], [s3]])\r\n assert_equal(a.dtype, np.dtype('S5'))\r\n b = a.astype(np.dtype('S0'))\r\n assert_equal(b.dtype, np.dtype('S5'))\r\n\r\n def test_ticket_1756(self):\r\n # Ticket #1756\r\n s = asbytes('0123456789abcdef')\r\n a = np.array([s]*5)\r\n for i in range(1, 17):\r\n a1 = np.array(a, \"|S%d\" % i)\r\n a2 = np.array([s[:i]]*5)\r\n assert_equal(a1, a2)\r\n\r\n def test_fields_strides(self):\r\n \"Ticket #1760\"\r\n r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')\r\n assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])\r\n assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])\r\n assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])\r\n assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)\r\n\r\n def test_alignment_update(self):\r\n # Check that alignment flag is updated on stride setting\r\n a = np.arange(10)\r\n assert_(a.flags.aligned)\r\n a.strides = 3\r\n assert_(not a.flags.aligned)\r\n\r\n def test_ticket_1770(self):\r\n \"Should not segfault on python 3k\"\r\n import numpy as np\r\n try:\r\n a = np.zeros((1,), dtype=[('f1', 'f')])\r\n a['f1'] = 1\r\n a['f2'] = 1\r\n except ValueError:\r\n pass\r\n except:\r\n raise AssertionError\r\n\r\n def test_ticket_1608(self):\r\n \"x.flat shouldn't modify data\"\r\n x = np.array([[1, 2], [3, 4]]).T\r\n np.array(x.flat)\r\n assert_equal(x, [[1, 3], [2, 4]])\r\n\r\n def test_pickle_string_overwrite(self):\r\n import re\r\n\r\n data = np.array([1], dtype='b')\r\n blob = pickle.dumps(data, protocol=1)\r\n data = pickle.loads(blob)\r\n\r\n # Check that loads does not clobber interned strings\r\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\r\n assert_equal(s[0], \"\\x01\")\r\n data[0] = 0xbb\r\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\r\n assert_equal(s[0], \"\\x01\")\r\n\r\n def test_pickle_bytes_overwrite(self):\r\n if sys.version_info[0] >= 3:\r\n data = np.array([1], dtype='b')\r\n data = pickle.loads(pickle.dumps(data))\r\n data[0] = 0xdd\r\n bytestring = \"\\x01 \".encode('ascii')\r\n assert_equal(bytestring[0:1], '\\x01'.encode('ascii'))\r\n\r\n def test_pickle_py2_array_latin1_hack(self):\r\n # Check that unpickling hacks in Py3 that support\r\n # encoding='latin1' work correctly.\r\n\r\n # Python2 output for 
pickle.dumps(numpy.array([129], dtype='b'))\r\n data = asbytes(\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n(I0\\n\"\r\n \"tp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n(S'i1'\\np8\\n\"\r\n \"I0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nNNNI-1\\nI-1\\nI0\\ntp12\\nbI00\\nS'\\\\x81'\\n\"\r\n \"p13\\ntp14\\nb.\")\r\n if sys.version_info[0] >= 3:\r\n # This should work:\r\n result = pickle.loads(data, encoding='latin1')\r\n assert_array_equal(result, np.array([129], dtype='b'))\r\n # Should not segfault:\r\n assert_raises(Exception, pickle.loads, data, encoding='koi8-r')\r\n\r\n def test_pickle_py2_scalar_latin1_hack(self):\r\n # Check that scalar unpickling hack in Py3 that supports\r\n # encoding='latin1' work correctly.\r\n\r\n # Python2 output for pickle.dumps(...)\r\n datas = [\r\n # (original, python2_pickle, koi8r_validity)\r\n (np.unicode_('\\u6bd2'),\r\n asbytes(\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\r\n \"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\nI0\\n\"\r\n \"tp6\\nbS'\\\\xd2k\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\"),\r\n 'invalid'),\r\n\r\n (np.float64(9e123),\r\n asbytes(\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'f8'\\n\"\r\n \"p2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI-1\\nI-1\\nI0\\ntp6\\n\"\r\n \"bS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np7\\ntp8\\nRp9\\n.\"),\r\n 'invalid'),\r\n\r\n (np.bytes_(asbytes('\\x9c')), # different 8-bit code point in KOI8-R vs latin1\r\n asbytes(\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'S1'\\np2\\n\"\r\n \"I0\\nI1\\ntp3\\nRp4\\n(I3\\nS'|'\\np5\\nNNNI1\\nI1\\nI0\\ntp6\\nbS'\\\\x9c'\\np7\\n\"\r\n \"tp8\\nRp9\\n.\"),\r\n 'different'),\r\n ]\r\n if sys.version_info[0] >= 3:\r\n for original, data, koi8r_validity in datas:\r\n result = pickle.loads(data, encoding='latin1')\r\n assert_equal(result, original)\r\n\r\n # Decoding under non-latin1 encoding (e.g.) KOI8-R can\r\n # produce bad results, but should not segfault.\r\n if koi8r_validity == 'different':\r\n # Unicode code points happen to lie within latin1,\r\n # but are different in koi8-r, resulting to silent\r\n # bogus results\r\n result = pickle.loads(data, encoding='koi8-r')\r\n assert_(result != original)\r\n elif koi8r_validity == 'invalid':\r\n # Unicode code points outside latin1, so results\r\n # to an encoding exception\r\n assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')\r\n else:\r\n raise ValueError(koi8r_validity)\r\n\r\n def test_structured_type_to_object(self):\r\n a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')\r\n a_obj = np.empty((2,), dtype=object)\r\n a_obj[0] = (0, 1)\r\n a_obj[1] = (3, 2)\r\n # astype records -> object\r\n assert_equal(a_rec.astype(object), a_obj)\r\n # '=' records -> object\r\n b = np.empty_like(a_obj)\r\n b[...] = a_rec\r\n assert_equal(b, a_obj)\r\n # '=' object -> records\r\n b = np.empty_like(a_rec)\r\n b[...] = a_obj\r\n assert_equal(b, a_rec)\r\n\r\n def test_assign_obj_listoflists(self):\r\n # Ticket # 1870\r\n # The inner list should get assigned to the object elements\r\n a = np.zeros(4, dtype=object)\r\n b = a.copy()\r\n a[0] = [1]\r\n a[1] = [2]\r\n a[2] = [3]\r\n a[3] = [4]\r\n b[...] = [[1], [2], [3], [4]]\r\n assert_equal(a, b)\r\n # The first dimension should get broadcast\r\n a = np.zeros((2, 2), dtype=object)\r\n a[...] 
= [[1, 2]]\r\n assert_equal(a, [[1, 2], [1, 2]])\r\n\r\n def test_memoryleak(self):\r\n # Ticket #1917 - ensure that array data doesn't leak\r\n for i in range(1000):\r\n # 100MB times 1000 would give 100GB of memory usage if it leaks\r\n a = np.empty((100000000,), dtype='i1')\r\n del a\r\n\r\n def test_ufunc_reduce_memoryleak(self):\r\n a = np.arange(6)\r\n acnt = sys.getrefcount(a)\r\n np.add.reduce(a)\r\n assert_equal(sys.getrefcount(a), acnt)\r\n\r\n def test_search_sorted_invalid_arguments(self):\r\n # Ticket #2021, should not segfault.\r\n x = np.arange(0, 4, dtype='datetime64[D]')\r\n assert_raises(TypeError, x.searchsorted, 1)\r\n\r\n def test_string_truncation(self):\r\n # Ticket #1990 - Data can be truncated in creation of an array from a\r\n # mixed sequence of numeric values and strings\r\n for val in [True, 1234, 123.4, complex(1, 234)]:\r\n for tostr in [asunicode, asbytes]:\r\n b = np.array([val, tostr('xx')])\r\n assert_equal(tostr(b[0]), tostr(val))\r\n b = np.array([tostr('xx'), val])\r\n assert_equal(tostr(b[1]), tostr(val))\r\n\r\n # test also with longer strings\r\n b = np.array([val, tostr('xxxxxxxxxx')])\r\n assert_equal(tostr(b[0]), tostr(val))\r\n b = np.array([tostr('xxxxxxxxxx'), val])\r\n assert_equal(tostr(b[1]), tostr(val))\r\n\r\n def test_string_truncation_ucs2(self):\r\n # Ticket #2081. Python compiled with two byte unicode\r\n # can lead to truncation if itemsize is not properly\r\n # adjusted for Numpy's four byte unicode.\r\n if sys.version_info[0] >= 3:\r\n a = np.array(['abcd'])\r\n else:\r\n a = np.array([sixu('abcd')])\r\n assert_equal(a.dtype.itemsize, 16)\r\n\r\n def test_unique_stable(self):\r\n # Ticket #2063 must always choose stable sort for argsort to\r\n # get consistent results\r\n v = np.array(([0]*5 + [1]*6 + [2]*6)*4)\r\n res = np.unique(v, return_index=True)\r\n tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))\r\n assert_equal(res, tgt)\r\n\r\n def test_unicode_alloc_dealloc_match(self):\r\n # Ticket #1578, the mismatch only showed up when running\r\n # python-debug for python versions >= 2.7, and then as\r\n # a core dump and error message.\r\n a = np.array(['abc'], dtype=np.unicode)[0]\r\n del a\r\n\r\n def test_refcount_error_in_clip(self):\r\n # Ticket #1588\r\n a = np.zeros((2,), dtype='>i2').clip(min=0)\r\n x = a + a\r\n # This used to segfault:\r\n y = str(x)\r\n # Check the final string:\r\n assert_(y == \"[0 0]\")\r\n\r\n def test_searchsorted_wrong_dtype(self):\r\n # Ticket #2189, it used to segfault, so we check that it raises the\r\n # proper exception.\r\n a = np.array([('a', 1)], dtype='S1, int')\r\n assert_raises(TypeError, np.searchsorted, a, 1.2)\r\n # Ticket #2066, similar problem:\r\n dtype = np.format_parser(['i4', 'i4'], [], [])\r\n a = np.recarray((2, ), dtype)\r\n assert_raises(TypeError, np.searchsorted, a, 1)\r\n\r\n def test_complex64_alignment(self):\r\n # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment\r\n dtt = np.complex64\r\n arr = np.arange(10, dtype=dtt)\r\n # 2D array\r\n arr2 = np.reshape(arr, (2, 5))\r\n # Fortran write followed by (C or F) read caused bus error\r\n data_str = arr2.tobytes('F')\r\n data_back = np.ndarray(arr2.shape,\r\n arr2.dtype,\r\n buffer=data_str,\r\n order='F')\r\n assert_array_equal(arr2, data_back)\r\n\r\n def test_structured_count_nonzero(self):\r\n arr = np.array([0, 1]).astype('i4, (2)i4')[:1]\r\n count = np.count_nonzero(arr)\r\n assert_equal(count, 0)\r\n\r\n def test_copymodule_preserves_f_contiguity(self):\r\n a = np.empty((2, 2), order='F')\r\n b = 
copy.copy(a)\r\n c = copy.deepcopy(a)\r\n assert_(b.flags.fortran)\r\n assert_(b.flags.f_contiguous)\r\n assert_(c.flags.fortran)\r\n assert_(c.flags.f_contiguous)\r\n\r\n def test_fortran_order_buffer(self):\r\n import numpy as np\r\n a = np.array([['Hello', 'Foob']], dtype='U5', order='F')\r\n arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)\r\n arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')],\r\n [sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]])\r\n assert_array_equal(arr, arr2)\r\n\r\n def test_assign_from_sequence_error(self):\r\n # Ticket #4024.\r\n arr = np.array([1, 2, 3])\r\n assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])\r\n arr.__setitem__(slice(None), [9])\r\n assert_equal(arr, [9, 9, 9])\r\n\r\n def test_format_on_flex_array_element(self):\r\n # Ticket #4369.\r\n dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])\r\n arr = np.array([('2000-01-01', 1)], dt)\r\n formatted = '{0}'.format(arr[0])\r\n assert_equal(formatted, str(arr[0]))\r\n\r\n def test_deepcopy_on_0d_array(self):\r\n # Ticket #3311.\r\n arr = np.array(3)\r\n arr_cp = copy.deepcopy(arr)\r\n\r\n assert_equal(arr, arr_cp)\r\n assert_equal(arr.shape, arr_cp.shape)\r\n assert_equal(int(arr), int(arr_cp))\r\n self.assertTrue(arr is not arr_cp)\r\n self.assertTrue(isinstance(arr_cp, type(arr)))\r\n\r\n def test_bool_subscript_crash(self):\r\n # gh-4494\r\n c = np.rec.array([(1, 2, 3), (4, 5, 6)])\r\n masked = c[np.array([True, False])]\r\n base = masked.base\r\n del masked, c\r\n base.dtype\r\n\r\n def test_richcompare_crash(self):\r\n # gh-4613\r\n import operator as op\r\n\r\n # dummy class where __array__ throws exception\r\n class Foo(object):\r\n __array_priority__ = 1002\r\n\r\n def __array__(self,*args,**kwargs):\r\n raise Exception()\r\n\r\n rhs = Foo()\r\n lhs = np.array(1)\r\n for f in [op.lt, op.le, op.gt, op.ge]:\r\n if sys.version_info[0] >= 3:\r\n assert_raises(TypeError, f, lhs, rhs)\r\n else:\r\n f(lhs, rhs)\r\n assert_(not op.eq(lhs, rhs))\r\n assert_(op.ne(lhs, rhs))\r\n\r\n def test_richcompare_scalar_and_subclass(self):\r\n # gh-4709\r\n class Foo(np.ndarray):\r\n def __eq__(self, other):\r\n return \"OK\"\r\n\r\n x = np.array([1,2,3]).view(Foo)\r\n assert_equal(10 == x, \"OK\")\r\n assert_equal(np.int32(10) == x, \"OK\")\r\n assert_equal(np.array([10]) == x, \"OK\")\r\n\r\n def test_pickle_empty_string(self):\r\n # gh-3926\r\n\r\n import pickle\r\n test_string = np.string_('')\r\n assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)\r\n\r\n def test_frompyfunc_many_args(self):\r\n # gh-5672\r\n\r\n def passer(*args):\r\n pass\r\n\r\n assert_raises(ValueError, np.frompyfunc, passer, 32, 1)\r\n\r\n def test_repeat_broadcasting(self):\r\n # gh-5743\r\n a = np.arange(60).reshape(3, 4, 5)\r\n for axis in chain(range(-a.ndim, a.ndim), [None]):\r\n assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))\r\n\r\n def test_frompyfunc_nout_0(self):\r\n # gh-2014\r\n\r\n def f(x):\r\n x[0], x[-1] = x[-1], x[0]\r\n\r\n uf = np.frompyfunc(f, 1, 0)\r\n a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])\r\n assert_equal(uf(a), ())\r\n assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])\r\n\r\n def test_leak_in_structured_dtype_comparison(self):\r\n # gh-6250\r\n recordtype = np.dtype([('a', np.float64),\r\n ('b', np.int32),\r\n ('d', (np.str, 5))])\r\n\r\n # Simple case\r\n a = np.zeros(2, dtype=recordtype)\r\n for i in range(100):\r\n a == a\r\n assert_(sys.getrefcount(a) < 10)\r\n\r\n # The case in the bug report.\r\n before = 
sys.getrefcount(a)\r\n u, v = a[0], a[1]\r\n u == v\r\n del u, v\r\n gc.collect()\r\n after = sys.getrefcount(a)\r\n assert_equal(before, after)\r\n\r\n def test_empty_percentile(self):\r\n # gh-6530 / gh-6553\r\n assert_array_equal(np.percentile(np.arange(10), []), np.array([]))\r\n\r\n def test_void_compare_segfault(self):\r\n # gh-6922. The following should not segfault\r\n a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])\r\n a.sort()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n", "\"\"\" Test functions for linalg.decomp module\r\n\r\n\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\n__usage__ = \"\"\"\r\nBuild linalg:\r\n python setup_linalg.py build\r\nRun tests if scipy is installed:\r\n python -c 'import scipy;scipy.linalg.test()'\r\nRun tests if linalg is not installed:\r\n python tests/test_decomp.py\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom numpy.testing import (TestCase, assert_equal, assert_almost_equal,\r\n assert_array_almost_equal, assert_array_equal,\r\n assert_raises, assert_, assert_allclose,\r\n run_module_suite, dec)\r\n\r\nfrom scipy._lib.six import xrange\r\n\r\nfrom scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,\r\n schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq,\r\n eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth, ordqz)\r\nfrom scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \\\r\n dsbev, dsbevd, dsbevx, zhbevd, zhbevx\r\nfrom scipy.linalg.misc import norm\r\n\r\nfrom numpy import array, transpose, sometrue, diag, ones, linalg, \\\r\n argsort, zeros, arange, float32, complex64, dot, conj, identity, \\\r\n ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \\\r\n asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\\\r\n triu, tril\r\n\r\nfrom numpy.random import normal, seed, random\r\n\r\nfrom scipy.linalg._testutils import assert_no_overwrite\r\n\r\n# digit precision to use in asserts for different types\r\nDIGITS = {'d':11, 'D':11, 'f':4, 'F':4}\r\n\r\n# XXX: This function should be available through numpy.testing\r\n\r\n\r\ndef assert_dtype_equal(act, des):\r\n if isinstance(act, ndarray):\r\n act = act.dtype\r\n else:\r\n act = dtype(act)\r\n\r\n if isinstance(des, ndarray):\r\n des = des.dtype\r\n else:\r\n des = dtype(des)\r\n\r\n assert_(act == des, 'dtype mismatch: \"%s\" (should be \"%s\") ' % (act, des))\r\n\r\n# XXX: This function should not be defined here, but somewhere in\r\n# scipy.linalg namespace\r\n\r\n\r\ndef symrand(dim_or_eigv):\r\n \"\"\"Return a random symmetric (Hermitian) matrix.\r\n\r\n If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues\r\n uniformly distributed on (-1,1).\r\n\r\n If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose\r\n eigenvalues are 'a'.\r\n \"\"\"\r\n if isinstance(dim_or_eigv, int):\r\n dim = dim_or_eigv\r\n d = random(dim)*2 - 1\r\n elif (isinstance(dim_or_eigv, ndarray) and\r\n len(dim_or_eigv.shape) == 1):\r\n dim = dim_or_eigv.shape[0]\r\n d = dim_or_eigv\r\n else:\r\n raise TypeError(\"input type not supported.\")\r\n\r\n v = random_rot(dim)\r\n h = dot(dot(v.T.conj(), diag(d)), v)\r\n # to avoid roundoff errors, symmetrize the matrix (again)\r\n h = 0.5*(h.T+h)\r\n return h\r\n\r\n# XXX: This function should not be defined here, but somewhere in\r\n# scipy.linalg namespace\r\n\r\n\r\ndef random_rot(dim):\r\n \"\"\"Return a random rotation matrix, drawn from the Haar distribution\r\n (the only uniform distribution on SO(n)).\r\n 
The algorithm is described in the paper\r\n Stewart, G.W., 'The efficient generation of random orthogonal\r\n matrices with an application to condition estimators', SIAM Journal\r\n on Numerical Analysis, 17(3), pp. 403-409, 1980.\r\n For more information see\r\n http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization\"\"\"\r\n H = eye(dim)\r\n D = ones((dim,))\r\n for n in range(1, dim):\r\n x = normal(size=(dim-n+1,))\r\n D[n-1] = sign(x[0])\r\n x[0] -= D[n-1]*sqrt((x*x).sum())\r\n # Householder transformation\r\n\r\n Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum()\r\n mat = eye(dim)\r\n mat[n-1:,n-1:] = Hx\r\n H = dot(H, mat)\r\n # Fix the last sign such that the determinant is 1\r\n D[-1] = -D.prod()\r\n H = (D*H.T).T\r\n return H\r\n\r\n\r\nclass TestEigVals(TestCase):\r\n\r\n def test_simple(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n w = eigvals(a)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n assert_array_almost_equal(w,exact_w)\r\n\r\n def test_simple_tr(self):\r\n a = array([[1,2,3],[1,2,3],[2,5,6]],'d')\r\n a = transpose(a).copy()\r\n a = transpose(a)\r\n w = eigvals(a)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n assert_array_almost_equal(w,exact_w)\r\n\r\n def test_simple_complex(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6+1j]]\r\n w = eigvals(a)\r\n exact_w = [(9+1j+sqrt(92+6j))/2,\r\n 0,\r\n (9+1j-sqrt(92+6j))/2]\r\n assert_array_almost_equal(w,exact_w)\r\n\r\n def test_check_finite(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n w = eigvals(a, check_finite=False)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n assert_array_almost_equal(w,exact_w)\r\n\r\n\r\nclass TestEig(object):\r\n\r\n def test_simple(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n w,v = eig(a)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n v0 = array([1,1,(1+sqrt(93)/3)/2])\r\n v1 = array([3.,0,-1])\r\n v2 = array([1,1,(1-sqrt(93)/3)/2])\r\n v0 = v0 / sqrt(dot(v0,transpose(v0)))\r\n v1 = v1 / sqrt(dot(v1,transpose(v1)))\r\n v2 = v2 / sqrt(dot(v2,transpose(v2)))\r\n assert_array_almost_equal(w,exact_w)\r\n assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))\r\n assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))\r\n assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))\r\n for i in range(3):\r\n assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])\r\n w,v = eig(a,left=1,right=0)\r\n for i in range(3):\r\n assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i])\r\n\r\n def test_simple_complex_eig(self):\r\n a = [[1,2],[-2,1]]\r\n w,vl,vr = eig(a,left=1,right=1)\r\n assert_array_almost_equal(w, array([1+2j, 1-2j]))\r\n for i in range(2):\r\n assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])\r\n for i in range(2):\r\n assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),\r\n conjugate(w[i])*vl[:,i])\r\n\r\n def test_simple_complex(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6+1j]]\r\n w,vl,vr = eig(a,left=1,right=1)\r\n for i in range(3):\r\n assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])\r\n for i in range(3):\r\n assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),\r\n conjugate(w[i])*vl[:,i])\r\n\r\n def _check_gen_eig(self, A, B):\r\n A, B = asarray(A), asarray(B)\r\n msg = \"\\n%r\\n%r\" % (A, B)\r\n w, vr = eig(A,B)\r\n wt = eigvals(A,B)\r\n val1 = dot(A, vr)\r\n val2 = dot(B, vr) * w\r\n res = val1 - val2\r\n for i in range(res.shape[1]):\r\n if all(isfinite(res[:, i])):\r\n assert_array_almost_equal(res[:, i], 0, err_msg=msg)\r\n\r\n assert_array_almost_equal(sort(w[isfinite(w)]), sort(wt[isfinite(wt)]),\r\n err_msg=msg)\r\n\r\n length = 
np.empty(len(vr))\r\n for i in xrange(len(vr)):\r\n length[i] = norm(vr[:, i])\r\n assert_array_almost_equal(length, np.ones(length.size), err_msg=msg)\r\n\r\n @dec.knownfailureif(True, \"See gh-2254.\")\r\n def test_singular(self):\r\n # Example taken from\r\n # http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html\r\n A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34],\r\n [27,31,26,21,15], [38,44,44,24,30]))\r\n B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25],\r\n [16,25,27,14,23], [24,35,18,21,22]))\r\n\r\n olderr = np.seterr(all='ignore')\r\n try:\r\n self._check_gen_eig(A, B)\r\n finally:\r\n np.seterr(**olderr)\r\n\r\n def test_falker(self):\r\n \"\"\"Test matrices giving some Nan generalized eigen values.\"\"\"\r\n M = diag(array(([1,0,3])))\r\n K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))\r\n D = array(([1,-1,0],[-1,1,0],[0,0,0]))\r\n Z = zeros((3,3))\r\n I = identity(3)\r\n A = bmat([[I,Z],[Z,-K]])\r\n B = bmat([[Z,I],[M,D]])\r\n\r\n olderr = np.seterr(all='ignore')\r\n try:\r\n self._check_gen_eig(A, B)\r\n finally:\r\n np.seterr(**olderr)\r\n\r\n def test_bad_geneig(self):\r\n # Ticket #709 (strange return values from DGGEV)\r\n\r\n def matrices(omega):\r\n c1 = -9 + omega**2\r\n c2 = 2*omega\r\n A = [[1, 0, 0, 0],\r\n [0, 1, 0, 0],\r\n [0, 0, c1, 0],\r\n [0, 0, 0, c1]]\r\n B = [[0, 0, 1, 0],\r\n [0, 0, 0, 1],\r\n [1, 0, 0, -c2],\r\n [0, 1, c2, 0]]\r\n return A, B\r\n\r\n # With a buggy LAPACK, this can fail for different omega on different\r\n # machines -- so we need to test several values\r\n olderr = np.seterr(all='ignore')\r\n try:\r\n for k in xrange(100):\r\n A, B = matrices(omega=k*5./100)\r\n self._check_gen_eig(A, B)\r\n finally:\r\n np.seterr(**olderr)\r\n\r\n def test_check_finite(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n w,v = eig(a, check_finite=False)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n v0 = array([1,1,(1+sqrt(93)/3)/2])\r\n v1 = array([3.,0,-1])\r\n v2 = array([1,1,(1-sqrt(93)/3)/2])\r\n v0 = v0 / sqrt(dot(v0,transpose(v0)))\r\n v1 = v1 / sqrt(dot(v1,transpose(v1)))\r\n v2 = v2 / sqrt(dot(v2,transpose(v2)))\r\n assert_array_almost_equal(w,exact_w)\r\n assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))\r\n assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))\r\n assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))\r\n for i in range(3):\r\n assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])\r\n\r\n def test_not_square_error(self):\r\n \"\"\"Check that passing a non-square array raises a ValueError.\"\"\"\r\n A = np.arange(6).reshape(3,2)\r\n assert_raises(ValueError, eig, A)\r\n\r\n def test_shape_mismatch(self):\r\n \"\"\"Check that passing arrays of with different shapes raises a ValueError.\"\"\"\r\n A = identity(2)\r\n B = np.arange(9.0).reshape(3,3)\r\n assert_raises(ValueError, eig, A, B)\r\n assert_raises(ValueError, eig, B, A)\r\n\r\n\r\nclass TestEigBanded(TestCase):\r\n\r\n def __init__(self, *args):\r\n TestCase.__init__(self, *args)\r\n\r\n self.create_bandmat()\r\n\r\n def create_bandmat(self):\r\n \"\"\"Create the full matrix `self.fullmat` and\r\n the corresponding band matrix `self.bandmat`.\"\"\"\r\n N = 10\r\n self.KL = 2 # number of subdiagonals (below the diagonal)\r\n self.KU = 2 # number of superdiagonals (above the diagonal)\r\n\r\n # symmetric band matrix\r\n self.sym_mat = (diag(1.0*ones(N))\r\n + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1)\r\n + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))\r\n\r\n # hermitian band matrix\r\n self.herm_mat = (diag(-1.0*ones(N))\r\n + 
1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1)\r\n + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))\r\n\r\n # general real band matrix\r\n self.real_mat = (diag(1.0*ones(N))\r\n + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1)\r\n + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))\r\n\r\n # general complex band matrix\r\n self.comp_mat = (1j*diag(1.0*ones(N))\r\n + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1)\r\n + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))\r\n\r\n # Eigenvalues and -vectors from linalg.eig\r\n ew, ev = linalg.eig(self.sym_mat)\r\n ew = ew.real\r\n args = argsort(ew)\r\n self.w_sym_lin = ew[args]\r\n self.evec_sym_lin = ev[:,args]\r\n\r\n ew, ev = linalg.eig(self.herm_mat)\r\n ew = ew.real\r\n args = argsort(ew)\r\n self.w_herm_lin = ew[args]\r\n self.evec_herm_lin = ev[:,args]\r\n\r\n # Extract upper bands from symmetric and hermitian band matrices\r\n # (for use in dsbevd, dsbevx, zhbevd, zhbevx\r\n # and their single precision versions)\r\n LDAB = self.KU + 1\r\n self.bandmat_sym = zeros((LDAB, N), dtype=float)\r\n self.bandmat_herm = zeros((LDAB, N), dtype=complex)\r\n for i in xrange(LDAB):\r\n self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i)\r\n self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i)\r\n\r\n # Extract bands from general real and complex band matrix\r\n # (for use in dgbtrf, dgbtrs and their single precision versions)\r\n LDAB = 2*self.KL + self.KU + 1\r\n self.bandmat_real = zeros((LDAB, N), dtype=float)\r\n self.bandmat_real[2*self.KL,:] = diag(self.real_mat) # diagonal\r\n for i in xrange(self.KL):\r\n # superdiagonals\r\n self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1)\r\n # subdiagonals\r\n self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1)\r\n\r\n self.bandmat_comp = zeros((LDAB, N), dtype=complex)\r\n self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat) # diagonal\r\n for i in xrange(self.KL):\r\n # superdiagonals\r\n self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1)\r\n # subdiagonals\r\n self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1)\r\n\r\n # absolute value for linear equation system A*x = b\r\n self.b = 1.0*arange(N)\r\n self.bc = self.b * (1 + 1j)\r\n\r\n #####################################################################\r\n\r\n def test_dsbev(self):\r\n \"\"\"Compare dsbev eigenvalues and eigenvectors with\r\n the result of linalg.eig.\"\"\"\r\n w, evec, info = dsbev(self.bandmat_sym, compute_v=1)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))\r\n\r\n def test_dsbevd(self):\r\n \"\"\"Compare dsbevd eigenvalues and eigenvectors with\r\n the result of linalg.eig.\"\"\"\r\n w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))\r\n\r\n def test_dsbevx(self):\r\n \"\"\"Compare dsbevx eigenvalues and eigenvectors\r\n with the result of linalg.eig.\"\"\"\r\n N,N = shape(self.sym_mat)\r\n ## Achtung: Argumente 0.0,0.0,range?\r\n w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,\r\n compute_v=1, range=2)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))\r\n\r\n def test_zhbevd(self):\r\n \"\"\"Compare zhbevd eigenvalues and eigenvectors\r\n with the result of 
linalg.eig.\"\"\"\r\n w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_herm_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))\r\n\r\n def test_zhbevx(self):\r\n \"\"\"Compare zhbevx eigenvalues and eigenvectors\r\n with the result of linalg.eig.\"\"\"\r\n N,N = shape(self.herm_mat)\r\n ## Achtung: Argumente 0.0,0.0,range?\r\n w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,\r\n compute_v=1, range=2)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_herm_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))\r\n\r\n def test_eigvals_banded(self):\r\n \"\"\"Compare eigenvalues of eigvals_banded with those of linalg.eig.\"\"\"\r\n w_sym = eigvals_banded(self.bandmat_sym)\r\n w_sym = w_sym.real\r\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\r\n\r\n w_herm = eigvals_banded(self.bandmat_herm)\r\n w_herm = w_herm.real\r\n assert_array_almost_equal(sort(w_herm), self.w_herm_lin)\r\n\r\n # extracting eigenvalues with respect to an index range\r\n ind1 = 2\r\n ind2 = 6\r\n w_sym_ind = eigvals_banded(self.bandmat_sym,\r\n select='i', select_range=(ind1, ind2))\r\n assert_array_almost_equal(sort(w_sym_ind),\r\n self.w_sym_lin[ind1:ind2+1])\r\n w_herm_ind = eigvals_banded(self.bandmat_herm,\r\n select='i', select_range=(ind1, ind2))\r\n assert_array_almost_equal(sort(w_herm_ind),\r\n self.w_herm_lin[ind1:ind2+1])\r\n\r\n # extracting eigenvalues with respect to a value range\r\n v_lower = self.w_sym_lin[ind1] - 1.0e-5\r\n v_upper = self.w_sym_lin[ind2] + 1.0e-5\r\n w_sym_val = eigvals_banded(self.bandmat_sym,\r\n select='v', select_range=(v_lower, v_upper))\r\n assert_array_almost_equal(sort(w_sym_val),\r\n self.w_sym_lin[ind1:ind2+1])\r\n\r\n v_lower = self.w_herm_lin[ind1] - 1.0e-5\r\n v_upper = self.w_herm_lin[ind2] + 1.0e-5\r\n w_herm_val = eigvals_banded(self.bandmat_herm,\r\n select='v', select_range=(v_lower, v_upper))\r\n assert_array_almost_equal(sort(w_herm_val),\r\n self.w_herm_lin[ind1:ind2+1])\r\n\r\n w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)\r\n w_sym = w_sym.real\r\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\r\n\r\n def test_eig_banded(self):\r\n \"\"\"Compare eigenvalues and eigenvectors of eig_banded\r\n with those of linalg.eig. 
\"\"\"\r\n w_sym, evec_sym = eig_banded(self.bandmat_sym)\r\n evec_sym_ = evec_sym[:,argsort(w_sym.real)]\r\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))\r\n\r\n w_herm, evec_herm = eig_banded(self.bandmat_herm)\r\n evec_herm_ = evec_herm[:,argsort(w_herm.real)]\r\n assert_array_almost_equal(sort(w_herm), self.w_herm_lin)\r\n assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))\r\n\r\n # extracting eigenvalues with respect to an index range\r\n ind1 = 2\r\n ind2 = 6\r\n w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,\r\n select='i', select_range=(ind1, ind2))\r\n assert_array_almost_equal(sort(w_sym_ind),\r\n self.w_sym_lin[ind1:ind2+1])\r\n assert_array_almost_equal(abs(evec_sym_ind),\r\n abs(self.evec_sym_lin[:,ind1:ind2+1]))\r\n\r\n w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,\r\n select='i', select_range=(ind1, ind2))\r\n assert_array_almost_equal(sort(w_herm_ind),\r\n self.w_herm_lin[ind1:ind2+1])\r\n assert_array_almost_equal(abs(evec_herm_ind),\r\n abs(self.evec_herm_lin[:,ind1:ind2+1]))\r\n\r\n # extracting eigenvalues with respect to a value range\r\n v_lower = self.w_sym_lin[ind1] - 1.0e-5\r\n v_upper = self.w_sym_lin[ind2] + 1.0e-5\r\n w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,\r\n select='v', select_range=(v_lower, v_upper))\r\n assert_array_almost_equal(sort(w_sym_val),\r\n self.w_sym_lin[ind1:ind2+1])\r\n assert_array_almost_equal(abs(evec_sym_val),\r\n abs(self.evec_sym_lin[:,ind1:ind2+1]))\r\n\r\n v_lower = self.w_herm_lin[ind1] - 1.0e-5\r\n v_upper = self.w_herm_lin[ind2] + 1.0e-5\r\n w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,\r\n select='v', select_range=(v_lower, v_upper))\r\n assert_array_almost_equal(sort(w_herm_val),\r\n self.w_herm_lin[ind1:ind2+1])\r\n assert_array_almost_equal(abs(evec_herm_val),\r\n abs(self.evec_herm_lin[:,ind1:ind2+1]))\r\n\r\n w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)\r\n evec_sym_ = evec_sym[:,argsort(w_sym.real)]\r\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))\r\n\r\n def test_dgbtrf(self):\r\n \"\"\"Compare dgbtrf LU factorisation with the LU factorisation result\r\n of linalg.lu.\"\"\"\r\n M,N = shape(self.real_mat)\r\n lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)\r\n\r\n # extract matrix u from lu_symm_band\r\n u = diag(lu_symm_band[2*self.KL,:])\r\n for i in xrange(self.KL + self.KU):\r\n u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)\r\n\r\n p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)\r\n assert_array_almost_equal(u, u_lin)\r\n\r\n def test_zgbtrf(self):\r\n \"\"\"Compare zgbtrf LU factorisation with the LU factorisation result\r\n of linalg.lu.\"\"\"\r\n M,N = shape(self.comp_mat)\r\n lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)\r\n\r\n # extract matrix u from lu_symm_band\r\n u = diag(lu_symm_band[2*self.KL,:])\r\n for i in xrange(self.KL + self.KU):\r\n u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)\r\n\r\n p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)\r\n assert_array_almost_equal(u, u_lin)\r\n\r\n def test_dgbtrs(self):\r\n \"\"\"Compare dgbtrs solutions for linear equation system A*x = b\r\n with solutions of linalg.solve.\"\"\"\r\n\r\n lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)\r\n y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)\r\n\r\n y_lin = linalg.solve(self.real_mat, 
self.b)\r\n assert_array_almost_equal(y, y_lin)\r\n\r\n def test_zgbtrs(self):\r\n \"\"\"Compare zgbtrs solutions for linear equation system A*x = b\r\n with solutions of linalg.solve.\"\"\"\r\n\r\n lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)\r\n y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)\r\n\r\n y_lin = linalg.solve(self.comp_mat, self.bc)\r\n assert_array_almost_equal(y, y_lin)\r\n\r\n\r\ndef test_eigh():\r\n DIM = 6\r\n v = {'dim': (DIM,),\r\n 'dtype': ('f','d','F','D'),\r\n 'overwrite': (True, False),\r\n 'lower': (True, False),\r\n 'turbo': (True, False),\r\n 'eigvals': (None, (2, DIM-2))}\r\n\r\n for dim in v['dim']:\r\n for typ in v['dtype']:\r\n for overwrite in v['overwrite']:\r\n for turbo in v['turbo']:\r\n for eigenvalues in v['eigvals']:\r\n for lower in v['lower']:\r\n yield (eigenhproblem_standard,\r\n 'ordinary',\r\n dim, typ, overwrite, lower,\r\n turbo, eigenvalues)\r\n yield (eigenhproblem_general,\r\n 'general ',\r\n dim, typ, overwrite, lower,\r\n turbo, eigenvalues)\r\n\r\n\r\ndef test_eigh_of_sparse():\r\n # This tests the rejection of inputs that eigh cannot currently handle.\r\n import scipy.sparse\r\n a = scipy.sparse.identity(2).tocsc()\r\n b = np.atleast_2d(a)\r\n assert_raises(ValueError, eigh, a)\r\n assert_raises(ValueError, eigh, b)\r\n\r\n\r\ndef _complex_symrand(dim, dtype):\r\n a1, a2 = symrand(dim), symrand(dim)\r\n # add antisymmetric matrix as imag part\r\n a = a1 + 1j*(triu(a2)-tril(a2))\r\n return a.astype(dtype)\r\n\r\n\r\ndef eigenhproblem_standard(desc, dim, dtype,\r\n overwrite, lower, turbo,\r\n eigenvalues):\r\n \"\"\"Solve a standard eigenvalue problem.\"\"\"\r\n if iscomplex(empty(1, dtype=dtype)):\r\n a = _complex_symrand(dim, dtype)\r\n else:\r\n a = symrand(dim).astype(dtype)\r\n\r\n if overwrite:\r\n a_c = a.copy()\r\n else:\r\n a_c = a\r\n w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigenvalues)\r\n assert_dtype_equal(z.dtype, dtype)\r\n w = w.astype(dtype)\r\n diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real\r\n assert_array_almost_equal(diag_, w, DIGITS[dtype])\r\n\r\n\r\ndef eigenhproblem_general(desc, dim, dtype,\r\n overwrite, lower, turbo,\r\n eigenvalues):\r\n \"\"\"Solve a generalized eigenvalue problem.\"\"\"\r\n if iscomplex(empty(1, dtype=dtype)):\r\n a = _complex_symrand(dim, dtype)\r\n b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype)\r\n else:\r\n a = symrand(dim).astype(dtype)\r\n b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype)\r\n\r\n if overwrite:\r\n a_c, b_c = a.copy(), b.copy()\r\n else:\r\n a_c, b_c = a, b\r\n\r\n w, z = eigh(a, b, overwrite_a=overwrite, lower=lower,\r\n overwrite_b=overwrite, turbo=turbo, eigvals=eigenvalues)\r\n assert_dtype_equal(z.dtype, dtype)\r\n w = w.astype(dtype)\r\n diag1_ = diag(dot(z.T.conj(), dot(a_c, z))).real\r\n assert_array_almost_equal(diag1_, w, DIGITS[dtype])\r\n diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real\r\n assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype])\r\n\r\n\r\ndef test_eigh_integer():\r\n a = array([[1,2],[2,7]])\r\n b = array([[3,1],[1,5]])\r\n w,z = eigh(a)\r\n w,z = eigh(a,b)\r\n\r\n\r\nclass TestLU(TestCase):\r\n\r\n def __init__(self, *args, **kw):\r\n TestCase.__init__(self, *args, **kw)\r\n\r\n self.a = array([[1,2,3],[1,2,3],[2,5,6]])\r\n self.ca = array([[1,2,3],[1,2,3],[2,5j,6]])\r\n # Those matrices are more robust to detect problems in permutation\r\n # matrices than the ones above\r\n self.b = array([[1,2,3],[4,5,6],[7,8,9]])\r\n self.cb = 
array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])\r\n\r\n # Reectangular matrices\r\n self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])\r\n self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])\r\n\r\n self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])\r\n self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])\r\n\r\n # Medium sizes matrices\r\n self.med = random((30, 40))\r\n self.cmed = random((30, 40)) + 1.j * random((30, 40))\r\n\r\n def _test_common(self, data):\r\n p,l,u = lu(data)\r\n assert_array_almost_equal(dot(dot(p,l),u),data)\r\n pl,u = lu(data,permute_l=1)\r\n assert_array_almost_equal(dot(pl,u),data)\r\n\r\n # Simple tests\r\n def test_simple(self):\r\n self._test_common(self.a)\r\n\r\n def test_simple_complex(self):\r\n self._test_common(self.ca)\r\n\r\n def test_simple2(self):\r\n self._test_common(self.b)\r\n\r\n def test_simple2_complex(self):\r\n self._test_common(self.cb)\r\n\r\n # rectangular matrices tests\r\n def test_hrectangular(self):\r\n self._test_common(self.hrect)\r\n\r\n def test_vrectangular(self):\r\n self._test_common(self.vrect)\r\n\r\n def test_hrectangular_complex(self):\r\n self._test_common(self.chrect)\r\n\r\n def test_vrectangular_complex(self):\r\n self._test_common(self.cvrect)\r\n\r\n # Bigger matrices\r\n def test_medium1(self):\r\n \"\"\"Check lu decomposition on medium size, rectangular matrix.\"\"\"\r\n self._test_common(self.med)\r\n\r\n def test_medium1_complex(self):\r\n \"\"\"Check lu decomposition on medium size, rectangular matrix.\"\"\"\r\n self._test_common(self.cmed)\r\n\r\n def test_check_finite(self):\r\n p, l, u = lu(self.a, check_finite=False)\r\n assert_array_almost_equal(dot(dot(p,l),u), self.a)\r\n\r\n def test_simple_known(self):\r\n # Ticket #1458\r\n for order in ['C', 'F']:\r\n A = np.array([[2, 1],[0, 1.]], order=order)\r\n LU, P = lu_factor(A)\r\n assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))\r\n assert_array_equal(P, np.array([0, 1]))\r\n\r\n\r\nclass TestLUSingle(TestLU):\r\n \"\"\"LU testers for single precision, real and double\"\"\"\r\n def __init__(self, *args, **kw):\r\n TestLU.__init__(self, *args, **kw)\r\n\r\n self.a = self.a.astype(float32)\r\n self.ca = self.ca.astype(complex64)\r\n self.b = self.b.astype(float32)\r\n self.cb = self.cb.astype(complex64)\r\n\r\n self.hrect = self.hrect.astype(float32)\r\n self.chrect = self.hrect.astype(complex64)\r\n\r\n self.vrect = self.vrect.astype(float32)\r\n self.cvrect = self.vrect.astype(complex64)\r\n\r\n self.med = self.vrect.astype(float32)\r\n self.cmed = self.vrect.astype(complex64)\r\n\r\n\r\nclass TestLUSolve(TestCase):\r\n def setUp(self):\r\n seed(1234)\r\n\r\n def test_lu(self):\r\n a0 = random((10,10))\r\n b = random((10,))\r\n\r\n for order in ['C', 'F']:\r\n a = np.array(a0, order=order)\r\n\r\n x1 = solve(a,b)\r\n\r\n lu_a = lu_factor(a)\r\n x2 = lu_solve(lu_a,b)\r\n\r\n assert_array_almost_equal(x1,x2)\r\n\r\n def test_check_finite(self):\r\n a = random((10,10))\r\n b = random((10,))\r\n x1 = solve(a,b)\r\n\r\n lu_a = lu_factor(a, check_finite=False)\r\n x2 = lu_solve(lu_a,b, check_finite=False)\r\n\r\n assert_array_almost_equal(x1,x2)\r\n\r\n\r\nclass TestSVD_GESDD(TestCase):\r\n def setUp(self):\r\n self.lapack_driver = 'gesdd'\r\n seed(1234)\r\n\r\n def test_degenerate(self):\r\n assert_raises(TypeError, svd, [[1.]], lapack_driver=1.)\r\n assert_raises(ValueError, svd, [[1.]], lapack_driver='foo')\r\n\r\n def test_simple(self):\r\n a = [[1,2,3],[1,20,3],[2,5,6]]\r\n for 
full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(3))\r\n assert_array_almost_equal(dot(transpose(vh),vh),identity(3))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_simple_singular(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(3))\r\n assert_array_almost_equal(dot(transpose(vh),vh),identity(3))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_simple_underdet(self):\r\n a = [[1,2,3],[4,5,6]]\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0]))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_simple_overdet(self):\r\n a = [[1,2],[4,5],[3,4]]\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1]))\r\n assert_array_almost_equal(dot(transpose(vh),vh),identity(2))\r\n sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_random(self):\r\n n = 20\r\n m = 15\r\n for i in range(3):\r\n for a in [random([n,m]),random([m,n])]:\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1]))\r\n assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0]))\r\n sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_simple_complex(self):\r\n a = [[1,2,3],[1,2j,3],[2,5,6]]\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))\r\n assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0]))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_random_complex(self):\r\n n = 20\r\n m = 15\r\n for i in range(3):\r\n for full_matrices in (True, False):\r\n for a in [random([n,m]),random([m,n])]:\r\n a = a + 1j*random(list(a.shape))\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))\r\n # This fails when [m,n]\r\n # assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char))\r\n sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n 
assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_crash_1580(self):\r\n sizes = [(13, 23), (30, 50), (60, 100)]\r\n np.random.seed(1234)\r\n for sz in sizes:\r\n for dt in [np.float32, np.float64, np.complex64, np.complex128]:\r\n a = np.random.rand(*sz).astype(dt)\r\n # should not crash\r\n svd(a, lapack_driver=self.lapack_driver)\r\n\r\n def test_check_finite(self):\r\n a = [[1,2,3],[1,20,3],[2,5,6]]\r\n u,s,vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(3))\r\n assert_array_almost_equal(dot(transpose(vh),vh),identity(3))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_gh_5039(self):\r\n # This is a smoke test for https://github.com/scipy/scipy/issues/5039\r\n #\r\n # The following is reported to raise \"ValueError: On entry to DGESDD\r\n # parameter number 12 had an illegal value\".\r\n # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')`\r\n # This is reported to only show up on LAPACK 3.0.3.\r\n #\r\n # The matrix below is taken from the call to\r\n # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest\r\n b = np.array(\r\n [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.],\r\n [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.],\r\n [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.],\r\n [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]])\r\n svd(b, lapack_driver=self.lapack_driver)\r\n\r\n\r\nclass TestSVD_GESVD(TestSVD_GESDD):\r\n def setUp(self):\r\n self.lapack_driver = 'gesvd'\r\n seed(1234)\r\n\r\n\r\nclass TestSVDVals(TestCase):\r\n\r\n def test_empty(self):\r\n for a in [[]], np.empty((2, 0)), np.ones((0, 3)):\r\n s = svdvals(a)\r\n assert_equal(s, np.empty(0))\r\n\r\n def test_simple(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n s = svdvals(a)\r\n assert_(len(s) == 3)\r\n assert_(s[0] >= s[1] >= s[2])\r\n\r\n def test_simple_underdet(self):\r\n a = [[1,2,3],[4,5,6]]\r\n s = svdvals(a)\r\n assert_(len(s) == 2)\r\n assert_(s[0] >= s[1])\r\n\r\n def test_simple_overdet(self):\r\n a = [[1,2],[4,5],[3,4]]\r\n s = svdvals(a)\r\n assert_(len(s) == 2)\r\n assert_(s[0] >= s[1])\r\n\r\n def test_simple_complex(self):\r\n a = [[1,2,3],[1,20,3j],[2,5,6]]\r\n s = svdvals(a)\r\n assert_(len(s) == 3)\r\n assert_(s[0] >= s[1] >= s[2])\r\n\r\n def test_simple_underdet_complex(self):\r\n a = [[1,2,3],[4,5j,6]]\r\n s = svdvals(a)\r\n assert_(len(s) == 2)\r\n assert_(s[0] >= s[1])\r\n\r\n def test_simple_overdet_complex(self):\r\n a = [[1,2],[4,5],[3j,4]]\r\n s = svdvals(a)\r\n assert_(len(s) == 2)\r\n assert_(s[0] >= s[1])\r\n\r\n def test_check_finite(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n s = svdvals(a, check_finite=False)\r\n assert_(len(s) == 3)\r\n assert_(s[0] >= s[1] >= s[2])\r\n\r\n @dec.slow\r\n def test_crash_2609(self):\r\n np.random.seed(1234)\r\n a = np.random.rand(1500, 2800)\r\n # Shouldn't crash:\r\n svdvals(a)\r\n\r\n\r\nclass TestDiagSVD(TestCase):\r\n\r\n def test_simple(self):\r\n assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]])\r\n\r\n\r\nclass TestQR(TestCase):\r\n\r\n def setUp(self):\r\n seed(1234)\r\n\r\n def test_simple(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_simple_left(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r = qr(a)\r\n c = [1, 2, 3]\r\n qc,r2 = qr_multiply(a, c, 
\"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n assert_array_almost_equal(r, r2)\r\n qc,r2 = qr_multiply(a, identity(3), \"left\")\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_simple_right(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r = qr(a)\r\n c = [1, 2, 3]\r\n qc,r2 = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), qc)\r\n assert_array_almost_equal(r, r2)\r\n qc,r = qr_multiply(a, identity(3))\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_simple_pivoting(self):\r\n a = np.asarray([[8,2,3],[2,9,3],[5,3,6]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_left_pivoting(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r,jpvt = qr(a, pivoting=True)\r\n c = [1, 2, 3]\r\n qc,r,jpvt = qr_multiply(a, c, \"left\", True)\r\n assert_array_almost_equal(dot(q, c), qc)\r\n\r\n def test_simple_right_pivoting(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r,jpvt = qr(a, pivoting=True)\r\n c = [1, 2, 3]\r\n qc,r,jpvt = qr_multiply(a, c, pivoting=True)\r\n assert_array_almost_equal(dot(c, q), qc)\r\n\r\n def test_simple_trap(self):\r\n a = [[8,2,3],[2,9,3]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_simple_trap_pivoting(self):\r\n a = np.asarray([[8,2,3],[2,9,3]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_tall(self):\r\n # full version\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_simple_tall_pivoting(self):\r\n # full version pivoting\r\n a = np.asarray([[8,2],[2,9],[5,3]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_tall_e(self):\r\n # economy version\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r = qr(a, mode='economic')\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a)\r\n assert_equal(q.shape, (3,2))\r\n assert_equal(r.shape, (2,2))\r\n\r\n def test_simple_tall_e_pivoting(self):\r\n # economy version pivoting\r\n a = np.asarray([[8,2],[2,9],[5,3]])\r\n q,r,p = qr(a, pivoting=True, mode='economic')\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p], mode='economic')\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_tall_left(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2]\r\n qc,r2 = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n assert_array_almost_equal(r, r2)\r\n c = array([1,2,0])\r\n qc,r2 = qr_multiply(a, c, \"left\", overwrite_c=True)\r\n 
assert_array_almost_equal(dot(q, c[:2]), qc)\r\n qc,r = qr_multiply(a, identity(2), \"left\")\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_tall_left_pivoting(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r,jpvt = qr(a, mode=\"economic\", pivoting=True)\r\n c = [1, 2]\r\n qc,r,kpvt = qr_multiply(a, c, \"left\", True)\r\n assert_array_equal(jpvt, kpvt)\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r,jpvt = qr_multiply(a, identity(2), \"left\", True)\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_tall_right(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2, 3]\r\n cq,r2 = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n assert_array_almost_equal(r, r2)\r\n cq,r = qr_multiply(a, identity(3))\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_simple_tall_right_pivoting(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r,jpvt = qr(a, pivoting=True, mode=\"economic\")\r\n c = [1, 2, 3]\r\n cq,r,jpvt = qr_multiply(a, c, pivoting=True)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True)\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_simple_fat(self):\r\n # full version\r\n a = [[8,2,5],[2,9,3]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a)\r\n assert_equal(q.shape, (2,2))\r\n assert_equal(r.shape, (2,3))\r\n\r\n def test_simple_fat_pivoting(self):\r\n # full version pivoting\r\n a = np.asarray([[8,2,5],[2,9,3]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n assert_equal(q.shape, (2,2))\r\n assert_equal(r.shape, (2,3))\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_fat_e(self):\r\n # economy version\r\n a = [[8,2,3],[2,9,5]]\r\n q,r = qr(a, mode='economic')\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a)\r\n assert_equal(q.shape, (2,2))\r\n assert_equal(r.shape, (2,3))\r\n\r\n def test_simple_fat_e_pivoting(self):\r\n # economy version pivoting\r\n a = np.asarray([[8,2,3],[2,9,5]])\r\n q,r,p = qr(a, pivoting=True, mode='economic')\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n assert_equal(q.shape, (2,2))\r\n assert_equal(r.shape, (2,3))\r\n q2,r2 = qr(a[:,p], mode='economic')\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_fat_left(self):\r\n a = [[8,2,3],[2,9,5]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2]\r\n qc,r2 = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n assert_array_almost_equal(r, r2)\r\n qc,r = qr_multiply(a, identity(2), \"left\")\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_fat_left_pivoting(self):\r\n a = [[8,2,3],[2,9,5]]\r\n q,r,jpvt = qr(a, mode=\"economic\", pivoting=True)\r\n c = [1, 2]\r\n qc,r,jpvt = qr_multiply(a, c, \"left\", True)\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r,jpvt = qr_multiply(a, identity(2), \"left\", True)\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_fat_right(self):\r\n a = [[8,2,3],[2,9,5]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2]\r\n cq,r2 = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n 
assert_array_almost_equal(r, r2)\r\n cq,r = qr_multiply(a, identity(2))\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_simple_fat_right_pivoting(self):\r\n a = [[8,2,3],[2,9,5]]\r\n q,r,jpvt = qr(a, pivoting=True, mode=\"economic\")\r\n c = [1, 2]\r\n cq,r,jpvt = qr_multiply(a, c, pivoting=True)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True)\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_simple_complex(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_simple_complex_left(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n c = [1, 2, 3+4j]\r\n qc,r = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r = qr_multiply(a, identity(3), \"left\")\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_simple_complex_right(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n c = [1, 2, 3+4j]\r\n qc,r = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), qc)\r\n qc,r = qr_multiply(a, identity(3))\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_simple_tall_complex_left(self):\r\n a = [[8,2+3j],[2,9],[5+7j,3]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2+2j]\r\n qc,r2 = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n assert_array_almost_equal(r, r2)\r\n c = array([1,2,0])\r\n qc,r2 = qr_multiply(a, c, \"left\", overwrite_c=True)\r\n assert_array_almost_equal(dot(q, c[:2]), qc)\r\n qc,r = qr_multiply(a, identity(2), \"left\")\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_complex_left_conjugate(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n c = [1, 2, 3+4j]\r\n qc,r = qr_multiply(a, c, \"left\", conjugate=True)\r\n assert_array_almost_equal(dot(q.conjugate(), c), qc)\r\n\r\n def test_simple_complex_tall_left_conjugate(self):\r\n a = [[3,3+4j],[5,2+2j],[3,2]]\r\n q,r = qr(a, mode='economic')\r\n c = [1, 3+4j]\r\n qc,r = qr_multiply(a, c, \"left\", conjugate=True)\r\n assert_array_almost_equal(dot(q.conjugate(), c), qc)\r\n\r\n def test_simple_complex_right_conjugate(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n c = [1, 2, 3+4j]\r\n qc,r = qr_multiply(a, c, conjugate=True)\r\n assert_array_almost_equal(dot(c, q.conjugate()), qc)\r\n\r\n def test_simple_complex_pivoting(self):\r\n a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_complex_left_pivoting(self):\r\n a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])\r\n q,r,jpvt = qr(a, pivoting=True)\r\n c = [1, 2, 3+4j]\r\n qc,r,jpvt = qr_multiply(a, c, \"left\", True)\r\n assert_array_almost_equal(dot(q, c), qc)\r\n\r\n def test_simple_complex_right_pivoting(self):\r\n a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])\r\n q,r,jpvt = qr(a, pivoting=True)\r\n c = [1, 2, 3+4j]\r\n qc,r,jpvt = qr_multiply(a, c, pivoting=True)\r\n assert_array_almost_equal(dot(c, q), qc)\r\n\r\n def test_random(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def 
test_random_left(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n q,r = qr(a)\r\n c = random([n])\r\n qc,r = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r = qr_multiply(a, identity(n), \"left\")\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_random_right(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n q,r = qr(a)\r\n c = random([n])\r\n cq,r = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r = qr_multiply(a, identity(n))\r\n assert_array_almost_equal(q, cq)\r\n\r\n def test_random_pivoting(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_random_tall(self):\r\n # full version\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_random_tall_left(self):\r\n # full version\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a, mode=\"economic\")\r\n c = random([n])\r\n qc,r = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r = qr_multiply(a, identity(n), \"left\")\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_random_tall_right(self):\r\n # full version\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a, mode=\"economic\")\r\n c = random([m])\r\n cq,r = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r = qr_multiply(a, identity(m))\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_random_tall_pivoting(self):\r\n # full version pivoting\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_random_tall_e(self):\r\n # economy version\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a, mode='economic')\r\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a)\r\n assert_equal(q.shape, (m,n))\r\n assert_equal(r.shape, (n,n))\r\n\r\n def test_random_tall_e_pivoting(self):\r\n # economy version pivoting\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r,p = qr(a, pivoting=True, mode='economic')\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n assert_equal(q.shape, (m,n))\r\n assert_equal(r.shape, (n,n))\r\n q2,r2 = qr(a[:,p], mode='economic')\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_random_trap(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_random_trap_pivoting(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r,p = qr(a, 
pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_random_complex(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_random_complex_left(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n q,r = qr(a)\r\n c = random([n])+1j*random([n])\r\n qc,r = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r = qr_multiply(a, identity(n), \"left\")\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_random_complex_right(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n q,r = qr(a)\r\n c = random([n])+1j*random([n])\r\n cq,r = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r = qr_multiply(a, identity(n))\r\n assert_array_almost_equal(q, cq)\r\n\r\n def test_random_complex_pivoting(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_check_finite(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r = qr(a, check_finite=False)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_lwork(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n # Get comparison values\r\n q,r = qr(a, lwork=None)\r\n\r\n # Test against minimum valid lwork\r\n q2,r2 = qr(a, lwork=3)\r\n assert_array_almost_equal(q2,q)\r\n assert_array_almost_equal(r2,r)\r\n\r\n # Test against larger lwork\r\n q3,r3 = qr(a, lwork=10)\r\n assert_array_almost_equal(q3,q)\r\n assert_array_almost_equal(r3,r)\r\n\r\n # Test against explicit lwork=-1\r\n q4,r4 = qr(a, lwork=-1)\r\n assert_array_almost_equal(q4,q)\r\n assert_array_almost_equal(r4,r)\r\n\r\n # Test against invalid lwork\r\n assert_raises(Exception, qr, (a,), {'lwork':0})\r\n assert_raises(Exception, qr, (a,), {'lwork':2})\r\n\r\nclass TestRQ(TestCase):\r\n\r\n def setUp(self):\r\n seed(1234)\r\n\r\n def test_simple(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_r(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n r,q = rq(a)\r\n r2 = rq(a, mode='r')\r\n assert_array_almost_equal(r, r2)\r\n\r\n def test_random(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(n))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_simple_trap(self):\r\n a = [[8,2,3],[2,9,3]]\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_simple_tall(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_simple_fat(self):\r\n a = [[8,2,5],[2,9,3]]\r\n r,q = rq(a)\r\n 
assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_simple_complex(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_random_tall(self):\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(n))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_random_trap(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(n))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_random_trap_economic(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])\r\n r,q = rq(a, mode='economic')\r\n assert_array_almost_equal(dot(q,transpose(q)),identity(m))\r\n assert_array_almost_equal(dot(r,q),a)\r\n assert_equal(q.shape, (m, n))\r\n assert_equal(r.shape, (m, m))\r\n\r\n def test_random_complex(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_random_complex_economic(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])+1j*random([m,n])\r\n r,q = rq(a, mode='economic')\r\n assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m))\r\n assert_array_almost_equal(dot(r,q),a)\r\n assert_equal(q.shape, (m, n))\r\n assert_equal(r.shape, (m, m))\r\n\r\n def test_check_finite(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n r,q = rq(a, check_finite=False)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n\r\ntransp = transpose\r\nany = sometrue\r\n\r\n\r\nclass TestSchur(TestCase):\r\n\r\n def test_simple(self):\r\n a = [[8,12,3],[2,9,3],[10,3,6]]\r\n t,z = schur(a)\r\n assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)\r\n tc,zc = schur(a,'complex')\r\n assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))\r\n assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a)\r\n tc2,zc2 = rsf2csf(tc,zc)\r\n assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a)\r\n\r\n def test_sort(self):\r\n a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]\r\n s,u,sdim = schur(a,sort='lhp')\r\n assert_array_almost_equal([[0.1134,0.5436,0.8316,0.],\r\n [-0.1134,-0.8245,0.5544,0.],\r\n [-0.8213,0.1308,0.0265,-0.5547],\r\n [-0.5475,0.0872,0.0177,0.8321]],\r\n u,3)\r\n assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174],\r\n [0.,-0.5000,9.4472,-0.7184],\r\n [0.,0.,1.4142,-0.1456],\r\n [0.,0.,0.,0.5]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n s,u,sdim = schur(a,sort='rhp')\r\n assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],\r\n [-0.4862,0.4930,-0.1434,-0.7071],\r\n [0.6042,0.3944,-0.6924,0.],\r\n [0.4028,0.5986,0.6924,0.]],\r\n u,3)\r\n assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],\r\n [0.,0.5,6.5809,-3.1870],\r\n [0.,0.,-1.4142,0.9270],\r\n [0.,0.,0.,-0.5]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n s,u,sdim = schur(a,sort='iuc')\r\n assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042],\r\n [-0.8321,0.,-0.3814,-0.4028],\r\n [0.,0.7071,-0.5134,0.4862],\r\n [0.,0.7071,0.5134,-0.4862]],\r\n u,3)\r\n assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974],\r\n 
[0.,0.5000,-3.3191,-14.4130],\r\n [0.,0.,1.4142,2.1573],\r\n [0.,0.,0.,-1.4142]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n s,u,sdim = schur(a,sort='ouc')\r\n assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.],\r\n [-0.4862,0.5134,0.7071,0.],\r\n [0.6042,0.5721,0.,-0.5547],\r\n [0.4028,0.3814,0.,0.8321]],\r\n u,3)\r\n assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974],\r\n [0.,-1.4142,3.3191,6.5809],\r\n [0.,0.,-0.5000,0.],\r\n [0.,0.,0.,0.5000]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n rhp_function = lambda x: x >= 0.0\r\n s,u,sdim = schur(a,sort=rhp_function)\r\n assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],\r\n [-0.4862,0.4930,-0.1434,-0.7071],\r\n [0.6042,0.3944,-0.6924,0.],\r\n [0.4028,0.5986,0.6924,0.]],\r\n u,3)\r\n assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],\r\n [0.,0.5,6.5809,-3.1870],\r\n [0.,0.,-1.4142,0.9270],\r\n [0.,0.,0.,-0.5]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n def test_sort_errors(self):\r\n a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]\r\n assert_raises(ValueError, schur, a, sort='unsupported')\r\n assert_raises(ValueError, schur, a, sort=1)\r\n\r\n def test_check_finite(self):\r\n a = [[8,12,3],[2,9,3],[10,3,6]]\r\n t,z = schur(a, check_finite=False)\r\n assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)\r\n\r\n\r\nclass TestHessenberg(TestCase):\r\n\r\n def test_simple(self):\r\n a = [[-149, -50,-154],\r\n [537, 180, 546],\r\n [-27, -9, -25]]\r\n h1 = [[-149.0000,42.2037,-156.3165],\r\n [-537.6783,152.5511,-554.9272],\r\n [0,0.0728, 2.4489]]\r\n h,q = hessenberg(a,calc_q=1)\r\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\r\n assert_array_almost_equal(h,h1,decimal=4)\r\n\r\n def test_simple_complex(self):\r\n a = [[-149, -50,-154],\r\n [537, 180j, 546],\r\n [-27j, -9, -25]]\r\n h,q = hessenberg(a,calc_q=1)\r\n h1 = dot(transp(conj(q)),dot(a,q))\r\n assert_array_almost_equal(h1,h)\r\n\r\n def test_simple2(self):\r\n a = [[1,2,3,4,5,6,7],\r\n [0,2,3,4,6,7,2],\r\n [0,2,2,3,0,3,2],\r\n [0,0,2,8,0,0,2],\r\n [0,3,1,2,0,1,2],\r\n [0,1,2,3,0,1,0],\r\n [0,0,0,0,0,1,2]]\r\n h,q = hessenberg(a,calc_q=1)\r\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\r\n\r\n def test_simple3(self):\r\n a = np.eye(3)\r\n a[-1, 0] = 2\r\n h, q = hessenberg(a, calc_q=1)\r\n assert_array_almost_equal(dot(transp(q), dot(a, q)), h)\r\n\r\n def test_random(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n h,q = hessenberg(a,calc_q=1)\r\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\r\n\r\n def test_random_complex(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n h,q = hessenberg(a,calc_q=1)\r\n h1 = dot(transp(conj(q)),dot(a,q))\r\n assert_array_almost_equal(h1,h)\r\n\r\n def test_check_finite(self):\r\n a = [[-149, -50,-154],\r\n [537, 180, 546],\r\n [-27, -9, -25]]\r\n h1 = [[-149.0000,42.2037,-156.3165],\r\n [-537.6783,152.5511,-554.9272],\r\n [0,0.0728, 2.4489]]\r\n h,q = hessenberg(a,calc_q=1, check_finite=False)\r\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\r\n assert_array_almost_equal(h,h1,decimal=4)\r\n\r\n def test_2x2(self):\r\n a = [[2, 1], [7, 12]]\r\n\r\n h, q = hessenberg(a, calc_q=1)\r\n assert_array_almost_equal(q, np.eye(2))\r\n assert_array_almost_equal(h, a)\r\n\r\n b = [[2-7j, 1+2j], [7+3j, 12-2j]]\r\n h2, q2 = hessenberg(b, calc_q=1)\r\n assert_array_almost_equal(q2, np.eye(2))\r\n assert_array_almost_equal(h2, b)\r\n\r\n\r\nclass TestQZ(TestCase):\r\n def setUp(self):\r\n seed(12345)\r\n\r\n def 
test_qz_single(self):\r\n n = 5\r\n A = random([n,n]).astype(float32)\r\n B = random([n,n]).astype(float32)\r\n AA,BB,Q,Z = qz(A,B)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\r\n assert_array_almost_equal(dot(Q,Q.T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n\r\n def test_qz_double(self):\r\n n = 5\r\n A = random([n,n])\r\n B = random([n,n])\r\n AA,BB,Q,Z = qz(A,B)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\r\n assert_array_almost_equal(dot(Q,Q.T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n\r\n def test_qz_complex(self):\r\n n = 5\r\n A = random([n,n]) + 1j*random([n,n])\r\n B = random([n,n]) + 1j*random([n,n])\r\n AA,BB,Q,Z = qz(A,B)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B)\r\n assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n assert_(all(diag(BB).imag == 0))\r\n\r\n def test_qz_complex64(self):\r\n n = 5\r\n A = (random([n,n]) + 1j*random([n,n])).astype(complex64)\r\n B = (random([n,n]) + 1j*random([n,n])).astype(complex64)\r\n AA,BB,Q,Z = qz(A,B)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A, decimal=5)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B, decimal=5)\r\n assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n), decimal=5)\r\n assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n), decimal=5)\r\n assert_(all(diag(BB) >= 0))\r\n assert_(all(diag(BB).imag == 0))\r\n\r\n def test_qz_double_complex(self):\r\n n = 5\r\n A = random([n,n])\r\n B = random([n,n])\r\n AA,BB,Q,Z = qz(A,B, output='complex')\r\n aa = dot(dot(Q,AA),Z.conjugate().T)\r\n assert_array_almost_equal(aa.real, A)\r\n assert_array_almost_equal(aa.imag, 0)\r\n bb = dot(dot(Q,BB),Z.conjugate().T)\r\n assert_array_almost_equal(bb.real, B)\r\n assert_array_almost_equal(bb.imag, 0)\r\n assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n\r\n def test_qz_double_sort(self):\r\n # from http://www.nag.com/lapack-ex/node119.html\r\n # NOTE: These matrices may be ill-conditioned and lead to a\r\n # seg fault on certain python versions when compiled with\r\n # sse2 or sse3 older ATLAS/LAPACK binaries for windows\r\n # A = np.array([[3.9, 12.5, -34.5, -0.5],\r\n # [ 4.3, 21.5, -47.5, 7.5],\r\n # [ 4.3, 21.5, -43.5, 3.5],\r\n # [ 4.4, 26.0, -46.0, 6.0 ]])\r\n\r\n # B = np.array([[ 1.0, 2.0, -3.0, 1.0],\r\n # [1.0, 3.0, -5.0, 4.0],\r\n # [1.0, 3.0, -4.0, 3.0],\r\n # [1.0, 3.0, -4.0, 4.0]])\r\n A = np.array([[3.9, 12.5, -34.5, 2.5],\r\n [4.3, 21.5, -47.5, 7.5],\r\n [4.3, 1.5, -43.5, 3.5],\r\n [4.4, 6.0, -46.0, 6.0]])\r\n\r\n B = np.array([[1.0, 1.0, -3.0, 1.0],\r\n [1.0, 3.0, -5.0, 4.4],\r\n [1.0, 2.0, -4.0, 1.0],\r\n [1.2, 3.0, -4.0, 4.0]])\r\n\r\n sort = lambda ar,ai,beta: ai == 0\r\n\r\n assert_raises(ValueError, qz, A, B, sort=sort)\r\n if False:\r\n AA,BB,Q,Z,sdim = qz(A,B,sort=sort)\r\n # assert_(sdim == 2)\r\n assert_(sdim == 4)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\r\n\r\n # test absolute values bc the sign is ambiguous and might be platform\r\n # dependent\r\n assert_array_almost_equal(np.abs(AA), 
np.abs(np.array(\r\n [[35.7864, -80.9061, -12.0629, -9.498],\r\n [0., 2.7638, -2.3505, 7.3256],\r\n [0., 0., 0.6258, -0.0398],\r\n [0., 0., 0., -12.8217]])), 4)\r\n assert_array_almost_equal(np.abs(BB), np.abs(np.array(\r\n [[4.5324, -8.7878, 3.2357, -3.5526],\r\n [0., 1.4314, -2.1894, 0.9709],\r\n [0., 0., 1.3126, -0.3468],\r\n [0., 0., 0., 0.559]])), 4)\r\n assert_array_almost_equal(np.abs(Q), np.abs(np.array(\r\n [[-0.4193, -0.605, -0.1894, -0.6498],\r\n [-0.5495, 0.6987, 0.2654, -0.3734],\r\n [-0.4973, -0.3682, 0.6194, 0.4832],\r\n [-0.5243, 0.1008, -0.7142, 0.4526]])), 4)\r\n assert_array_almost_equal(np.abs(Z), np.abs(np.array(\r\n [[-0.9471, -0.2971, -0.1217, 0.0055],\r\n [-0.0367, 0.1209, 0.0358, 0.9913],\r\n [0.3171, -0.9041, -0.2547, 0.1312],\r\n [0.0346, 0.2824, -0.9587, 0.0014]])), 4)\r\n\r\n # test absolute values bc the sign is ambiguous and might be platform\r\n # dependent\r\n # assert_array_almost_equal(abs(AA), abs(np.array([\r\n # [3.8009, -69.4505, 50.3135, -43.2884],\r\n # [0.0000, 9.2033, -0.2001, 5.9881],\r\n # [0.0000, 0.0000, 1.4279, 4.4453],\r\n # [0.0000, 0.0000, 0.9019, -1.1962]])), 4)\r\n # assert_array_almost_equal(abs(BB), abs(np.array([\r\n # [1.9005, -10.2285, 0.8658, -5.2134],\r\n # [0.0000, 2.3008, 0.7915, 0.4262],\r\n # [0.0000, 0.0000, 0.8101, 0.0000],\r\n # [0.0000, 0.0000, 0.0000, -0.2823]])), 4)\r\n # assert_array_almost_equal(abs(Q), abs(np.array([\r\n # [0.4642, 0.7886, 0.2915, -0.2786],\r\n # [0.5002, -0.5986, 0.5638, -0.2713],\r\n # [0.5002, 0.0154, -0.0107, 0.8657],\r\n # [0.5331, -0.1395, -0.7727, -0.3151]])), 4)\r\n # assert_array_almost_equal(dot(Q,Q.T), eye(4))\r\n # assert_array_almost_equal(abs(Z), abs(np.array([\r\n # [0.9961, -0.0014, 0.0887, -0.0026],\r\n # [0.0057, -0.0404, -0.0938, -0.9948],\r\n # [0.0626, 0.7194, -0.6908, 0.0363],\r\n # [0.0626, -0.6934, -0.7114, 0.0956]])), 4)\r\n # assert_array_almost_equal(dot(Z,Z.T), eye(4))\r\n\r\n # def test_qz_complex_sort(self):\r\n # cA = np.array([\r\n # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],\r\n # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],\r\n # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],\r\n # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])\r\n\r\n # cB = np.array([\r\n # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],\r\n # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],\r\n # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],\r\n # [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])\r\n\r\n # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')\r\n\r\n # eigenvalues = diag(AAS)/diag(BBS)\r\n # assert_(all(np.real(eigenvalues[:sdim] < 0)))\r\n # assert_(all(np.real(eigenvalues[sdim:] > 0)))\r\n\r\n def test_check_finite(self):\r\n n = 5\r\n A = random([n,n])\r\n B = random([n,n])\r\n AA,BB,Q,Z = qz(A,B,check_finite=False)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\r\n assert_array_almost_equal(dot(Q,Q.T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n\r\n\r\ndef _make_pos(X):\r\n # the decompositions can have different signs than verified results\r\n return np.sign(X)*X\r\n\r\n\r\nclass TestOrdQZ(TestCase):\r\n @classmethod\r\n def setupClass(cls):\r\n # http://www.nag.com/lapack-ex/node119.html\r\n cls.A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,\r\n 7.5 + 0.5j],\r\n [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 
58.5j,\r\n -10.5 - 1.5j],\r\n [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,\r\n -7.5 - 3.5j],\r\n [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,\r\n -19.0 - 32.5j]])\r\n\r\n cls.B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],\r\n [0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j],\r\n [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],\r\n [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])\r\n\r\n # http://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml\r\n cls.A2 = np.array([[3.9, 12.5, -34.5, -0.5],\r\n [4.3, 21.5, -47.5, 7.5],\r\n [4.3, 21.5, -43.5, 3.5],\r\n [4.4, 26.0, -46.0, 6.0]])\r\n\r\n cls.B2 = np.array([[1, 2, -3, 1],\r\n [1, 3, -5, 4],\r\n [1, 3, -4, 3],\r\n [1, 3, -4, 4]])\r\n\r\n # example with the eigenvalues\r\n # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,\r\n # 0.61244091\r\n # thus featuring:\r\n # * one complex conjugate eigenvalue pair,\r\n # * one eigenvalue in the lhp\r\n # * 2 eigenvalues in the unit circle\r\n # * 2 non-real eigenvalues\r\n cls.A3 = np.array([[5., 1., 3., 3.],\r\n [4., 4., 2., 7.],\r\n [7., 4., 1., 3.],\r\n [0., 4., 8., 7.]])\r\n cls.B3 = np.array([[8., 10., 6., 10.],\r\n [7., 7., 2., 9.],\r\n [9., 1., 6., 6.],\r\n [5., 1., 4., 7.]])\r\n\r\n def qz_decomp(self, sort):\r\n retc = ordqz(self.A1, self.B1, sort=sort)\r\n ret1 = ordqz(self.A2, self.B2, sort=sort)\r\n ret2 = ordqz(self.A3, self.B3, sort=sort)\r\n return retc, ret1, ret2\r\n\r\n def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):\r\n I = np.eye(*A.shape)\r\n # make sure Q and Z are orthogonal\r\n assert_array_almost_equal(Q.dot(Q.T.conj()), I)\r\n assert_array_almost_equal(Z.dot(Z.T.conj()), I)\r\n # check factorization\r\n assert_array_almost_equal(Q.dot(AA), A.dot(Z))\r\n assert_array_almost_equal(Q.dot(BB), B.dot(Z))\r\n # check shape of AA and BB\r\n assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))\r\n assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))\r\n # check eigenvalues\r\n for i in range(A.shape[0]):\r\n # does the current diagonal element belong to a 2-by-2 block\r\n # that was already checked?\r\n if i > 0 and A[i, i - 1] != 0:\r\n continue\r\n # take care of 2-by-2 blocks\r\n if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:\r\n evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])\r\n # make sure the pair of complex conjugate eigenvalues\r\n # is ordered consistently (positive imaginary part first)\r\n if evals[0].imag < 0:\r\n evals = evals[[1, 0]]\r\n tmp = alpha[i:i + 2]/beta[i:i + 2]\r\n if tmp[0].imag < 0:\r\n tmp = tmp[[1, 0]]\r\n assert_array_almost_equal(evals, tmp)\r\n else:\r\n assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])\r\n sortfun = sort\r\n if sortfun == 'lhp':\r\n sortfun = lambda x, y: (x/y).real < 0\r\n if sortfun == 'rhp':\r\n sortfun = lambda x, y: (x/y).real > 0\r\n if sortfun == 'iuc':\r\n sortfun = lambda x, y: np.abs(x/y) < 1\r\n if sortfun == 'ouc':\r\n sortfun = lambda x, y: np.abs(x/y) > 1\r\n lastsort = True\r\n for i in range(A.shape[0]):\r\n cursort = sortfun(alpha[i], beta[i])\r\n # once the sorting criterion was not matched all subsequent\r\n # eigenvalues also shouldn't match\r\n if not lastsort:\r\n assert(not cursort)\r\n lastsort = cursort\r\n\r\n def test_lhp(self):\r\n retc, ret1, ret2 = self.qz_decomp('lhp')\r\n\r\n self.check(self.A1, self.B1, 'lhp', *retc)\r\n self.check(self.A2, self.B2, 'lhp', *ret1)\r\n self.check(self.A3, self.B3, 'lhp', *ret2)\r\n\r\n def test_rhp(self):\r\n retc, ret1, ret2 = self.qz_decomp('rhp')\r\n\r\n self.check(self.A1, self.B1, 'rhp', *retc)\r\n 
self.check(self.A2, self.B2, 'rhp', *ret1)\r\n self.check(self.A3, self.B3, 'rhp', *ret2)\r\n\r\n def test_iuc(self):\r\n retc, ret1, ret2 = self.qz_decomp('iuc')\r\n\r\n self.check(self.A1, self.B1, 'iuc', *retc)\r\n self.check(self.A2, self.B2, 'iuc', *ret1)\r\n self.check(self.A3, self.B3, 'iuc', *ret2)\r\n\r\n def test_ouc(self):\r\n retc, ret1, ret2 = self.qz_decomp('ouc')\r\n\r\n self.check(self.A1, self.B1, 'ouc', *retc)\r\n self.check(self.A2, self.B2, 'ouc', *ret1)\r\n self.check(self.A3, self.B3, 'ouc', *ret2)\r\n\r\n def test_ref(self):\r\n # real eigenvalues first (top-left corner)\r\n sort = lambda x, y: (x/y).imag == 0\r\n retc, ret1, ret2 = self.qz_decomp(sort)\r\n\r\n self.check(self.A1, self.B1, sort, *retc)\r\n self.check(self.A2, self.B2, sort, *ret1)\r\n self.check(self.A3, self.B3, sort, *ret2)\r\n\r\n def test_cef(self):\r\n # complex eigenvalues first (top-left corner)\r\n sort = lambda x, y: (x/y).imag != 0\r\n retc, ret1, ret2 = self.qz_decomp(sort)\r\n\r\n self.check(self.A1, self.B1, sort, *retc)\r\n self.check(self.A2, self.B2, sort, *ret1)\r\n self.check(self.A3, self.B3, sort, *ret2)\r\n\r\n def test_diff_input_types(self):\r\n ret = ordqz(self.A1, self.B2, sort='lhp')\r\n self.check(self.A1, self.B2, 'lhp', *ret)\r\n\r\n ret = ordqz(self.B2, self.A1, sort='lhp')\r\n self.check(self.B2, self.A1, 'lhp', *ret)\r\n\r\nclass TestOrdQZWorkspaceSize(TestCase):\r\n\r\n def setUp(self):\r\n seed(12345)\r\n\r\n def test_decompose(self):\r\n\r\n N = 202\r\n\r\n # raises error if lwork parameter to dtrsen is too small\r\n for ddtype in [np.float32, np.float64]:\r\n A = random((N,N)).astype(ddtype)\r\n B = random((N,N)).astype(ddtype)\r\n # sort = lambda alphar, alphai, beta: alphar**2 + alphai**2< beta**2\r\n sort = lambda alpha, beta: alpha < beta\r\n [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='real')\r\n\r\n for ddtype in [np.complex, np.complex64]:\r\n A = random((N,N)).astype(ddtype)\r\n B = random((N,N)).astype(ddtype)\r\n sort = lambda alpha, beta: alpha < beta\r\n [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='complex')\r\n\r\n @dec.slow\r\n def test_decompose_ouc(self):\r\n\r\n N = 202\r\n\r\n # segfaults if lwork parameter to dtrsen is too small\r\n for ddtype in [np.float32, np.float64, np.complex, np.complex64]:\r\n A = random((N,N)).astype(ddtype)\r\n B = random((N,N)).astype(ddtype)\r\n [S,T,alpha,beta,U,V] = ordqz(A,B,sort='ouc')\r\n\r\n\r\nclass TestDatacopied(TestCase):\r\n\r\n def test_datacopied(self):\r\n from scipy.linalg.decomp import _datacopied\r\n\r\n M = matrix([[0,1],[2,3]])\r\n A = asarray(M)\r\n L = M.tolist()\r\n M2 = M.copy()\r\n\r\n class Fake1:\r\n def __array__(self):\r\n return A\r\n\r\n class Fake2:\r\n __array_interface__ = A.__array_interface__\r\n\r\n F1 = Fake1()\r\n F2 = Fake2()\r\n\r\n for item, status in [(M, False), (A, False), (L, True),\r\n (M2, False), (F1, False), (F2, False)]:\r\n arr = asarray(item)\r\n assert_equal(_datacopied(arr, item), status,\r\n err_msg=repr(item))\r\n\r\n\r\ndef test_aligned_mem_float():\r\n \"\"\"Check linalg works with non-aligned memory\"\"\"\r\n # Allocate 402 bytes of memory (allocated on boundary)\r\n a = arange(402, dtype=np.uint8)\r\n\r\n # Create an array with boundary offset 4\r\n z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)\r\n z.shape = 10, 10\r\n\r\n eig(z, overwrite_a=True)\r\n eig(z.T, overwrite_a=True)\r\n\r\n\r\ndef test_aligned_mem():\r\n \"\"\"Check linalg works with non-aligned memory\"\"\"\r\n # Allocate 804 bytes of memory (allocated on 
boundary)\r\n a = arange(804, dtype=np.uint8)\r\n\r\n # Create an array with boundary offset 4\r\n z = np.frombuffer(a.data, offset=4, count=100, dtype=float)\r\n z.shape = 10, 10\r\n\r\n eig(z, overwrite_a=True)\r\n eig(z.T, overwrite_a=True)\r\n\r\n\r\ndef test_aligned_mem_complex():\r\n \"\"\"Check that complex objects don't need to be completely aligned\"\"\"\r\n # Allocate 1608 bytes of memory (allocated on boundary)\r\n a = zeros(1608, dtype=np.uint8)\r\n\r\n # Create an array with boundary offset 8\r\n z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)\r\n z.shape = 10, 10\r\n\r\n eig(z, overwrite_a=True)\r\n # This does not need special handling\r\n eig(z.T, overwrite_a=True)\r\n\r\n\r\ndef check_lapack_misaligned(func, args, kwargs):\r\n args = list(args)\r\n for i in range(len(args)):\r\n a = args[:]\r\n if isinstance(a[i],np.ndarray):\r\n # Try misaligning a[i]\r\n aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8)\r\n aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype)\r\n aa.shape = a[i].shape\r\n aa[...] = a[i]\r\n a[i] = aa\r\n func(*a,**kwargs)\r\n if len(a[i].shape) > 1:\r\n a[i] = a[i].T\r\n func(*a,**kwargs)\r\n\r\n\r\[email protected](True, \"Ticket #1152, triggers a segfault in rare cases.\")\r\ndef test_lapack_misaligned():\r\n M = np.eye(10,dtype=float)\r\n R = np.arange(100)\r\n R.shape = 10,10\r\n S = np.arange(20000,dtype=np.uint8)\r\n S = np.frombuffer(S.data, offset=4, count=100, dtype=float)\r\n S.shape = 10, 10\r\n b = np.ones(10)\r\n LU, piv = lu_factor(S)\r\n for (func, args, kwargs) in [\r\n (eig,(S,),dict(overwrite_a=True)), # crash\r\n (eigvals,(S,),dict(overwrite_a=True)), # no crash\r\n (lu,(S,),dict(overwrite_a=True)), # no crash\r\n (lu_factor,(S,),dict(overwrite_a=True)), # no crash\r\n (lu_solve,((LU,piv),b),dict(overwrite_b=True)),\r\n (solve,(S,b),dict(overwrite_a=True,overwrite_b=True)),\r\n (svd,(M,),dict(overwrite_a=True)), # no crash\r\n (svd,(R,),dict(overwrite_a=True)), # no crash\r\n (svd,(S,),dict(overwrite_a=True)), # crash\r\n (svdvals,(S,),dict()), # no crash\r\n (svdvals,(S,),dict(overwrite_a=True)), # crash\r\n (cholesky,(M,),dict(overwrite_a=True)), # no crash\r\n (qr,(S,),dict(overwrite_a=True)), # crash\r\n (rq,(S,),dict(overwrite_a=True)), # crash\r\n (hessenberg,(S,),dict(overwrite_a=True)), # crash\r\n (schur,(S,),dict(overwrite_a=True)), # crash\r\n ]:\r\n yield check_lapack_misaligned, func, args, kwargs\r\n# not properly tested\r\n# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd\r\n\r\n\r\nclass TestOverwrite(object):\r\n def test_eig(self):\r\n assert_no_overwrite(eig, [(3,3)])\r\n assert_no_overwrite(eig, [(3,3), (3,3)])\r\n\r\n def test_eigh(self):\r\n assert_no_overwrite(eigh, [(3,3)])\r\n assert_no_overwrite(eigh, [(3,3), (3,3)])\r\n\r\n def test_eig_banded(self):\r\n assert_no_overwrite(eig_banded, [(3,2)])\r\n\r\n def test_eigvals(self):\r\n assert_no_overwrite(eigvals, [(3,3)])\r\n\r\n def test_eigvalsh(self):\r\n assert_no_overwrite(eigvalsh, [(3,3)])\r\n\r\n def test_eigvals_banded(self):\r\n assert_no_overwrite(eigvals_banded, [(3,2)])\r\n\r\n def test_hessenberg(self):\r\n assert_no_overwrite(hessenberg, [(3,3)])\r\n\r\n def test_lu_factor(self):\r\n assert_no_overwrite(lu_factor, [(3,3)])\r\n\r\n def test_lu_solve(self):\r\n x = np.array([[1,2,3], [4,5,6], [7,8,8]])\r\n xlu = lu_factor(x)\r\n assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])\r\n\r\n def test_lu(self):\r\n assert_no_overwrite(lu, [(3,3)])\r\n\r\n def 
test_qr(self):\r\n assert_no_overwrite(qr, [(3,3)])\r\n\r\n def test_rq(self):\r\n assert_no_overwrite(rq, [(3,3)])\r\n\r\n def test_schur(self):\r\n assert_no_overwrite(schur, [(3,3)])\r\n\r\n def test_schur_complex(self):\r\n assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)],\r\n dtypes=[np.float32, np.float64])\r\n\r\n def test_svd(self):\r\n assert_no_overwrite(svd, [(3,3)])\r\n assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3,3)])\r\n\r\n def test_svdvals(self):\r\n assert_no_overwrite(svdvals, [(3,3)])\r\n\r\n\r\ndef _check_orth(n):\r\n X = np.ones((n, 2), dtype=float)\r\n Y = orth(X)\r\n assert_equal(Y.shape, (n, 1))\r\n assert_allclose(Y, Y.mean(), atol=1e-10)\r\n Y = orth(X.T)\r\n assert_equal(Y.shape, (2, 1))\r\n assert_allclose(Y, Y.mean())\r\n\r\n\r\[email protected]\r\[email protected](np.dtype(np.intp).itemsize < 8, \"test only on 64-bit, else too slow\")\r\ndef test_orth_memory_efficiency():\r\n # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.\r\n # Keep in mind that @dec.slow tests are likely to be running\r\n # under configurations that support 4Gb+ memory for tests related to\r\n # 32 bit overflow.\r\n n = 10*1000*1000\r\n try:\r\n _check_orth(n)\r\n except MemoryError:\r\n raise AssertionError('memory error perhaps caused by orth regression')\r\n\r\n\r\ndef test_orth():\r\n for n in 1, 2, 3, 10, 100:\r\n _check_orth(n)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n", "\"\"\"Base class for sparse matrices\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\nimport sys\r\n\r\nimport numpy as np\r\n\r\nfrom scipy._lib.six import xrange\r\nfrom .sputils import (isdense, isscalarlike, isintlike,\r\n get_sum_dtype, validateaxis)\r\n\r\n__all__ = ['spmatrix', 'isspmatrix', 'issparse',\r\n 'SparseWarning', 'SparseEfficiencyWarning']\r\n\r\n\r\nclass SparseWarning(Warning):\r\n pass\r\n\r\n\r\nclass SparseFormatWarning(SparseWarning):\r\n pass\r\n\r\n\r\nclass SparseEfficiencyWarning(SparseWarning):\r\n pass\r\n\r\n\r\n# The formats that we might potentially understand.\r\n_formats = {'csc': [0, \"Compressed Sparse Column\"],\r\n 'csr': [1, \"Compressed Sparse Row\"],\r\n 'dok': [2, \"Dictionary Of Keys\"],\r\n 'lil': [3, \"LInked List\"],\r\n 'dod': [4, \"Dictionary of Dictionaries\"],\r\n 'sss': [5, \"Symmetric Sparse Skyline\"],\r\n 'coo': [6, \"COOrdinate\"],\r\n 'lba': [7, \"Linpack BAnded\"],\r\n 'egd': [8, \"Ellpack-itpack Generalized Diagonal\"],\r\n 'dia': [9, \"DIAgonal\"],\r\n 'bsr': [10, \"Block Sparse Row\"],\r\n 'msr': [11, \"Modified compressed Sparse Row\"],\r\n 'bsc': [12, \"Block Sparse Column\"],\r\n 'msc': [13, \"Modified compressed Sparse Column\"],\r\n 'ssk': [14, \"Symmetric SKyline\"],\r\n 'nsk': [15, \"Nonsymmetric SKyline\"],\r\n 'jad': [16, \"JAgged Diagonal\"],\r\n 'uss': [17, \"Unsymmetric Sparse Skyline\"],\r\n 'vbr': [18, \"Variable Block Row\"],\r\n 'und': [19, \"Undefined\"]\r\n }\r\n\r\n\r\n# These univariate ufuncs preserve zeros.\r\n_ufuncs_with_fixed_point_at_zero = frozenset([\r\n np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh,\r\n np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad,\r\n np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt])\r\n\r\n\r\nMAXPRINT = 50\r\n\r\n\r\nclass spmatrix(object):\r\n \"\"\" This class provides a base class for all sparse matrices. It\r\n cannot be instantiated. 
Most of the work is provided by subclasses.\r\n \"\"\"\r\n\r\n __array_priority__ = 10.1\r\n ndim = 2\r\n\r\n def __init__(self, maxprint=MAXPRINT):\r\n self._shape = None\r\n if self.__class__.__name__ == 'spmatrix':\r\n raise ValueError(\"This class is not intended\"\r\n \" to be instantiated directly.\")\r\n self.maxprint = maxprint\r\n\r\n def set_shape(self, shape):\r\n shape = tuple(shape)\r\n\r\n if len(shape) != 2:\r\n raise ValueError(\"Only two-dimensional sparse \"\r\n \"arrays are supported.\")\r\n try:\r\n shape = int(shape[0]), int(shape[1]) # floats, other weirdness\r\n except:\r\n raise TypeError('invalid shape')\r\n\r\n if not (shape[0] >= 0 and shape[1] >= 0):\r\n raise ValueError('invalid shape')\r\n\r\n if (self._shape != shape) and (self._shape is not None):\r\n try:\r\n self = self.reshape(shape)\r\n except NotImplementedError:\r\n raise NotImplementedError(\"Reshaping not implemented for %s.\" %\r\n self.__class__.__name__)\r\n self._shape = shape\r\n\r\n def get_shape(self):\r\n return self._shape\r\n\r\n shape = property(fget=get_shape, fset=set_shape)\r\n\r\n def reshape(self, shape, order='C'):\r\n \"\"\"\r\n Gives a new shape to a sparse matrix without changing its data.\r\n\r\n Parameters\r\n ----------\r\n shape : length-2 tuple of ints\r\n The new shape should be compatible with the original shape.\r\n order : 'C', optional\r\n This argument is in the signature *solely* for NumPy\r\n compatibility reasons. Do not pass in anything except\r\n for the default value, as this argument is not used.\r\n\r\n Returns\r\n -------\r\n reshaped_matrix : `self` with the new dimensions of `shape`\r\n\r\n See Also\r\n --------\r\n np.matrix.reshape : NumPy's implementation of 'reshape' for matrices\r\n \"\"\"\r\n raise NotImplementedError(\"Reshaping not implemented for %s.\" %\r\n self.__class__.__name__)\r\n\r\n def astype(self, t):\r\n return self.tocsr().astype(t).asformat(self.format)\r\n\r\n def asfptype(self):\r\n \"\"\"Upcast matrix to a floating point format (if necessary)\"\"\"\r\n\r\n fp_types = ['f', 'd', 'F', 'D']\r\n\r\n if self.dtype.char in fp_types:\r\n return self\r\n else:\r\n for fp_type in fp_types:\r\n if self.dtype <= np.dtype(fp_type):\r\n return self.astype(fp_type)\r\n\r\n raise TypeError('cannot upcast [%s] to a floating '\r\n 'point format' % self.dtype.name)\r\n\r\n def __iter__(self):\r\n for r in xrange(self.shape[0]):\r\n yield self[r, :]\r\n\r\n def getmaxprint(self):\r\n return self.maxprint\r\n\r\n def count_nonzero(self):\r\n \"\"\"Number of non-zero entries, equivalent to\r\n\r\n np.count_nonzero(a.toarray())\r\n\r\n Unlike getnnz() and the nnz property, which return the number of stored\r\n entries (the length of the data attribute), this method counts the\r\n actual number of non-zero entries in data.\r\n \"\"\"\r\n raise NotImplementedError(\"count_nonzero not implemented for %s.\" %\r\n self.__class__.__name__)\r\n\r\n def getnnz(self, axis=None):\r\n \"\"\"Number of stored values, including explicit zeros.\r\n\r\n Parameters\r\n ----------\r\n axis : None, 0, or 1\r\n Select between the number of values across the whole matrix, in\r\n each column, or in each row.\r\n\r\n See also\r\n --------\r\n count_nonzero : Number of non-zero entries\r\n \"\"\"\r\n raise NotImplementedError(\"getnnz not implemented for %s.\" %\r\n self.__class__.__name__)\r\n\r\n @property\r\n def nnz(self):\r\n \"\"\"Number of stored values, including explicit zeros.\r\n\r\n See also\r\n --------\r\n count_nonzero : Number of non-zero entries\r\n \"\"\"\r\n 
return self.getnnz()\r\n\r\n def getformat(self):\r\n return getattr(self, 'format', 'und')\r\n\r\n def __repr__(self):\r\n _, format_name = _formats[self.getformat()]\r\n return \"<%dx%d sparse matrix of type '%s'\\n\" \\\r\n \"\\twith %d stored elements in %s format>\" % \\\r\n (self.shape + (self.dtype.type, self.nnz, format_name))\r\n\r\n def __str__(self):\r\n maxprint = self.getmaxprint()\r\n\r\n A = self.tocoo()\r\n\r\n # helper function, outputs \"(i,j) v\"\r\n def tostr(row, col, data):\r\n triples = zip(list(zip(row, col)), data)\r\n return '\\n'.join([(' %s\\t%s' % t) for t in triples])\r\n\r\n if self.nnz > maxprint:\r\n half = maxprint // 2\r\n out = tostr(A.row[:half], A.col[:half], A.data[:half])\r\n out += \"\\n :\\t:\\n\"\r\n half = maxprint - maxprint//2\r\n out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])\r\n else:\r\n out = tostr(A.row, A.col, A.data)\r\n\r\n return out\r\n\r\n def __bool__(self): # Simple -- other ideas?\r\n if self.shape == (1, 1):\r\n return self.nnz != 0\r\n else:\r\n raise ValueError(\"The truth value of an array with more than one \"\r\n \"element is ambiguous. Use a.any() or a.all().\")\r\n __nonzero__ = __bool__\r\n\r\n # What should len(sparse) return? For consistency with dense matrices,\r\n # perhaps it should be the number of rows? But for some uses the number of\r\n # non-zeros is more important. For now, raise an exception!\r\n def __len__(self):\r\n raise TypeError(\"sparse matrix length is ambiguous; use getnnz()\"\r\n \" or shape[0]\")\r\n\r\n def asformat(self, format):\r\n \"\"\"Return this matrix in a given sparse format\r\n\r\n Parameters\r\n ----------\r\n format : {string, None}\r\n desired sparse matrix format\r\n - None for no format conversion\r\n - \"csr\" for csr_matrix format\r\n - \"csc\" for csc_matrix format\r\n - \"lil\" for lil_matrix format\r\n - \"dok\" for dok_matrix format and so on\r\n\r\n \"\"\"\r\n\r\n if format is None or format == self.format:\r\n return self\r\n else:\r\n return getattr(self, 'to' + format)()\r\n\r\n ###################################################################\r\n # NOTE: All arithmetic operations use csr_matrix by default.\r\n # Therefore a new sparse matrix format just needs to define a\r\n # .tocsr() method to provide arithmetic support. 
Any of these\r\n # methods can be overridden for efficiency.\r\n ####################################################################\r\n\r\n def multiply(self, other):\r\n \"\"\"Point-wise multiplication by another matrix\r\n \"\"\"\r\n return self.tocsr().multiply(other)\r\n\r\n def maximum(self, other):\r\n return self.tocsr().maximum(other)\r\n\r\n def minimum(self, other):\r\n return self.tocsr().minimum(other)\r\n\r\n def dot(self, other):\r\n \"\"\"Ordinary dot product\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> from scipy.sparse import csr_matrix\r\n >>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])\r\n >>> v = np.array([1, 0, -1])\r\n >>> A.dot(v)\r\n array([ 1, -3, -1], dtype=int64)\r\n\r\n \"\"\"\r\n return self * other\r\n\r\n def power(self, n, dtype=None):\r\n return self.tocsr().power(n, dtype=dtype)\r\n\r\n def __eq__(self, other):\r\n return self.tocsr().__eq__(other)\r\n\r\n def __ne__(self, other):\r\n return self.tocsr().__ne__(other)\r\n\r\n def __lt__(self, other):\r\n return self.tocsr().__lt__(other)\r\n\r\n def __gt__(self, other):\r\n return self.tocsr().__gt__(other)\r\n\r\n def __le__(self, other):\r\n return self.tocsr().__le__(other)\r\n\r\n def __ge__(self, other):\r\n return self.tocsr().__ge__(other)\r\n\r\n def __abs__(self):\r\n return abs(self.tocsr())\r\n\r\n def __add__(self, other): # self + other\r\n return self.tocsr().__add__(other)\r\n\r\n def __radd__(self, other): # other + self\r\n return self.tocsr().__radd__(other)\r\n\r\n def __sub__(self, other): # self - other\r\n # note: this can't be replaced by self + (-other) for unsigned types\r\n return self.tocsr().__sub__(other)\r\n\r\n def __rsub__(self, other): # other - self\r\n return self.tocsr().__rsub__(other)\r\n\r\n def __mul__(self, other):\r\n \"\"\"interpret other and call one of the following\r\n\r\n self._mul_scalar()\r\n self._mul_vector()\r\n self._mul_multivector()\r\n self._mul_sparse_matrix()\r\n \"\"\"\r\n\r\n M, N = self.shape\r\n\r\n if other.__class__ is np.ndarray:\r\n # Fast path for the most common case\r\n if other.shape == (N,):\r\n return self._mul_vector(other)\r\n elif other.shape == (N, 1):\r\n return self._mul_vector(other.ravel()).reshape(M, 1)\r\n elif other.ndim == 2 and other.shape[0] == N:\r\n return self._mul_multivector(other)\r\n\r\n if isscalarlike(other):\r\n # scalar value\r\n return self._mul_scalar(other)\r\n\r\n if issparse(other):\r\n if self.shape[1] != other.shape[0]:\r\n raise ValueError('dimension mismatch')\r\n return self._mul_sparse_matrix(other)\r\n\r\n try:\r\n other.shape\r\n except AttributeError:\r\n # If it's a list or whatever, treat it like a matrix\r\n other_a = np.asanyarray(other)\r\n\r\n if other_a.ndim == 0 and other_a.dtype == np.object_:\r\n # Not interpretable as an array; return NotImplemented so that\r\n # other's __rmul__ can kick in if that's implemented.\r\n return NotImplemented\r\n\r\n other = other_a\r\n\r\n if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:\r\n # dense row or column vector\r\n if other.shape != (N,) and other.shape != (N, 1):\r\n raise ValueError('dimension mismatch')\r\n\r\n result = self._mul_vector(np.ravel(other))\r\n\r\n if isinstance(other, np.matrix):\r\n result = np.asmatrix(result)\r\n\r\n if other.ndim == 2 and other.shape[1] == 1:\r\n # If 'other' was an (nx1) column vector, reshape the result\r\n result = result.reshape(-1, 1)\r\n\r\n return result\r\n\r\n elif other.ndim == 2:\r\n ##\r\n # dense 2D array or matrix (\"multivector\")\r\n\r\n if 
other.shape[0] != self.shape[1]:\r\n raise ValueError('dimension mismatch')\r\n\r\n result = self._mul_multivector(np.asarray(other))\r\n\r\n if isinstance(other, np.matrix):\r\n result = np.asmatrix(result)\r\n\r\n return result\r\n else:\r\n raise ValueError('could not interpret dimensions')\r\n\r\n # by default, use CSR for __mul__ handlers\r\n def _mul_scalar(self, other):\r\n return self.tocsr()._mul_scalar(other)\r\n\r\n def _mul_vector(self, other):\r\n return self.tocsr()._mul_vector(other)\r\n\r\n def _mul_multivector(self, other):\r\n return self.tocsr()._mul_multivector(other)\r\n\r\n def _mul_sparse_matrix(self, other):\r\n return self.tocsr()._mul_sparse_matrix(other)\r\n\r\n def __rmul__(self, other): # other * self\r\n if isscalarlike(other):\r\n return self.__mul__(other)\r\n else:\r\n # Don't use asarray unless we have to\r\n try:\r\n tr = other.transpose()\r\n except AttributeError:\r\n tr = np.asarray(other).transpose()\r\n return (self.transpose() * tr).transpose()\r\n\r\n #####################################\r\n # matmul (@) operator (Python 3.5+) #\r\n #####################################\r\n\r\n def __matmul__(self, other):\r\n if isscalarlike(other):\r\n raise ValueError(\"Scalar operands are not allowed, \"\r\n \"use '*' instead\")\r\n return self.__mul__(other)\r\n\r\n def __rmatmul__(self, other):\r\n if isscalarlike(other):\r\n raise ValueError(\"Scalar operands are not allowed, \"\r\n \"use '*' instead\")\r\n return self.__rmul__(other)\r\n\r\n ####################\r\n # Other Arithmetic #\r\n ####################\r\n\r\n def _divide(self, other, true_divide=False, rdivide=False):\r\n if isscalarlike(other):\r\n if rdivide:\r\n if true_divide:\r\n return np.true_divide(other, self.todense())\r\n else:\r\n return np.divide(other, self.todense())\r\n\r\n if true_divide and np.can_cast(self.dtype, np.float_):\r\n return self.astype(np.float_)._mul_scalar(1./other)\r\n else:\r\n r = self._mul_scalar(1./other)\r\n\r\n scalar_dtype = np.asarray(other).dtype\r\n if (np.issubdtype(self.dtype, np.integer) and\r\n np.issubdtype(scalar_dtype, np.integer)):\r\n return r.astype(self.dtype)\r\n else:\r\n return r\r\n\r\n elif isdense(other):\r\n if not rdivide:\r\n if true_divide:\r\n return np.true_divide(self.todense(), other)\r\n else:\r\n return np.divide(self.todense(), other)\r\n else:\r\n if true_divide:\r\n return np.true_divide(other, self.todense())\r\n else:\r\n return np.divide(other, self.todense())\r\n elif isspmatrix(other):\r\n if rdivide:\r\n return other._divide(self, true_divide, rdivide=False)\r\n\r\n self_csr = self.tocsr()\r\n if true_divide and np.can_cast(self.dtype, np.float_):\r\n return self_csr.astype(np.float_)._divide_sparse(other)\r\n else:\r\n return self_csr._divide_sparse(other)\r\n else:\r\n return NotImplemented\r\n\r\n def __truediv__(self, other):\r\n return self._divide(other, true_divide=True)\r\n\r\n def __div__(self, other):\r\n # Always do true division\r\n return self._divide(other, true_divide=True)\r\n\r\n def __rtruediv__(self, other):\r\n # Implementing this as the inverse would be too magical -- bail out\r\n return NotImplemented\r\n\r\n def __rdiv__(self, other):\r\n # Implementing this as the inverse would be too magical -- bail out\r\n return NotImplemented\r\n\r\n def __neg__(self):\r\n return -self.tocsr()\r\n\r\n def __iadd__(self, other):\r\n return NotImplemented\r\n\r\n def __isub__(self, other):\r\n return NotImplemented\r\n\r\n def __imul__(self, other):\r\n return NotImplemented\r\n\r\n def __idiv__(self, 
other):\r\n return self.__itruediv__(other)\r\n\r\n def __itruediv__(self, other):\r\n return NotImplemented\r\n\r\n def __pow__(self, other):\r\n if self.shape[0] != self.shape[1]:\r\n raise TypeError('matrix is not square')\r\n\r\n if isintlike(other):\r\n other = int(other)\r\n if other < 0:\r\n raise ValueError('exponent must be >= 0')\r\n\r\n if other == 0:\r\n from .construct import eye\r\n return eye(self.shape[0], dtype=self.dtype)\r\n elif other == 1:\r\n return self.copy()\r\n else:\r\n tmp = self.__pow__(other//2)\r\n if (other % 2):\r\n return self * tmp * tmp\r\n else:\r\n return tmp * tmp\r\n elif isscalarlike(other):\r\n raise ValueError('exponent must be an integer')\r\n else:\r\n return NotImplemented\r\n\r\n def __getattr__(self, attr):\r\n if attr == 'A':\r\n return self.toarray()\r\n elif attr == 'T':\r\n return self.transpose()\r\n elif attr == 'H':\r\n return self.getH()\r\n elif attr == 'real':\r\n return self._real()\r\n elif attr == 'imag':\r\n return self._imag()\r\n elif attr == 'size':\r\n return self.getnnz()\r\n else:\r\n raise AttributeError(attr + \" not found\")\r\n\r\n def transpose(self, axes=None, copy=False):\r\n \"\"\"\r\n Reverses the dimensions of the sparse matrix.\r\n\r\n Parameters\r\n ----------\r\n axes : None, optional\r\n This argument is in the signature *solely* for NumPy\r\n compatibility reasons. Do not pass in anything except\r\n for the default value.\r\n copy : bool, optional\r\n Indicates whether or not attributes of `self` should be\r\n copied whenever possible. The degree to which attributes\r\n are copied varies depending on the type of sparse matrix\r\n being used.\r\n\r\n Returns\r\n -------\r\n p : `self` with the dimensions reversed.\r\n\r\n See Also\r\n --------\r\n np.matrix.transpose : NumPy's implementation of 'transpose'\r\n for matrices\r\n \"\"\"\r\n return self.tocsr().transpose(axes=axes, copy=copy)\r\n\r\n def conj(self):\r\n return self.tocsr().conj()\r\n\r\n def conjugate(self):\r\n return self.conj()\r\n\r\n # Renamed conjtranspose() -> getH() for compatibility with dense matrices\r\n def getH(self):\r\n return self.transpose().conj()\r\n\r\n def _real(self):\r\n return self.tocsr()._real()\r\n\r\n def _imag(self):\r\n return self.tocsr()._imag()\r\n\r\n def nonzero(self):\r\n \"\"\"nonzero indices\r\n\r\n Returns a tuple of arrays (row,col) containing the indices\r\n of the non-zero elements of the matrix.\r\n\r\n Examples\r\n --------\r\n >>> from scipy.sparse import csr_matrix\r\n >>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])\r\n >>> A.nonzero()\r\n (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))\r\n\r\n \"\"\"\r\n\r\n # convert to COOrdinate format\r\n A = self.tocoo()\r\n nz_mask = A.data != 0\r\n return (A.row[nz_mask], A.col[nz_mask])\r\n\r\n def getcol(self, j):\r\n \"\"\"Returns a copy of column j of the matrix, as an (m x 1) sparse\r\n matrix (column vector).\r\n \"\"\"\r\n # Spmatrix subclasses should override this method for efficiency.\r\n # Post-multiply by a (n x 1) column vector 'a' containing all zeros\r\n # except for a_j = 1\r\n from .csc import csc_matrix\r\n n = self.shape[1]\r\n if j < 0:\r\n j += n\r\n if j < 0 or j >= n:\r\n raise IndexError(\"index out of bounds\")\r\n col_selector = csc_matrix(([1], [[j], [0]]),\r\n shape=(n, 1), dtype=self.dtype)\r\n return self * col_selector\r\n\r\n def getrow(self, i):\r\n \"\"\"Returns a copy of row i of the matrix, as a (1 x n) sparse\r\n matrix (row vector).\r\n \"\"\"\r\n # Spmatrix subclasses should override this method for efficiency.\r\n # 
Pre-multiply by a (1 x m) row vector 'a' containing all zeros\r\n # except for a_i = 1\r\n from .csr import csr_matrix\r\n m = self.shape[0]\r\n if i < 0:\r\n i += m\r\n if i < 0 or i >= m:\r\n raise IndexError(\"index out of bounds\")\r\n row_selector = csr_matrix(([1], [[0], [i]]),\r\n shape=(1, m), dtype=self.dtype)\r\n return row_selector * self\r\n\r\n # def __array__(self):\r\n # return self.toarray()\r\n\r\n def todense(self, order=None, out=None):\r\n \"\"\"\r\n Return a dense matrix representation of this matrix.\r\n\r\n Parameters\r\n ----------\r\n order : {'C', 'F'}, optional\r\n Whether to store multi-dimensional data in C (row-major)\r\n or Fortran (column-major) order in memory. The default\r\n is 'None', indicating the NumPy default of C-ordered.\r\n Cannot be specified in conjunction with the `out`\r\n argument.\r\n\r\n out : ndarray, 2-dimensional, optional\r\n If specified, uses this array (or `numpy.matrix`) as the\r\n output buffer instead of allocating a new array to\r\n return. The provided array must have the same shape and\r\n dtype as the sparse matrix on which you are calling the\r\n method.\r\n\r\n Returns\r\n -------\r\n arr : numpy.matrix, 2-dimensional\r\n A NumPy matrix object with the same shape and containing\r\n the same data represented by the sparse matrix, with the\r\n requested memory order. If `out` was passed and was an\r\n array (rather than a `numpy.matrix`), it will be filled\r\n with the appropriate values and returned wrapped in a\r\n `numpy.matrix` object that shares the same memory.\r\n \"\"\"\r\n return np.asmatrix(self.toarray(order=order, out=out))\r\n\r\n def toarray(self, order=None, out=None):\r\n \"\"\"\r\n Return a dense ndarray representation of this matrix.\r\n\r\n Parameters\r\n ----------\r\n order : {'C', 'F'}, optional\r\n Whether to store multi-dimensional data in C (row-major)\r\n or Fortran (column-major) order in memory. The default\r\n is 'None', indicating the NumPy default of C-ordered.\r\n Cannot be specified in conjunction with the `out`\r\n argument.\r\n\r\n out : ndarray, 2-dimensional, optional\r\n If specified, uses this array as the output buffer\r\n instead of allocating a new array to return. The provided\r\n array must have the same shape and dtype as the sparse\r\n matrix on which you are calling the method. For most\r\n sparse types, `out` is required to be memory contiguous\r\n (either C or Fortran ordered).\r\n\r\n Returns\r\n -------\r\n arr : ndarray, 2-dimensional\r\n An array with the same shape and containing the same\r\n data represented by the sparse matrix, with the requested\r\n memory order. If `out` was passed, the same object is\r\n returned after being modified in-place to contain the\r\n appropriate values.\r\n \"\"\"\r\n return self.tocoo(copy=False).toarray(order=order, out=out)\r\n\r\n # Any sparse matrix format deriving from spmatrix must define one of\r\n # tocsr or tocoo. 
The other conversion methods may be implemented for\r\n # efficiency, but are not required.\r\n def tocsr(self, copy=False):\r\n \"\"\"Convert this matrix to Compressed Sparse Row format.\r\n\r\n With copy=False, the data/indices may be shared between this matrix and\r\n the resultant csr_matrix.\r\n \"\"\"\r\n return self.tocoo(copy=copy).tocsr(copy=False)\r\n\r\n def todok(self, copy=False):\r\n \"\"\"Convert this matrix to Dictionary Of Keys format.\r\n\r\n With copy=False, the data/indices may be shared between this matrix and\r\n the resultant dok_matrix.\r\n \"\"\"\r\n return self.tocoo(copy=copy).todok(copy=False)\r\n\r\n def tocoo(self, copy=False):\r\n \"\"\"Convert this matrix to COOrdinate format.\r\n\r\n With copy=False, the data/indices may be shared between this matrix and\r\n the resultant coo_matrix.\r\n \"\"\"\r\n return self.tocsr(copy=False).tocoo(copy=copy)\r\n\r\n def tolil(self, copy=False):\r\n \"\"\"Convert this matrix to LInked List format.\r\n\r\n With copy=False, the data/indices may be shared between this matrix and\r\n the resultant lil_matrix.\r\n \"\"\"\r\n return self.tocsr(copy=False).tolil(copy=copy)\r\n\r\n def todia(self, copy=False):\r\n \"\"\"Convert this matrix to sparse DIAgonal format.\r\n\r\n With copy=False, the data/indices may be shared between this matrix and\r\n the resultant dia_matrix.\r\n \"\"\"\r\n return self.tocoo(copy=copy).todia(copy=False)\r\n\r\n def tobsr(self, blocksize=None, copy=False):\r\n \"\"\"Convert this matrix to Block Sparse Row format.\r\n\r\n With copy=False, the data/indices may be shared between this matrix and\r\n the resultant bsr_matrix.\r\n\r\n When blocksize=(R, C) is provided, it will be used for construction of\r\n the bsr_matrix.\r\n \"\"\"\r\n return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)\r\n\r\n def tocsc(self, copy=False):\r\n \"\"\"Convert this matrix to Compressed Sparse Column format.\r\n\r\n With copy=False, the data/indices may be shared between this matrix and\r\n the resultant csc_matrix.\r\n \"\"\"\r\n return self.tocsr(copy=copy).tocsc(copy=False)\r\n\r\n def copy(self):\r\n \"\"\"Returns a copy of this matrix.\r\n\r\n No data/indices will be shared between the returned value and current\r\n matrix.\r\n \"\"\"\r\n return self.__class__(self, copy=True)\r\n\r\n def sum(self, axis=None, dtype=None, out=None):\r\n \"\"\"\r\n Sum the matrix elements over a given axis.\r\n\r\n Parameters\r\n ----------\r\n axis : {-2, -1, 0, 1, None} optional\r\n Axis along which the sum is computed. The default is to\r\n compute the sum of all the matrix elements, returning a scalar\r\n (i.e. `axis` = `None`).\r\n dtype : dtype, optional\r\n The type of the returned matrix and of the accumulator in which\r\n the elements are summed. The dtype of `a` is used by default\r\n unless `a` has an integer dtype of less precision than the default\r\n platform integer. In that case, if `a` is signed then the platform\r\n integer is used while if `a` is unsigned then an unsigned integer\r\n of the same precision as the platform integer is used.\r\n\r\n .. versionadded: 0.18.0\r\n\r\n out : np.matrix, optional\r\n Alternative output matrix in which to place the result. It must\r\n have the same shape as the expected output, but the type of the\r\n output values will be cast if necessary.\r\n\r\n .. 
versionadded: 0.18.0\r\n\r\n Returns\r\n -------\r\n sum_along_axis : np.matrix\r\n A matrix with the same shape as `self`, with the specified\r\n axis removed.\r\n\r\n See Also\r\n --------\r\n np.matrix.sum : NumPy's implementation of 'sum' for matrices\r\n\r\n \"\"\"\r\n validateaxis(axis)\r\n\r\n # We use multiplication by a matrix of ones to achieve this.\r\n # For some sparse matrix formats more efficient methods are\r\n # possible -- these should override this function.\r\n m, n = self.shape\r\n\r\n # Mimic numpy's casting.\r\n res_dtype = get_sum_dtype(self.dtype)\r\n\r\n if axis is None:\r\n # sum over rows and columns\r\n return (self * np.asmatrix(np.ones(\r\n (n, 1), dtype=res_dtype))).sum(\r\n dtype=dtype, out=out)\r\n\r\n if axis < 0:\r\n axis += 2\r\n\r\n # axis = 0 or 1 now\r\n if axis == 0:\r\n # sum over columns\r\n ret = np.asmatrix(np.ones(\r\n (1, m), dtype=res_dtype)) * self\r\n else:\r\n # sum over rows\r\n ret = self * np.asmatrix(\r\n np.ones((n, 1), dtype=res_dtype))\r\n\r\n if out is not None and out.shape != ret.shape:\r\n raise ValueError(\"dimensions do not match\")\r\n\r\n return ret.sum(axis=(), dtype=dtype, out=out)\r\n\r\n def mean(self, axis=None, dtype=None, out=None):\r\n \"\"\"\r\n Compute the arithmetic mean along the specified axis.\r\n\r\n Returns the average of the matrix elements. The average is taken\r\n over all elements in the matrix by default, otherwise over the\r\n specified axis. `float64` intermediate and return values are used\r\n for integer inputs.\r\n\r\n Parameters\r\n ----------\r\n axis : {-2, -1, 0, 1, None} optional\r\n Axis along which the mean is computed. The default is to compute\r\n the mean of all elements in the matrix (i.e. `axis` = `None`).\r\n dtype : data-type, optional\r\n Type to use in computing the mean. For integer inputs, the default\r\n is `float64`; for floating point inputs, it is the same as the\r\n input dtype.\r\n\r\n .. versionadded: 0.18.0\r\n\r\n out : np.matrix, optional\r\n Alternative output matrix in which to place the result. It must\r\n have the same shape as the expected output, but the type of the\r\n output values will be cast if necessary.\r\n\r\n .. 
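As the implementation above shows, sum falls back to multiplication by a vector of ones and accepts axis=None, 0, or 1 (or their negative equivalents). A small sketch of the resulting shapes, with arbitrary values:

import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[1, 0, 2],
                         [0, 3, 0]]))
total = A.sum()              # scalar: 6
col_sums = A.sum(axis=0)     # np.matrix of shape (1, 3)
row_sums = A.sum(axis=1)     # np.matrix of shape (2, 1)
assert total == 6
assert col_sums.shape == (1, 3) and row_sums.shape == (2, 1)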
versionadded: 0.18.0\r\n\r\n Returns\r\n -------\r\n m : np.matrix\r\n\r\n See Also\r\n --------\r\n np.matrix.mean : NumPy's implementation of 'mean' for matrices\r\n\r\n \"\"\"\r\n def _is_integral(dtype):\r\n return (np.issubdtype(dtype, np.integer) or\r\n np.issubdtype(dtype, np.bool_))\r\n\r\n validateaxis(axis)\r\n\r\n res_dtype = self.dtype.type\r\n integral = _is_integral(self.dtype)\r\n\r\n # output dtype\r\n if dtype is None:\r\n if integral:\r\n res_dtype = np.float64\r\n else:\r\n res_dtype = np.dtype(dtype).type\r\n\r\n # intermediate dtype for summation\r\n inter_dtype = np.float64 if integral else res_dtype\r\n inter_self = self.astype(inter_dtype)\r\n\r\n if axis is None:\r\n return (inter_self / np.array(\r\n self.shape[0] * self.shape[1]))\\\r\n .sum(dtype=res_dtype, out=out)\r\n\r\n if axis < 0:\r\n axis += 2\r\n\r\n # axis = 0 or 1 now\r\n if axis == 0:\r\n return (inter_self * (1.0 / self.shape[0])).sum(\r\n axis=0, dtype=res_dtype, out=out)\r\n else:\r\n return (inter_self * (1.0 / self.shape[1])).sum(\r\n axis=1, dtype=res_dtype, out=out)\r\n\r\n def diagonal(self):\r\n \"\"\"Returns the main diagonal of the matrix\r\n \"\"\"\r\n # TODO support k != 0\r\n return self.tocsr().diagonal()\r\n\r\n def setdiag(self, values, k=0):\r\n \"\"\"\r\n Set diagonal or off-diagonal elements of the array.\r\n\r\n Parameters\r\n ----------\r\n values : array_like\r\n New values of the diagonal elements.\r\n\r\n Values may have any length. If the diagonal is longer than values,\r\n then the remaining diagonal entries will not be set. If values if\r\n longer than the diagonal, then the remaining values are ignored.\r\n\r\n If a scalar value is given, all of the diagonal is set to it.\r\n\r\n k : int, optional\r\n Which off-diagonal to set, corresponding to elements a[i,i+k].\r\n Default: 0 (the main diagonal).\r\n\r\n \"\"\"\r\n M, N = self.shape\r\n if (k > 0 and k >= N) or (k < 0 and -k >= M):\r\n raise ValueError(\"k exceeds matrix dimensions\")\r\n self._setdiag(np.asarray(values), k)\r\n\r\n def _setdiag(self, values, k):\r\n M, N = self.shape\r\n if k < 0:\r\n if values.ndim == 0:\r\n # broadcast\r\n max_index = min(M+k, N)\r\n for i in xrange(max_index):\r\n self[i - k, i] = values\r\n else:\r\n max_index = min(M+k, N, len(values))\r\n if max_index <= 0:\r\n return\r\n for i, v in enumerate(values[:max_index]):\r\n self[i - k, i] = v\r\n else:\r\n if values.ndim == 0:\r\n # broadcast\r\n max_index = min(M, N-k)\r\n for i in xrange(max_index):\r\n self[i, i + k] = values\r\n else:\r\n max_index = min(M, N-k, len(values))\r\n if max_index <= 0:\r\n return\r\n for i, v in enumerate(values[:max_index]):\r\n self[i, i + k] = v\r\n\r\n def _process_toarray_args(self, order, out):\r\n if out is not None:\r\n if order is not None:\r\n raise ValueError('order cannot be specified if out '\r\n 'is not None')\r\n if out.shape != self.shape or out.dtype != self.dtype:\r\n raise ValueError('out array must be same dtype and shape as '\r\n 'sparse matrix')\r\n out[...] 
= 0.\r\n return out\r\n else:\r\n return np.zeros(self.shape, dtype=self.dtype, order=order)\r\n\r\n def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):\r\n \"\"\"Method for compatibility with NumPy's ufuncs and dot\r\n functions.\r\n \"\"\"\r\n\r\n if any(not isinstance(x, spmatrix) and np.asarray(x).dtype == object\r\n for x in inputs):\r\n # preserve previous behavior with object arrays\r\n with_self = list(inputs)\r\n with_self[pos] = np.asarray(self, dtype=object)\r\n return getattr(func, method)(*with_self, **kwargs)\r\n\r\n out = kwargs.pop('out', None)\r\n if method != '__call__' or kwargs:\r\n return NotImplemented\r\n\r\n without_self = list(inputs)\r\n del without_self[pos]\r\n without_self = tuple(without_self)\r\n\r\n if func is np.multiply:\r\n result = self.multiply(*without_self)\r\n elif func is np.add:\r\n result = self.__add__(*without_self)\r\n elif func is np.dot:\r\n if pos == 0:\r\n result = self.__mul__(inputs[1])\r\n else:\r\n result = self.__rmul__(inputs[0])\r\n elif func is np.subtract:\r\n if pos == 0:\r\n result = self.__sub__(inputs[1])\r\n else:\r\n result = self.__rsub__(inputs[0])\r\n elif func is np.divide:\r\n true_divide = (sys.version_info[0] >= 3)\r\n rdivide = (pos == 1)\r\n result = self._divide(*without_self,\r\n true_divide=true_divide,\r\n rdivide=rdivide)\r\n elif func is np.true_divide:\r\n rdivide = (pos == 1)\r\n result = self._divide(*without_self,\r\n true_divide=True,\r\n rdivide=rdivide)\r\n elif func is np.maximum:\r\n result = self.maximum(*without_self)\r\n elif func is np.minimum:\r\n result = self.minimum(*without_self)\r\n elif func is np.absolute:\r\n result = abs(self)\r\n elif func in _ufuncs_with_fixed_point_at_zero:\r\n func_name = func.__name__\r\n if hasattr(self, func_name):\r\n result = getattr(self, func_name)()\r\n else:\r\n result = getattr(self.tocsr(), func_name)()\r\n else:\r\n return NotImplemented\r\n\r\n if out is not None:\r\n if not isinstance(out, spmatrix) and isinstance(result, spmatrix):\r\n out[...] = result.todense()\r\n else:\r\n out[...] 
= result\r\n result = out\r\n\r\n return result\r\n\r\n\r\ndef isspmatrix(x):\r\n return isinstance(x, spmatrix)\r\n\r\nissparse = isspmatrix\r\n", "#\r\n# Author: Joris Vankerschaver 2013\r\n#\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\nimport math\r\nimport numpy as np\r\nimport scipy.linalg\r\nfrom scipy.misc import doccer\r\nfrom scipy.special import gammaln, psi, multigammaln\r\nfrom scipy._lib._util import check_random_state\r\nfrom scipy.linalg.blas import drot\r\n\r\n\r\n__all__ = ['multivariate_normal',\r\n 'matrix_normal',\r\n 'dirichlet',\r\n 'wishart',\r\n 'invwishart',\r\n 'special_ortho_group',\r\n 'ortho_group',\r\n 'random_correlation']\r\n\r\n_LOG_2PI = np.log(2 * np.pi)\r\n_LOG_2 = np.log(2)\r\n_LOG_PI = np.log(np.pi)\r\n\r\n\r\n_doc_random_state = \"\"\"\\\r\nrandom_state : None or int or np.random.RandomState instance, optional\r\n If int or RandomState, use it for drawing the random variates.\r\n If None (or np.random), the global np.random state is used.\r\n Default is None.\r\n\"\"\"\r\n\r\ndef _squeeze_output(out):\r\n \"\"\"\r\n Remove single-dimensional entries from array and convert to scalar,\r\n if necessary.\r\n\r\n \"\"\"\r\n out = out.squeeze()\r\n if out.ndim == 0:\r\n out = out[()]\r\n return out\r\n\r\n\r\ndef _eigvalsh_to_eps(spectrum, cond=None, rcond=None):\r\n \"\"\"\r\n Determine which eigenvalues are \"small\" given the spectrum.\r\n\r\n This is for compatibility across various linear algebra functions\r\n that should agree about whether or not a Hermitian matrix is numerically\r\n singular and what is its numerical matrix rank.\r\n This is designed to be compatible with scipy.linalg.pinvh.\r\n\r\n Parameters\r\n ----------\r\n spectrum : 1d ndarray\r\n Array of eigenvalues of a Hermitian matrix.\r\n cond, rcond : float, optional\r\n Cutoff for small eigenvalues.\r\n Singular values smaller than rcond * largest_eigenvalue are\r\n considered zero.\r\n If None or -1, suitable machine precision is used.\r\n\r\n Returns\r\n -------\r\n eps : float\r\n Magnitude cutoff for numerical negligibility.\r\n\r\n \"\"\"\r\n if rcond is not None:\r\n cond = rcond\r\n if cond in [None, -1]:\r\n t = spectrum.dtype.char.lower()\r\n factor = {'f': 1E3, 'd': 1E6}\r\n cond = factor[t] * np.finfo(t).eps\r\n eps = cond * np.max(abs(spectrum))\r\n return eps\r\n\r\n\r\ndef _pinv_1d(v, eps=1e-5):\r\n \"\"\"\r\n A helper function for computing the pseudoinverse.\r\n\r\n Parameters\r\n ----------\r\n v : iterable of numbers\r\n This may be thought of as a vector of eigenvalues or singular values.\r\n eps : float\r\n Values with magnitude no greater than eps are considered negligible.\r\n\r\n Returns\r\n -------\r\n v_pinv : 1d float ndarray\r\n A vector of pseudo-inverted numbers.\r\n\r\n \"\"\"\r\n return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)\r\n\r\n\r\nclass _PSD(object):\r\n \"\"\"\r\n Compute coordinated functions of a symmetric positive semidefinite matrix.\r\n\r\n This class addresses two issues. 
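Before moving on to the multivariate distributions, a short sketch exercising the remaining pieces of the sparse base-class interface documented above: mean, diagonal/setdiag, and the issparse alias. lil_matrix is used here because it supports the element assignment that the generic _setdiag loop relies on; the values are arbitrary:

import numpy as np
from scipy.sparse import lil_matrix, issparse, isspmatrix

B = lil_matrix((3, 3))
B.setdiag([1.0, 2.0, 3.0])        # main diagonal
B.setdiag(9.0, k=1)               # scalar broadcast onto the first super-diagonal
assert np.allclose(B.diagonal(), [1.0, 2.0, 3.0])

row_means = B.mean(axis=1)        # np.matrix of shape (3, 1)
assert row_means.shape == (3, 1)

assert issparse(B) and isspmatrix(B)   # issparse is an alias for isspmatrix
assert not issparse(np.eye(3))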
Firstly it allows the pseudoinverse,\r\n the logarithm of the pseudo-determinant, and the rank of the matrix\r\n to be computed using one call to eigh instead of three.\r\n Secondly it allows these functions to be computed in a way\r\n that gives mutually compatible results.\r\n All of the functions are computed with a common understanding as to\r\n which of the eigenvalues are to be considered negligibly small.\r\n The functions are designed to coordinate with scipy.linalg.pinvh()\r\n but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().\r\n\r\n Parameters\r\n ----------\r\n M : array_like\r\n Symmetric positive semidefinite matrix (2-D).\r\n cond, rcond : float, optional\r\n Cutoff for small eigenvalues.\r\n Singular values smaller than rcond * largest_eigenvalue are\r\n considered zero.\r\n If None or -1, suitable machine precision is used.\r\n lower : bool, optional\r\n Whether the pertinent array data is taken from the lower\r\n or upper triangle of M. (Default: lower)\r\n check_finite : bool, optional\r\n Whether to check that the input matrices contain only finite\r\n numbers. Disabling may give a performance gain, but may result\r\n in problems (crashes, non-termination) if the inputs do contain\r\n infinities or NaNs.\r\n allow_singular : bool, optional\r\n Whether to allow a singular matrix. (Default: True)\r\n\r\n Notes\r\n -----\r\n The arguments are similar to those of scipy.linalg.pinvh().\r\n\r\n \"\"\"\r\n\r\n def __init__(self, M, cond=None, rcond=None, lower=True,\r\n check_finite=True, allow_singular=True):\r\n # Compute the symmetric eigendecomposition.\r\n # Note that eigh takes care of array conversion, chkfinite,\r\n # and assertion that the matrix is square.\r\n s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)\r\n\r\n eps = _eigvalsh_to_eps(s, cond, rcond)\r\n if np.min(s) < -eps:\r\n raise ValueError('the input matrix must be positive semidefinite')\r\n d = s[s > eps]\r\n if len(d) < len(s) and not allow_singular:\r\n raise np.linalg.LinAlgError('singular matrix')\r\n s_pinv = _pinv_1d(s, eps)\r\n U = np.multiply(u, np.sqrt(s_pinv))\r\n\r\n # Initialize the eagerly precomputed attributes.\r\n self.rank = len(d)\r\n self.U = U\r\n self.log_pdet = np.sum(np.log(d))\r\n\r\n # Initialize an attribute to be lazily computed.\r\n self._pinv = None\r\n\r\n @property\r\n def pinv(self):\r\n if self._pinv is None:\r\n self._pinv = np.dot(self.U, self.U.T)\r\n return self._pinv\r\n\r\n\r\nclass multi_rv_generic(object):\r\n \"\"\"\r\n Class which encapsulates common functionality between all multivariate\r\n distributions.\r\n\r\n \"\"\"\r\n def __init__(self, seed=None):\r\n super(multi_rv_generic, self).__init__()\r\n self._random_state = check_random_state(seed)\r\n\r\n @property\r\n def random_state(self):\r\n \"\"\" Get or set the RandomState object for generating random variates.\r\n\r\n This can be either None or an existing RandomState object.\r\n\r\n If None (or np.random), use the RandomState singleton used by np.random.\r\n If already a RandomState instance, use it.\r\n If an int, use a new RandomState instance seeded with seed.\r\n\r\n \"\"\"\r\n return self._random_state\r\n\r\n @random_state.setter\r\n def random_state(self, seed):\r\n self._random_state = check_random_state(seed)\r\n\r\n def _get_random_state(self, random_state):\r\n if random_state is not None:\r\n return check_random_state(random_state)\r\n else:\r\n return self._random_state\r\n\r\n\r\nclass multi_rv_frozen(object):\r\n \"\"\"\r\n Class which 
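The _PSD helper above exists so that the pseudo-inverse, the log pseudo-determinant, and the rank all come from one call to eigh and agree on which eigenvalues count as zero. The following is only an illustrative numpy sketch of that idea for a rank-deficient covariance, not the class itself; the 1e6*eps cutoff mirrors the double-precision factor used in _eigvalsh_to_eps:

import numpy as np

cov = np.array([[2.0, 2.0],
                [2.0, 2.0]])          # rank 1, positive semidefinite
s, u = np.linalg.eigh(cov)
eps = 1e6 * np.finfo(float).eps * np.max(np.abs(s))   # small-eigenvalue cutoff
keep = s > eps

rank = int(np.sum(keep))                               # 1
log_pdet = float(np.sum(np.log(s[keep])))              # log pseudo-determinant
s_pinv = np.where(keep, 1.0 / np.where(keep, s, 1.0), 0.0)
pinv = (u * s_pinv) @ u.T                              # pseudo-inverse, i.e. U @ U.T with U = u * sqrt(s_pinv)

assert rank == 1
assert np.isclose(log_pdet, np.log(4.0))
assert np.allclose(pinv, np.linalg.pinv(cov))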
encapsulates common functionality between all frozen\r\n multivariate distributions.\r\n \"\"\"\r\n @property\r\n def random_state(self):\r\n return self._dist._random_state\r\n\r\n @random_state.setter\r\n def random_state(self, seed):\r\n self._dist._random_state = check_random_state(seed)\r\n\r\n_mvn_doc_default_callparams = \"\"\"\\\r\nmean : array_like, optional\r\n Mean of the distribution (default zero)\r\ncov : array_like, optional\r\n Covariance matrix of the distribution (default one)\r\nallow_singular : bool, optional\r\n Whether to allow a singular covariance matrix. (Default: False)\r\n\"\"\"\r\n\r\n_mvn_doc_callparams_note = \\\r\n \"\"\"Setting the parameter `mean` to `None` is equivalent to having `mean`\r\n be the zero-vector. The parameter `cov` can be a scalar, in which case\r\n the covariance matrix is the identity times that value, a vector of\r\n diagonal entries for the covariance matrix, or a two-dimensional\r\n array_like.\r\n \"\"\"\r\n\r\n_mvn_doc_frozen_callparams = \"\"\r\n\r\n_mvn_doc_frozen_callparams_note = \\\r\n \"\"\"See class definition for a detailed description of parameters.\"\"\"\r\n\r\nmvn_docdict_params = {\r\n '_mvn_doc_default_callparams': _mvn_doc_default_callparams,\r\n '_mvn_doc_callparams_note': _mvn_doc_callparams_note,\r\n '_doc_random_state': _doc_random_state\r\n}\r\n\r\nmvn_docdict_noparams = {\r\n '_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,\r\n '_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,\r\n '_doc_random_state': _doc_random_state\r\n}\r\n\r\nclass multivariate_normal_gen(multi_rv_generic):\r\n r\"\"\"\r\n A multivariate normal random variable.\r\n\r\n The `mean` keyword specifies the mean. The `cov` keyword specifies the\r\n covariance matrix.\r\n\r\n Methods\r\n -------\r\n ``pdf(x, mean=None, cov=1, allow_singular=False)``\r\n Probability density function.\r\n ``logpdf(x, mean=None, cov=1, allow_singular=False)``\r\n Log of the probability density function.\r\n ``rvs(mean=None, cov=1, size=1, random_state=None)``\r\n Draw random samples from a multivariate normal distribution.\r\n ``entropy()``\r\n Compute the differential entropy of the multivariate normal.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n %(_mvn_doc_default_callparams)s\r\n %(_doc_random_state)s\r\n\r\n Alternatively, the object may be called (as a function) to fix the mean\r\n and covariance parameters, returning a \"frozen\" multivariate normal\r\n random variable:\r\n\r\n rv = multivariate_normal(mean=None, cov=1, allow_singular=False)\r\n - Frozen object with the same methods but holding the given\r\n mean and covariance fixed.\r\n\r\n Notes\r\n -----\r\n %(_mvn_doc_callparams_note)s\r\n\r\n The covariance matrix `cov` must be a (symmetric) positive\r\n semi-definite matrix. The determinant and inverse of `cov` are computed\r\n as the pseudo-determinant and pseudo-inverse, respectively, so\r\n that `cov` does not need to have full rank.\r\n\r\n The probability density function for `multivariate_normal` is\r\n\r\n .. math::\r\n\r\n f(x) = \\frac{1}{\\sqrt{(2 \\pi)^k \\det \\Sigma}}\r\n \\exp\\left( -\\frac{1}{2} (x - \\mu)^T \\Sigma^{-1} (x - \\mu) \\right),\r\n\r\n where :math:`\\mu` is the mean, :math:`\\Sigma` the covariance matrix,\r\n and :math:`k` is the dimension of the space where :math:`x` takes values.\r\n\r\n .. 
versionadded:: 0.14.0\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt\r\n >>> from scipy.stats import multivariate_normal\r\n\r\n >>> x = np.linspace(0, 5, 10, endpoint=False)\r\n >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y\r\n array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,\r\n 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])\r\n >>> fig1 = plt.figure()\r\n >>> ax = fig1.add_subplot(111)\r\n >>> ax.plot(x, y)\r\n\r\n The input quantiles can be any shape of array, as long as the last\r\n axis labels the components. This allows us for instance to\r\n display the frozen pdf for a non-isotropic random variable in 2D as\r\n follows:\r\n\r\n >>> x, y = np.mgrid[-1:1:.01, -1:1:.01]\r\n >>> pos = np.dstack((x, y))\r\n >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])\r\n >>> fig2 = plt.figure()\r\n >>> ax2 = fig2.add_subplot(111)\r\n >>> ax2.contourf(x, y, rv.pdf(pos))\r\n\r\n \"\"\"\r\n\r\n def __init__(self, seed=None):\r\n super(multivariate_normal_gen, self).__init__(seed)\r\n self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)\r\n\r\n def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):\r\n \"\"\"\r\n Create a frozen multivariate normal distribution.\r\n\r\n See `multivariate_normal_frozen` for more information.\r\n\r\n \"\"\"\r\n return multivariate_normal_frozen(mean, cov,\r\n allow_singular=allow_singular,\r\n seed=seed)\r\n\r\n def _process_parameters(self, dim, mean, cov):\r\n \"\"\"\r\n Infer dimensionality from mean or covariance matrix, ensure that\r\n mean and covariance are full vector resp. matrix.\r\n\r\n \"\"\"\r\n\r\n # Try to infer dimensionality\r\n if dim is None:\r\n if mean is None:\r\n if cov is None:\r\n dim = 1\r\n else:\r\n cov = np.asarray(cov, dtype=float)\r\n if cov.ndim < 2:\r\n dim = 1\r\n else:\r\n dim = cov.shape[0]\r\n else:\r\n mean = np.asarray(mean, dtype=float)\r\n dim = mean.size\r\n else:\r\n if not np.isscalar(dim):\r\n raise ValueError(\"Dimension of random variable must be a scalar.\")\r\n\r\n # Check input sizes and return full arrays for mean and cov if necessary\r\n if mean is None:\r\n mean = np.zeros(dim)\r\n mean = np.asarray(mean, dtype=float)\r\n\r\n if cov is None:\r\n cov = 1.0\r\n cov = np.asarray(cov, dtype=float)\r\n\r\n if dim == 1:\r\n mean.shape = (1,)\r\n cov.shape = (1, 1)\r\n\r\n if mean.ndim != 1 or mean.shape[0] != dim:\r\n raise ValueError(\"Array 'mean' must be a vector of length %d.\" % dim)\r\n if cov.ndim == 0:\r\n cov = cov * np.eye(dim)\r\n elif cov.ndim == 1:\r\n cov = np.diag(cov)\r\n elif cov.ndim == 2 and cov.shape != (dim, dim):\r\n rows, cols = cov.shape\r\n if rows != cols:\r\n msg = (\"Array 'cov' must be square if it is two dimensional,\"\r\n \" but cov.shape = %s.\" % str(cov.shape))\r\n else:\r\n msg = (\"Dimension mismatch: array 'cov' is of shape %s,\"\r\n \" but 'mean' is a vector of length %d.\")\r\n msg = msg % (str(cov.shape), len(mean))\r\n raise ValueError(msg)\r\n elif cov.ndim > 2:\r\n raise ValueError(\"Array 'cov' must be at most two-dimensional,\"\r\n \" but cov.ndim = %d\" % cov.ndim)\r\n\r\n return dim, mean, cov\r\n\r\n def _process_quantiles(self, x, dim):\r\n \"\"\"\r\n Adjust quantiles array so that last axis labels the components of\r\n each data point.\r\n\r\n \"\"\"\r\n x = np.asarray(x, dtype=float)\r\n\r\n if x.ndim == 0:\r\n x = x[np.newaxis]\r\n elif x.ndim == 1:\r\n if dim == 1:\r\n x = x[:, np.newaxis]\r\n else:\r\n x = x[np.newaxis, :]\r\n\r\n return x\r\n\r\n def 
_logpdf(self, x, mean, prec_U, log_det_cov, rank):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n x : ndarray\r\n Points at which to evaluate the log of the probability\r\n density function\r\n mean : ndarray\r\n Mean of the distribution\r\n prec_U : ndarray\r\n A decomposition such that np.dot(prec_U, prec_U.T)\r\n is the precision matrix, i.e. inverse of the covariance matrix.\r\n log_det_cov : float\r\n Logarithm of the determinant of the covariance matrix\r\n rank : int\r\n Rank of the covariance matrix.\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'logpdf' instead.\r\n\r\n \"\"\"\r\n dev = x - mean\r\n maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)\r\n return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)\r\n\r\n def logpdf(self, x, mean=None, cov=1, allow_singular=False):\r\n \"\"\"\r\n Log of the multivariate normal probability density function.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n %(_mvn_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n pdf : ndarray\r\n Log of the probability density function evaluated at `x`\r\n\r\n Notes\r\n -----\r\n %(_mvn_doc_callparams_note)s\r\n\r\n \"\"\"\r\n dim, mean, cov = self._process_parameters(None, mean, cov)\r\n x = self._process_quantiles(x, dim)\r\n psd = _PSD(cov, allow_singular=allow_singular)\r\n out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)\r\n return _squeeze_output(out)\r\n\r\n def pdf(self, x, mean=None, cov=1, allow_singular=False):\r\n \"\"\"\r\n Multivariate normal probability density function.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n %(_mvn_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n pdf : ndarray\r\n Probability density function evaluated at `x`\r\n\r\n Notes\r\n -----\r\n %(_mvn_doc_callparams_note)s\r\n\r\n \"\"\"\r\n dim, mean, cov = self._process_parameters(None, mean, cov)\r\n x = self._process_quantiles(x, dim)\r\n psd = _PSD(cov, allow_singular=allow_singular)\r\n out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))\r\n return _squeeze_output(out)\r\n\r\n def rvs(self, mean=None, cov=1, size=1, random_state=None):\r\n \"\"\"\r\n Draw random samples from a multivariate normal distribution.\r\n\r\n Parameters\r\n ----------\r\n %(_mvn_doc_default_callparams)s\r\n size : integer, optional\r\n Number of samples to draw (default 1).\r\n %(_doc_random_state)s\r\n\r\n Returns\r\n -------\r\n rvs : ndarray or scalar\r\n Random variates of size (`size`, `N`), where `N` is the\r\n dimension of the random variable.\r\n\r\n Notes\r\n -----\r\n %(_mvn_doc_callparams_note)s\r\n\r\n \"\"\"\r\n dim, mean, cov = self._process_parameters(None, mean, cov)\r\n\r\n random_state = self._get_random_state(random_state)\r\n out = random_state.multivariate_normal(mean, cov, size)\r\n return _squeeze_output(out)\r\n\r\n def entropy(self, mean=None, cov=1):\r\n \"\"\"\r\n Compute the differential entropy of the multivariate normal.\r\n\r\n Parameters\r\n ----------\r\n %(_mvn_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n h : scalar\r\n Entropy of the multivariate normal distribution\r\n\r\n Notes\r\n -----\r\n %(_mvn_doc_callparams_note)s\r\n\r\n \"\"\"\r\n dim, mean, cov = self._process_parameters(None, mean, cov)\r\n _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)\r\n return 0.5 * logdet\r\n\r\n\r\nmultivariate_normal = 
multivariate_normal_gen()\r\n\r\n\r\nclass multivariate_normal_frozen(multi_rv_frozen):\r\n def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):\r\n \"\"\"\r\n Create a frozen multivariate normal distribution.\r\n\r\n Parameters\r\n ----------\r\n mean : array_like, optional\r\n Mean of the distribution (default zero)\r\n cov : array_like, optional\r\n Covariance matrix of the distribution (default one)\r\n allow_singular : bool, optional\r\n If this flag is True then tolerate a singular\r\n covariance matrix (default False).\r\n seed : None or int or np.random.RandomState instance, optional\r\n This parameter defines the RandomState object to use for drawing\r\n random variates.\r\n If None (or np.random), the global np.random state is used.\r\n If integer, it is used to seed the local RandomState instance\r\n Default is None.\r\n\r\n Examples\r\n --------\r\n When called with the default parameters, this will create a 1D random\r\n variable with mean 0 and covariance 1:\r\n\r\n >>> from scipy.stats import multivariate_normal\r\n >>> r = multivariate_normal()\r\n >>> r.mean\r\n array([ 0.])\r\n >>> r.cov\r\n array([[1.]])\r\n\r\n \"\"\"\r\n self._dist = multivariate_normal_gen(seed)\r\n self.dim, self.mean, self.cov = self._dist._process_parameters(\r\n None, mean, cov)\r\n self.cov_info = _PSD(self.cov, allow_singular=allow_singular)\r\n\r\n def logpdf(self, x):\r\n x = self._dist._process_quantiles(x, self.dim)\r\n out = self._dist._logpdf(x, self.mean, self.cov_info.U,\r\n self.cov_info.log_pdet, self.cov_info.rank)\r\n return _squeeze_output(out)\r\n\r\n def pdf(self, x):\r\n return np.exp(self.logpdf(x))\r\n\r\n def rvs(self, size=1, random_state=None):\r\n return self._dist.rvs(self.mean, self.cov, size, random_state)\r\n\r\n def entropy(self):\r\n \"\"\"\r\n Computes the differential entropy of the multivariate normal.\r\n\r\n Returns\r\n -------\r\n h : scalar\r\n Entropy of the multivariate normal distribution\r\n\r\n \"\"\"\r\n log_pdet = self.cov_info.log_pdet\r\n rank = self.cov_info.rank\r\n return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)\r\n\r\n# Set frozen generator docstrings from corresponding docstrings in\r\n# multivariate_normal_gen and fill in default strings in class docstrings\r\nfor name in ['logpdf', 'pdf', 'rvs']:\r\n method = multivariate_normal_gen.__dict__[name]\r\n method_frozen = multivariate_normal_frozen.__dict__[name]\r\n method_frozen.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_noparams)\r\n method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)\r\n\r\n_matnorm_doc_default_callparams = \"\"\"\\\r\nmean : array_like, optional\r\n Mean of the distribution (default: `None`)\r\nrowcov : array_like, optional\r\n Among-row covariance matrix of the distribution (default: `1`)\r\ncolcov : array_like, optional\r\n Among-column covariance matrix of the distribution (default: `1`)\r\n\"\"\"\r\n\r\n_matnorm_doc_callparams_note = \\\r\n \"\"\"If `mean` is set to `None` then a matrix of zeros is used for the mean.\r\n The dimensions of this matrix are inferred from the shape of `rowcov` and\r\n `colcov`, if these are provided, or set to `1` if ambiguous.\r\n\r\n `rowcov` and `colcov` can be two-dimensional array_likes specifying the\r\n covariance matrices directly. 
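Tying the methods above together, a short sketch of the frozen multivariate normal; the mean and covariance values are arbitrary, and the entropy check restates the closed form used in entropy() for a full-rank covariance:

import numpy as np
from scipy.stats import multivariate_normal

mean = np.array([0.5, -0.2])
cov = np.array([[2.0, 0.3],
                [0.3, 0.5]])

rv = multivariate_normal(mean=mean, cov=cov)                  # frozen distribution
x = rv.rvs(size=4, random_state=np.random.RandomState(0))    # shape (4, 2)

# pdf/logpdf broadcast over the leading axes of x
assert np.allclose(rv.pdf(x), np.exp(rv.logpdf(x)))

# entropy: 0.5 * (k * (log(2*pi) + 1) + log det(cov)) for full-rank cov
k = mean.size
h = 0.5 * (k * (np.log(2 * np.pi) + 1) + np.log(np.linalg.det(cov)))
assert np.isclose(rv.entropy(), h)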
Alternatively, a one-dimensional array will\r\n be be interpreted as the entries of a diagonal matrix, and a scalar or\r\n zero-dimensional array will be interpreted as this value times the\r\n identity matrix.\r\n \"\"\"\r\n\r\n_matnorm_doc_frozen_callparams = \"\"\r\n\r\n_matnorm_doc_frozen_callparams_note = \\\r\n \"\"\"See class definition for a detailed description of parameters.\"\"\"\r\n\r\nmatnorm_docdict_params = {\r\n '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,\r\n '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,\r\n '_doc_random_state': _doc_random_state\r\n}\r\n\r\nmatnorm_docdict_noparams = {\r\n '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,\r\n '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,\r\n '_doc_random_state': _doc_random_state\r\n}\r\nclass matrix_normal_gen(multi_rv_generic):\r\n r\"\"\"\r\n A matrix normal random variable.\r\n\r\n The `mean` keyword specifies the mean. The `rowcov` keyword specifies the\r\n among-row covariance matrix. The 'colcov' keyword specifies the\r\n among-column covariance matrix.\r\n\r\n Methods\r\n -------\r\n ``pdf(X, mean=None, rowcov=1, colcov=1)``\r\n Probability density function.\r\n ``logpdf(X, mean=None, rowcov=1, colcov=1)``\r\n Log of the probability density function.\r\n ``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``\r\n Draw random samples.\r\n\r\n Parameters\r\n ----------\r\n X : array_like\r\n Quantiles, with the last two axes of `X` denoting the components.\r\n %(_matnorm_doc_default_callparams)s\r\n %(_doc_random_state)s\r\n\r\n Alternatively, the object may be called (as a function) to fix the mean\r\n and covariance parameters, returning a \"frozen\" matrix normal\r\n random variable:\r\n\r\n rv = matrix_normal(mean=None, rowcov=1, colcov=1)\r\n - Frozen object with the same methods but holding the given\r\n mean and covariance fixed.\r\n\r\n Notes\r\n -----\r\n %(_matnorm_doc_callparams_note)s\r\n\r\n The covariance matrices specified by `rowcov` and `colcov` must be\r\n (symmetric) positive definite. If the samples in `X` are\r\n :math:`m \\times n`, then `rowcov` must be :math:`m \\times m` and\r\n `colcov` must be :math:`n \\times n`. `mean` must be the same shape as `X`.\r\n\r\n The probability density function for `matrix_normal` is\r\n\r\n .. math::\r\n\r\n f(X) = (2 \\pi)^{-\\frac{mn}{2}}|U|^{-\\frac{n}{2}} |V|^{-\\frac{m}{2}}\r\n \\exp\\left( -\\frac{1}{2} \\mathrm{Tr}\\left[ U^{-1} (X-M) V^{-1}\r\n (X-M)^T \\right] \\right),\r\n\r\n where :math:`M` is the mean, :math:`U` the among-row covariance matrix,\r\n :math:`V` the among-column covariance matrix.\r\n\r\n The `allow_singular` behaviour of the `multivariate_normal`\r\n distribution is not currently supported. Covariance matrices must be\r\n full rank.\r\n\r\n The `matrix_normal` distribution is closely related to the\r\n `multivariate_normal` distribution. Specifically, :math:`\\mathrm{Vec}(X)`\r\n (the vector formed by concatenating the columns of :math:`X`) has a\r\n multivariate normal distribution with mean :math:`\\mathrm{Vec}(M)`\r\n and covariance :math:`V \\otimes U` (where :math:`\\otimes` is the Kronecker\r\n product). Sampling and pdf evaluation are\r\n :math:`\\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but\r\n :math:`\\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,\r\n making this equivalent form algorithmically inefficient.\r\n\r\n .. 
versionadded:: 0.17.0\r\n\r\n Examples\r\n --------\r\n\r\n >>> from scipy.stats import matrix_normal\r\n\r\n >>> M = np.arange(6).reshape(3,2); M\r\n array([[0, 1],\r\n [2, 3],\r\n [4, 5]])\r\n >>> U = np.diag([1,2,3]); U\r\n array([[1, 0, 0],\r\n [0, 2, 0],\r\n [0, 0, 3]])\r\n >>> V = 0.3*np.identity(2); V\r\n array([[ 0.3, 0. ],\r\n [ 0. , 0.3]])\r\n >>> X = M + 0.1; X\r\n array([[ 0.1, 1.1],\r\n [ 2.1, 3.1],\r\n [ 4.1, 5.1]])\r\n >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)\r\n 0.023410202050005054\r\n\r\n >>> # Equivalent multivariate normal\r\n >>> from scipy.stats import multivariate_normal\r\n >>> vectorised_X = X.T.flatten()\r\n >>> equiv_mean = M.T.flatten()\r\n >>> equiv_cov = np.kron(V,U)\r\n >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)\r\n 0.023410202050005054\r\n \"\"\"\r\n\r\n def __init__(self, seed=None):\r\n super(matrix_normal_gen, self).__init__(seed)\r\n self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)\r\n\r\n def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):\r\n \"\"\"\r\n Create a frozen matrix normal distribution.\r\n\r\n See `matrix_normal_frozen` for more information.\r\n\r\n \"\"\"\r\n return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)\r\n\r\n def _process_parameters(self, mean, rowcov, colcov):\r\n \"\"\"\r\n Infer dimensionality from mean or covariance matrices. Handle\r\n defaults. Ensure compatible dimensions.\r\n\r\n \"\"\"\r\n\r\n # Process mean\r\n if mean is not None:\r\n mean = np.asarray(mean, dtype=float)\r\n meanshape = mean.shape\r\n if len(meanshape) != 2:\r\n raise ValueError(\"Array `mean` must be two dimensional.\")\r\n if np.any(meanshape == 0):\r\n raise ValueError(\"Array `mean` has invalid shape.\")\r\n\r\n # Process among-row covariance\r\n rowcov = np.asarray(rowcov, dtype=float)\r\n if rowcov.ndim == 0:\r\n if mean is not None:\r\n rowcov = rowcov * np.identity(meanshape[0])\r\n else:\r\n rowcov = rowcov * np.identity(1)\r\n elif rowcov.ndim == 1:\r\n rowcov = np.diag(rowcov)\r\n rowshape = rowcov.shape\r\n if len(rowshape) != 2:\r\n raise ValueError(\"`rowcov` must be a scalar or a 2D array.\")\r\n if rowshape[0] != rowshape[1]:\r\n raise ValueError(\"Array `rowcov` must be square.\")\r\n if rowshape[0] == 0:\r\n raise ValueError(\"Array `rowcov` has invalid shape.\")\r\n numrows = rowshape[0]\r\n\r\n # Process among-column covariance\r\n colcov = np.asarray(colcov, dtype=float)\r\n if colcov.ndim == 0:\r\n if mean is not None:\r\n colcov = colcov * np.identity(meanshape[1])\r\n else:\r\n colcov = colcov * np.identity(1)\r\n elif colcov.ndim == 1:\r\n colcov = np.diag(colcov)\r\n colshape = colcov.shape\r\n if len(colshape) != 2:\r\n raise ValueError(\"`colcov` must be a scalar or a 2D array.\")\r\n if colshape[0] != colshape[1]:\r\n raise ValueError(\"Array `colcov` must be square.\")\r\n if colshape[0] == 0:\r\n raise ValueError(\"Array `colcov` has invalid shape.\")\r\n numcols = colshape[0]\r\n\r\n # Ensure mean and covariances compatible\r\n if mean is not None:\r\n if meanshape[0] != numrows:\r\n raise ValueError(\"Arrays `mean` and `rowcov` must have the\"\r\n \"same number of rows.\")\r\n if meanshape[1] != numcols:\r\n raise ValueError(\"Arrays `mean` and `colcov` must have the\"\r\n \"same number of columns.\")\r\n else:\r\n mean = np.zeros((numrows,numcols))\r\n\r\n dims = (numrows, numcols)\r\n\r\n return dims, mean, rowcov, colcov\r\n\r\n def _process_quantiles(self, X, dims):\r\n \"\"\"\r\n Adjust quantiles array so that last two axes labels the 
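As _process_parameters above spells out, rowcov and colcov may be given as scalars (interpreted as that value times the identity) or as 1-D arrays (diagonal entries). A brief sketch with arbitrary values:

import numpy as np
from scipy.stats import matrix_normal

M = np.zeros((3, 2))

# scalar rowcov -> 2.0 * I_3, 1-D colcov -> diag([0.3, 0.7])
X = matrix_normal.rvs(mean=M, rowcov=2.0, colcov=[0.3, 0.7],
                      size=5, random_state=np.random.RandomState(1))
assert X.shape == (5, 3, 2)

# the same density, written out with explicit covariance matrices
p1 = matrix_normal.pdf(X[0], mean=M, rowcov=2.0, colcov=[0.3, 0.7])
p2 = matrix_normal.pdf(X[0], mean=M, rowcov=2.0 * np.eye(3),
                       colcov=np.diag([0.3, 0.7]))
assert np.isclose(p1, p2)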
components of\r\n each data point.\r\n\r\n \"\"\"\r\n X = np.asarray(X, dtype=float)\r\n if X.ndim == 2:\r\n X = X[np.newaxis, :]\r\n if X.shape[-2:] != dims:\r\n raise ValueError(\"The shape of array `X` is not compatible \"\r\n \"with the distribution parameters.\")\r\n return X\r\n\r\n def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,\r\n col_prec_rt, log_det_colcov):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dims : tuple\r\n Dimensions of the matrix variates\r\n X : ndarray\r\n Points at which to evaluate the log of the probability\r\n density function\r\n mean : ndarray\r\n Mean of the distribution\r\n row_prec_rt : ndarray\r\n A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)\r\n is the inverse of the among-row covariance matrix\r\n log_det_rowcov : float\r\n Logarithm of the determinant of the among-row covariance matrix\r\n col_prec_rt : ndarray\r\n A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)\r\n is the inverse of the among-column covariance matrix\r\n log_det_colcov : float\r\n Logarithm of the determinant of the among-column covariance matrix\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'logpdf' instead.\r\n\r\n \"\"\"\r\n numrows, numcols = dims\r\n roll_dev = np.rollaxis(X-mean, axis=-1, start=0)\r\n scale_dev = np.tensordot(col_prec_rt.T,\r\n np.dot(roll_dev, row_prec_rt), 1)\r\n maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)\r\n return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov\r\n + numrows*log_det_colcov + maha)\r\n\r\n def logpdf(self, X, mean=None, rowcov=1, colcov=1):\r\n \"\"\"\r\n Log of the matrix normal probability density function.\r\n\r\n Parameters\r\n ----------\r\n X : array_like\r\n Quantiles, with the last two axes of `X` denoting the components.\r\n %(_matnorm_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n logpdf : ndarray\r\n Log of the probability density function evaluated at `X`\r\n\r\n Notes\r\n -----\r\n %(_matnorm_doc_callparams_note)s\r\n\r\n \"\"\"\r\n dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,\r\n colcov)\r\n X = self._process_quantiles(X, dims)\r\n rowpsd = _PSD(rowcov, allow_singular=False)\r\n colpsd = _PSD(colcov, allow_singular=False)\r\n out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,\r\n colpsd.log_pdet)\r\n return _squeeze_output(out)\r\n\r\n def pdf(self, X, mean=None, rowcov=1, colcov=1):\r\n \"\"\"\r\n Matrix normal probability density function.\r\n\r\n Parameters\r\n ----------\r\n X : array_like\r\n Quantiles, with the last two axes of `X` denoting the components.\r\n %(_matnorm_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n pdf : ndarray\r\n Probability density function evaluated at `X`\r\n\r\n Notes\r\n -----\r\n %(_matnorm_doc_callparams_note)s\r\n\r\n \"\"\"\r\n return np.exp(self.logpdf(X, mean, rowcov, colcov))\r\n\r\n def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):\r\n \"\"\"\r\n Draw random samples from a matrix normal distribution.\r\n\r\n Parameters\r\n ----------\r\n %(_matnorm_doc_default_callparams)s\r\n size : integer, optional\r\n Number of samples to draw (default 1).\r\n %(_doc_random_state)s\r\n\r\n Returns\r\n -------\r\n rvs : ndarray or scalar\r\n Random variates of size (`size`, `dims`), where `dims` is the\r\n dimension of the random matrices.\r\n\r\n Notes\r\n -----\r\n %(_matnorm_doc_callparams_note)s\r\n\r\n \"\"\"\r\n size = int(size)\r\n dims, mean, rowcov, colcov = 
self._process_parameters(mean, rowcov,\r\n colcov)\r\n rowchol = scipy.linalg.cholesky(rowcov, lower=True)\r\n colchol = scipy.linalg.cholesky(colcov, lower=True)\r\n random_state = self._get_random_state(random_state)\r\n std_norm = random_state.standard_normal(size=(dims[1],size,dims[0]))\r\n roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)\r\n out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis,:,:]\r\n if size == 1:\r\n #out = np.squeeze(out, axis=0)\r\n out = out.reshape(mean.shape)\r\n return out\r\n\r\nmatrix_normal = matrix_normal_gen()\r\n\r\n\r\nclass matrix_normal_frozen(multi_rv_frozen):\r\n def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):\r\n \"\"\"\r\n Create a frozen matrix normal distribution.\r\n\r\n Parameters\r\n ----------\r\n %(_matnorm_doc_default_callparams)s\r\n seed : None or int or np.random.RandomState instance, optional\r\n If int or RandomState, use it for drawing the random variates.\r\n If None (or np.random), the global np.random state is used.\r\n Default is None.\r\n\r\n Examples\r\n --------\r\n >>> from scipy.stats import matrix_normal\r\n\r\n >>> distn = matrix_normal(mean=np.zeros((3,3)))\r\n >>> X = distn.rvs(); X\r\n array([[-0.02976962, 0.93339138, -0.09663178],\r\n [ 0.67405524, 0.28250467, -0.93308929],\r\n [-0.31144782, 0.74535536, 1.30412916]])\r\n >>> distn.pdf(X)\r\n 2.5160642368346784e-05\r\n >>> distn.logpdf(X)\r\n -10.590229595124615\r\n \"\"\"\r\n self._dist = matrix_normal_gen(seed)\r\n self.dims, self.mean, self.rowcov, self.colcov = \\\r\n self._dist._process_parameters(mean, rowcov, colcov)\r\n self.rowpsd = _PSD(self.rowcov, allow_singular=False)\r\n self.colpsd = _PSD(self.colcov, allow_singular=False)\r\n\r\n def logpdf(self, X):\r\n X = self._dist._process_quantiles(X, self.dims)\r\n out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,\r\n self.rowpsd.log_pdet, self.colpsd.U,\r\n self.colpsd.log_pdet)\r\n return _squeeze_output(out)\r\n\r\n def pdf(self, X):\r\n return np.exp(self.logpdf(X))\r\n\r\n def rvs(self, size=1, random_state=None):\r\n return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,\r\n random_state)\r\n\r\n\r\n# Set frozen generator docstrings from corresponding docstrings in\r\n# matrix_normal_gen and fill in default strings in class docstrings\r\nfor name in ['logpdf', 'pdf', 'rvs']:\r\n method = matrix_normal_gen.__dict__[name]\r\n method_frozen = matrix_normal_frozen.__dict__[name]\r\n method_frozen.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_noparams)\r\n method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)\r\n\r\n_dirichlet_doc_default_callparams = \"\"\"\\\r\nalpha : array_like\r\n The concentration parameters. 
The number of entries determines the\r\n dimensionality of the distribution.\r\n\"\"\"\r\n_dirichlet_doc_frozen_callparams = \"\"\r\n\r\n_dirichlet_doc_frozen_callparams_note = \\\r\n \"\"\"See class definition for a detailed description of parameters.\"\"\"\r\n\r\ndirichlet_docdict_params = {\r\n '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,\r\n '_doc_random_state': _doc_random_state\r\n}\r\n\r\ndirichlet_docdict_noparams = {\r\n '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,\r\n '_doc_random_state': _doc_random_state\r\n}\r\n\r\ndef _dirichlet_check_parameters(alpha):\r\n alpha = np.asarray(alpha)\r\n if np.min(alpha) <= 0:\r\n raise ValueError(\"All parameters must be greater than 0\")\r\n elif alpha.ndim != 1:\r\n raise ValueError(\"Parameter vector 'a' must be one dimensional, \"\r\n \"but a.shape = %s.\" % (alpha.shape, ))\r\n return alpha\r\n\r\n\r\ndef _dirichlet_check_input(alpha, x):\r\n x = np.asarray(x)\r\n\r\n if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:\r\n raise ValueError(\"Vector 'x' must have either the same number \"\r\n \"of entries as, or one entry fewer than, \"\r\n \"parameter vector 'a', but alpha.shape = %s \"\r\n \"and x.shape = %s.\" % (alpha.shape, x.shape))\r\n\r\n if x.shape[0] != alpha.shape[0]:\r\n xk = np.array([1 - np.sum(x, 0)])\r\n if xk.ndim == 1:\r\n x = np.append(x, xk)\r\n elif xk.ndim == 2:\r\n x = np.vstack((x, xk))\r\n else:\r\n raise ValueError(\"The input must be one dimensional or a two \"\r\n \"dimensional matrix containing the entries.\")\r\n\r\n if np.min(x) <= 0:\r\n raise ValueError(\"Each entry in 'x' must be greater than zero.\")\r\n\r\n if np.max(x) > 1:\r\n raise ValueError(\"Each entry in 'x' must be smaller or equal one.\")\r\n\r\n if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():\r\n raise ValueError(\"The input vector 'x' must lie within the normal \"\r\n \"simplex. but np.sum(x, 0) = %s.\" % np.sum(x, 0))\r\n\r\n return x\r\n\r\n\r\ndef _lnB(alpha):\r\n r\"\"\"\r\n Internal helper function to compute the log of the useful quotient\r\n\r\n .. math::\r\n\r\n B(\\alpha) = \\frac{\\prod_{i=1}{K}\\Gamma(\\alpha_i)}{\\Gamma\\left(\\sum_{i=1}^{K}\\alpha_i\\right)}\r\n\r\n Parameters\r\n ----------\r\n %(_dirichlet_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n B : scalar\r\n Helper quotient, internal use only\r\n\r\n \"\"\"\r\n return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))\r\n\r\n\r\nclass dirichlet_gen(multi_rv_generic):\r\n r\"\"\"\r\n A Dirichlet random variable.\r\n\r\n The `alpha` keyword specifies the concentration parameters of the\r\n distribution.\r\n\r\n .. 
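The quotient handled by _lnB above is a plain ratio of gamma functions, and _dirichlet_check_input accepts either all K components of x or the first K-1 (the last one is filled in so the point lies on the simplex). A small sketch restating the log-density from those pieces, with arbitrary alpha:

import numpy as np
from scipy.special import gammaln
from scipy.stats import dirichlet

alpha = np.array([2.0, 3.0, 4.0])

# log B(alpha) = sum(log Gamma(alpha_i)) - log Gamma(sum(alpha_i))
log_B = np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))

# density at a point on the simplex; the last component may be omitted
x = np.array([0.2, 0.3, 0.5])
manual = -log_B + np.sum((alpha - 1) * np.log(x))
assert np.isclose(dirichlet.logpdf(x, alpha), manual)
assert np.isclose(dirichlet.pdf(x[:-1], alpha), np.exp(manual))   # K-1 entries also accepted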
versionadded:: 0.15.0\r\n\r\n Methods\r\n -------\r\n ``pdf(x, alpha)``\r\n Probability density function.\r\n ``logpdf(x, alpha)``\r\n Log of the probability density function.\r\n ``rvs(alpha, size=1, random_state=None)``\r\n Draw random samples from a Dirichlet distribution.\r\n ``mean(alpha)``\r\n The mean of the Dirichlet distribution\r\n ``var(alpha)``\r\n The variance of the Dirichlet distribution\r\n ``entropy(alpha)``\r\n Compute the differential entropy of the multivariate normal.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n %(_dirichlet_doc_default_callparams)s\r\n %(_doc_random_state)s\r\n\r\n Alternatively, the object may be called (as a function) to fix\r\n concentration parameters, returning a \"frozen\" Dirichlet\r\n random variable:\r\n\r\n rv = dirichlet(alpha)\r\n - Frozen object with the same methods but holding the given\r\n concentration parameters fixed.\r\n\r\n Notes\r\n -----\r\n Each :math:`\\alpha` entry must be positive. The distribution has only\r\n support on the simplex defined by\r\n\r\n .. math::\r\n \\sum_{i=1}^{K} x_i \\le 1\r\n\r\n\r\n The probability density function for `dirichlet` is\r\n\r\n .. math::\r\n\r\n f(x) = \\frac{1}{\\mathrm{B}(\\boldsymbol\\alpha)} \\prod_{i=1}^K x_i^{\\alpha_i - 1}\r\n\r\n where\r\n\r\n .. math::\r\n\r\n \\mathrm{B}(\\boldsymbol\\alpha) = \\frac{\\prod_{i=1}^K \\Gamma(\\alpha_i)}\r\n {\\Gamma\\bigl(\\sum_{i=1}^K \\alpha_i\\bigr)}\r\n\r\n and :math:`\\boldsymbol\\alpha=(\\alpha_1,\\ldots,\\alpha_K)`, the\r\n concentration parameters and :math:`K` is the dimension of the space\r\n where :math:`x` takes values.\r\n\r\n Note that the dirichlet interface is somewhat inconsistent.\r\n The array returned by the rvs function is transposed\r\n with respect to the format expected by the pdf and logpdf.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, seed=None):\r\n super(dirichlet_gen, self).__init__(seed)\r\n self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)\r\n\r\n def __call__(self, alpha, seed=None):\r\n return dirichlet_frozen(alpha, seed=seed)\r\n\r\n def _logpdf(self, x, alpha):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n x : ndarray\r\n Points at which to evaluate the log of the probability\r\n density function\r\n %(_dirichlet_doc_default_callparams)s\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'logpdf' instead.\r\n\r\n \"\"\"\r\n lnB = _lnB(alpha)\r\n return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)\r\n\r\n def logpdf(self, x, alpha):\r\n \"\"\"\r\n Log of the Dirichlet probability density function.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n %(_dirichlet_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n pdf : ndarray\r\n Log of the probability density function evaluated at `x`.\r\n\r\n \"\"\"\r\n alpha = _dirichlet_check_parameters(alpha)\r\n x = _dirichlet_check_input(alpha, x)\r\n\r\n out = self._logpdf(x, alpha)\r\n return _squeeze_output(out)\r\n\r\n def pdf(self, x, alpha):\r\n \"\"\"\r\n The Dirichlet probability density function.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n %(_dirichlet_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n pdf : ndarray\r\n The probability density function evaluated at `x`.\r\n\r\n \"\"\"\r\n alpha = _dirichlet_check_parameters(alpha)\r\n x = 
_dirichlet_check_input(alpha, x)\r\n\r\n out = np.exp(self._logpdf(x, alpha))\r\n return _squeeze_output(out)\r\n\r\n def mean(self, alpha):\r\n \"\"\"\r\n Compute the mean of the dirichlet distribution.\r\n\r\n Parameters\r\n ----------\r\n %(_dirichlet_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n mu : scalar\r\n Mean of the Dirichlet distribution\r\n\r\n \"\"\"\r\n alpha = _dirichlet_check_parameters(alpha)\r\n\r\n out = alpha / (np.sum(alpha))\r\n return _squeeze_output(out)\r\n\r\n def var(self, alpha):\r\n \"\"\"\r\n Compute the variance of the dirichlet distribution.\r\n\r\n Parameters\r\n ----------\r\n %(_dirichlet_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n v : scalar\r\n Variance of the Dirichlet distribution\r\n\r\n \"\"\"\r\n\r\n alpha = _dirichlet_check_parameters(alpha)\r\n\r\n alpha0 = np.sum(alpha)\r\n out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))\r\n return out\r\n\r\n def entropy(self, alpha):\r\n \"\"\"\r\n Compute the differential entropy of the dirichlet distribution.\r\n\r\n Parameters\r\n ----------\r\n %(_dirichlet_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n h : scalar\r\n Entropy of the Dirichlet distribution\r\n\r\n \"\"\"\r\n\r\n alpha = _dirichlet_check_parameters(alpha)\r\n\r\n alpha0 = np.sum(alpha)\r\n lnB = _lnB(alpha)\r\n K = alpha.shape[0]\r\n\r\n out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(\r\n (alpha - 1) * scipy.special.psi(alpha))\r\n return _squeeze_output(out)\r\n\r\n def rvs(self, alpha, size=1, random_state=None):\r\n \"\"\"\r\n Draw random samples from a Dirichlet distribution.\r\n\r\n Parameters\r\n ----------\r\n %(_dirichlet_doc_default_callparams)s\r\n size : int, optional\r\n Number of samples to draw (default 1).\r\n %(_doc_random_state)s\r\n\r\n Returns\r\n -------\r\n rvs : ndarray or scalar\r\n Random variates of size (`size`, `N`), where `N` is the\r\n dimension of the random variable.\r\n\r\n \"\"\"\r\n alpha = _dirichlet_check_parameters(alpha)\r\n random_state = self._get_random_state(random_state)\r\n return random_state.dirichlet(alpha, size=size)\r\n\r\n\r\ndirichlet = dirichlet_gen()\r\n\r\n\r\nclass dirichlet_frozen(multi_rv_frozen):\r\n def __init__(self, alpha, seed=None):\r\n self.alpha = _dirichlet_check_parameters(alpha)\r\n self._dist = dirichlet_gen(seed)\r\n\r\n def logpdf(self, x):\r\n return self._dist.logpdf(x, self.alpha)\r\n\r\n def pdf(self, x):\r\n return self._dist.pdf(x, self.alpha)\r\n\r\n def mean(self):\r\n return self._dist.mean(self.alpha)\r\n\r\n def var(self):\r\n return self._dist.var(self.alpha)\r\n\r\n def entropy(self):\r\n return self._dist.entropy(self.alpha)\r\n\r\n def rvs(self, size=1, random_state=None):\r\n return self._dist.rvs(self.alpha, size, random_state)\r\n\r\n\r\n# Set frozen generator docstrings from corresponding docstrings in\r\n# multivariate_normal_gen and fill in default strings in class docstrings\r\nfor name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:\r\n method = dirichlet_gen.__dict__[name]\r\n method_frozen = dirichlet_frozen.__dict__[name]\r\n method_frozen.__doc__ = doccer.docformat(\r\n method.__doc__, dirichlet_docdict_noparams)\r\n method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)\r\n\r\n\r\n_wishart_doc_default_callparams = \"\"\"\\\r\ndf : int\r\n Degrees of freedom, must be greater than or equal to dimension of the\r\n scale matrix\r\nscale : array_like\r\n Symmetric positive definite scale matrix of the 
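A short sketch of the moment helpers and sampling documented above; note the transposed layout of the rvs output relative to pdf/logpdf, which the class docstring warns about. Values are arbitrary:

import numpy as np
from scipy.stats import dirichlet

alpha = np.array([1.0, 2.0, 5.0])
a0 = alpha.sum()

assert np.allclose(dirichlet.mean(alpha), alpha / a0)
assert np.allclose(dirichlet.var(alpha),
                   alpha * (a0 - alpha) / (a0 ** 2 * (a0 + 1)))

samples = dirichlet.rvs(alpha, size=1000,
                        random_state=np.random.RandomState(2))
assert samples.shape == (1000, 3)                 # one sample per row
assert np.allclose(samples.sum(axis=1), 1.0)      # each row lies on the simplex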
distribution\r\n\"\"\"\r\n\r\n_wishart_doc_callparams_note = \"\"\r\n\r\n_wishart_doc_frozen_callparams = \"\"\r\n\r\n_wishart_doc_frozen_callparams_note = \\\r\n \"\"\"See class definition for a detailed description of parameters.\"\"\"\r\n\r\nwishart_docdict_params = {\r\n '_doc_default_callparams': _wishart_doc_default_callparams,\r\n '_doc_callparams_note': _wishart_doc_callparams_note,\r\n '_doc_random_state': _doc_random_state\r\n}\r\n\r\nwishart_docdict_noparams = {\r\n '_doc_default_callparams': _wishart_doc_frozen_callparams,\r\n '_doc_callparams_note': _wishart_doc_frozen_callparams_note,\r\n '_doc_random_state': _doc_random_state\r\n}\r\n\r\n\r\nclass wishart_gen(multi_rv_generic):\r\n r\"\"\"\r\n A Wishart random variable.\r\n\r\n The `df` keyword specifies the degrees of freedom. The `scale` keyword\r\n specifies the scale matrix, which must be symmetric and positive definite.\r\n In this context, the scale matrix is often interpreted in terms of a\r\n multivariate normal precision matrix (the inverse of the covariance\r\n matrix).\r\n\r\n Methods\r\n -------\r\n ``pdf(x, df, scale)``\r\n Probability density function.\r\n ``logpdf(x, df, scale)``\r\n Log of the probability density function.\r\n ``rvs(df, scale, size=1, random_state=None)``\r\n Draw random samples from a Wishart distribution.\r\n ``entropy()``\r\n Compute the differential entropy of the Wishart distribution.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n %(_doc_default_callparams)s\r\n %(_doc_random_state)s\r\n\r\n Alternatively, the object may be called (as a function) to fix the degrees\r\n of freedom and scale parameters, returning a \"frozen\" Wishart random\r\n variable:\r\n\r\n rv = wishart(df=1, scale=1)\r\n - Frozen object with the same methods but holding the given\r\n degrees of freedom and scale fixed.\r\n\r\n See Also\r\n --------\r\n invwishart, chi2\r\n\r\n Notes\r\n -----\r\n %(_doc_callparams_note)s\r\n\r\n The scale matrix `scale` must be a symmetric positive definite\r\n matrix. Singular matrices, including the symmetric positive semi-definite\r\n case, are not supported.\r\n\r\n The Wishart distribution is often denoted\r\n\r\n .. math::\r\n\r\n W_p(\\nu, \\Sigma)\r\n\r\n where :math:`\\nu` is the degrees of freedom and :math:`\\Sigma` is the\r\n :math:`p \\times p` scale matrix.\r\n\r\n The probability density function for `wishart` has support over positive\r\n definite matrices :math:`S`; if :math:`S \\sim W_p(\\nu, \\Sigma)`, then\r\n its PDF is given by:\r\n\r\n .. math::\r\n\r\n f(S) = \\frac{|S|^{\\frac{\\nu - p - 1}{2}}}{2^{ \\frac{\\nu p}{2} }\r\n |\\Sigma|^\\frac{\\nu}{2} \\Gamma_p \\left ( \\frac{\\nu}{2} \\right )}\r\n \\exp\\left( -tr(\\Sigma^{-1} S) / 2 \\right)\r\n\r\n If :math:`S \\sim W_p(\\nu, \\Sigma)` (Wishart) then\r\n :math:`S^{-1} \\sim W_p^{-1}(\\nu, \\Sigma^{-1})` (inverse Wishart).\r\n\r\n If the scale matrix is 1-dimensional and equal to one, then the Wishart\r\n distribution :math:`W_1(\\nu, 1)` collapses to the :math:`\\chi^2(\\nu)`\r\n distribution.\r\n\r\n .. versionadded:: 0.16.0\r\n\r\n References\r\n ----------\r\n .. [1] M.L. Eaton, \"Multivariate Statistics: A Vector Space Approach\",\r\n Wiley, 1983.\r\n .. [2] W.B. Smith and R.R. Hocking, \"Algorithm AS 53: Wishart Variate\r\n Generator\", Applied Statistics, vol. 21, pp. 
341-345, 1972.\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt\r\n >>> from scipy.stats import wishart, chi2\r\n >>> x = np.linspace(1e-5, 8, 100)\r\n >>> w = wishart.pdf(x, df=3, scale=1); w[:5]\r\n array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])\r\n >>> c = chi2.pdf(x, 3); c[:5]\r\n array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])\r\n >>> plt.plot(x, w)\r\n\r\n The input quantiles can be any shape of array, as long as the last\r\n axis labels the components.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, seed=None):\r\n super(wishart_gen, self).__init__(seed)\r\n self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)\r\n\r\n def __call__(self, df=None, scale=None, seed=None):\r\n \"\"\"\r\n Create a frozen Wishart distribution.\r\n\r\n See `wishart_frozen` for more information.\r\n\r\n \"\"\"\r\n return wishart_frozen(df, scale, seed)\r\n\r\n def _process_parameters(self, df, scale):\r\n if scale is None:\r\n scale = 1.0\r\n scale = np.asarray(scale, dtype=float)\r\n\r\n if scale.ndim == 0:\r\n scale = scale[np.newaxis,np.newaxis]\r\n elif scale.ndim == 1:\r\n scale = np.diag(scale)\r\n elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:\r\n raise ValueError(\"Array 'scale' must be square if it is two\"\r\n \" dimensional, but scale.scale = %s.\"\r\n % str(scale.shape))\r\n elif scale.ndim > 2:\r\n raise ValueError(\"Array 'scale' must be at most two-dimensional,\"\r\n \" but scale.ndim = %d\" % scale.ndim)\r\n\r\n dim = scale.shape[0]\r\n\r\n if df is None:\r\n df = dim\r\n elif not np.isscalar(df):\r\n raise ValueError(\"Degrees of freedom must be a scalar.\")\r\n elif df < dim:\r\n raise ValueError(\"Degrees of freedom cannot be less than dimension\"\r\n \" of scale matrix, but df = %d\" % df)\r\n\r\n return dim, df, scale\r\n\r\n def _process_quantiles(self, x, dim):\r\n \"\"\"\r\n Adjust quantiles array so that last axis labels the components of\r\n each data point.\r\n \"\"\"\r\n x = np.asarray(x, dtype=float)\r\n\r\n if x.ndim == 0:\r\n x = x * np.eye(dim)[:, :, np.newaxis]\r\n if x.ndim == 1:\r\n if dim == 1:\r\n x = x[np.newaxis, np.newaxis, :]\r\n else:\r\n x = np.diag(x)[:, :, np.newaxis]\r\n elif x.ndim == 2:\r\n if not x.shape[0] == x.shape[1]:\r\n raise ValueError(\"Quantiles must be square if they are two\"\r\n \" dimensional, but x.shape = %s.\"\r\n % str(x.shape))\r\n x = x[:, :, np.newaxis]\r\n elif x.ndim == 3:\r\n if not x.shape[0] == x.shape[1]:\r\n raise ValueError(\"Quantiles must be square in the first two\"\r\n \" dimensions if they are three dimensional\"\r\n \", but x.shape = %s.\" % str(x.shape))\r\n elif x.ndim > 3:\r\n raise ValueError(\"Quantiles must be at most two-dimensional with\"\r\n \" an additional dimension for multiple\"\r\n \"components, but x.ndim = %d\" % x.ndim)\r\n\r\n # Now we have 3-dim array; should have shape [dim, dim, *]\r\n if not x.shape[0:2] == (dim, dim):\r\n raise ValueError('Quantiles have incompatible dimensions: should'\r\n ' be %s, got %s.' 
% ((dim, dim), x.shape[0:2]))\r\n\r\n return x\r\n\r\n def _process_size(self, size):\r\n size = np.asarray(size)\r\n\r\n if size.ndim == 0:\r\n size = size[np.newaxis]\r\n elif size.ndim > 1:\r\n raise ValueError('Size must be an integer or tuple of integers;'\r\n ' thus must have dimension <= 1.'\r\n ' Got size.ndim = %s' % str(tuple(size)))\r\n n = size.prod()\r\n shape = tuple(size)\r\n\r\n return n, shape\r\n\r\n def _logpdf(self, x, dim, df, scale, log_det_scale, C):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n x : ndarray\r\n Points at which to evaluate the log of the probability\r\n density function\r\n dim : int\r\n Dimension of the scale matrix\r\n df : int\r\n Degrees of freedom\r\n scale : ndarray\r\n Scale matrix\r\n log_det_scale : float\r\n Logarithm of the determinant of the scale matrix\r\n C : ndarray\r\n Cholesky factorization of the scale matrix, lower triagular.\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'logpdf' instead.\r\n\r\n \"\"\"\r\n # log determinant of x\r\n # Note: x has components along the last axis, so that x.T has\r\n # components alone the 0-th axis. Then since det(A) = det(A'), this\r\n # gives us a 1-dim vector of determinants\r\n\r\n # Retrieve tr(scale^{-1} x)\r\n log_det_x = np.zeros(x.shape[-1])\r\n scale_inv_x = np.zeros(x.shape)\r\n tr_scale_inv_x = np.zeros(x.shape[-1])\r\n for i in range(x.shape[-1]):\r\n _, log_det_x[i] = self._cholesky_logdet(x[:,:,i])\r\n scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])\r\n tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()\r\n\r\n # Log PDF\r\n out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -\r\n (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +\r\n multigammaln(0.5*df, dim)))\r\n\r\n return out\r\n\r\n def logpdf(self, x, df, scale):\r\n \"\"\"\r\n Log of the Wishart probability density function.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n Each quantile must be a symmetric positive definite matrix.\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n pdf : ndarray\r\n Log of the probability density function evaluated at `x`\r\n\r\n Notes\r\n -----\r\n %(_doc_callparams_note)s\r\n\r\n \"\"\"\r\n dim, df, scale = self._process_parameters(df, scale)\r\n x = self._process_quantiles(x, dim)\r\n\r\n # Cholesky decomposition of scale, get log(det(scale))\r\n C, log_det_scale = self._cholesky_logdet(scale)\r\n\r\n out = self._logpdf(x, dim, df, scale, log_det_scale, C)\r\n return _squeeze_output(out)\r\n\r\n def pdf(self, x, df, scale):\r\n \"\"\"\r\n Wishart probability density function.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n Each quantile must be a symmetric positive definite matrix.\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n pdf : ndarray\r\n Probability density function evaluated at `x`\r\n\r\n Notes\r\n -----\r\n %(_doc_callparams_note)s\r\n\r\n \"\"\"\r\n return np.exp(self.logpdf(x, df, scale))\r\n\r\n def _mean(self, dim, df, scale):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dim : int\r\n Dimension of the scale matrix\r\n %(_doc_default_callparams)s\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'mean' instead.\r\n\r\n \"\"\"\r\n return df * scale\r\n\r\n def mean(self, df, scale):\r\n \"\"\"\r\n Mean of the Wishart distribution\r\n\r\n 
Parameters\r\n ----------\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n mean : float\r\n The mean of the distribution\r\n \"\"\"\r\n dim, df, scale = self._process_parameters(df, scale)\r\n out = self._mean(dim, df, scale)\r\n return _squeeze_output(out)\r\n\r\n def _mode(self, dim, df, scale):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dim : int\r\n Dimension of the scale matrix\r\n %(_doc_default_callparams)s\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'mode' instead.\r\n\r\n \"\"\"\r\n if df >= dim + 1:\r\n out = (df-dim-1) * scale\r\n else:\r\n out = None\r\n return out\r\n\r\n def mode(self, df, scale):\r\n \"\"\"\r\n Mode of the Wishart distribution\r\n\r\n Only valid if the degrees of freedom are greater than the dimension of\r\n the scale matrix.\r\n\r\n Parameters\r\n ----------\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n mode : float or None\r\n The Mode of the distribution\r\n \"\"\"\r\n dim, df, scale = self._process_parameters(df, scale)\r\n out = self._mode(dim, df, scale)\r\n return _squeeze_output(out) if out is not None else out\r\n\r\n def _var(self, dim, df, scale):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dim : int\r\n Dimension of the scale matrix\r\n %(_doc_default_callparams)s\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'var' instead.\r\n\r\n \"\"\"\r\n var = scale**2\r\n diag = scale.diagonal() # 1 x dim array\r\n var += np.outer(diag, diag)\r\n var *= df\r\n return var\r\n\r\n def var(self, df, scale):\r\n \"\"\"\r\n Variance of the Wishart distribution\r\n\r\n Parameters\r\n ----------\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n var : float\r\n The variance of the distribution\r\n \"\"\"\r\n dim, df, scale = self._process_parameters(df, scale)\r\n out = self._var(dim, df, scale)\r\n return _squeeze_output(out)\r\n\r\n def _standard_rvs(self, n, shape, dim, df, random_state):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n n : integer\r\n Number of variates to generate\r\n shape : iterable\r\n Shape of the variates to generate\r\n dim : int\r\n Dimension of the scale matrix\r\n df : int\r\n Degrees of freedom\r\n random_state : np.random.RandomState instance\r\n RandomState used for drawing the random variates.\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'rvs' instead.\r\n\r\n \"\"\"\r\n # Random normal variates for off-diagonal elements\r\n n_tril = dim * (dim-1) // 2\r\n covariances = random_state.normal(\r\n size=n*n_tril).reshape(shape+(n_tril,))\r\n\r\n # Random chi-square variates for diagonal elements\r\n variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5\r\n for i in range(dim)]].reshape((dim,) + shape[::-1]).T\r\n\r\n # Create the A matri(ces) - lower triangular\r\n A = np.zeros(shape + (dim, dim))\r\n\r\n # Input the covariances\r\n size_idx = tuple([slice(None,None,None)]*len(shape))\r\n tril_idx = np.tril_indices(dim, k=-1)\r\n A[size_idx + tril_idx] = covariances\r\n\r\n # Input the variances\r\n diag_idx = np.diag_indices(dim)\r\n A[size_idx + diag_idx] = variances\r\n\r\n return A\r\n\r\n def _rvs(self, n, shape, dim, df, C, random_state):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n n : integer\r\n Number of variates to generate\r\n shape : iterable\r\n Shape of the variates to generate\r\n dim : int\r\n Dimension of the scale matrix\r\n df : int\r\n Degrees of 
freedom\r\n scale : ndarray\r\n Scale matrix\r\n C : ndarray\r\n Cholesky factorization of the scale matrix, lower triangular.\r\n %(_doc_random_state)s\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'rvs' instead.\r\n\r\n \"\"\"\r\n random_state = self._get_random_state(random_state)\r\n # Calculate the matrices A, which are actually lower triangular\r\n # Cholesky factorizations of a matrix B such that B ~ W(df, I)\r\n A = self._standard_rvs(n, shape, dim, df, random_state)\r\n\r\n # Calculate SA = C A A' C', where SA ~ W(df, scale)\r\n # Note: this is the product of a (lower) (lower) (lower)' (lower)'\r\n # or, denoting B = AA', it is C B C' where C is the lower\r\n # triangular Cholesky factorization of the scale matrix.\r\n # this appears to conflict with the instructions in [1]_, which\r\n # suggest that it should be D' B D where D is the lower\r\n # triangular factorization of the scale matrix. However, it is\r\n # meant to refer to the Bartlett (1933) representation of a\r\n # Wishart random variate as L A A' L' where L is lower triangular\r\n # so it appears that understanding D' to be upper triangular\r\n # is either a typo in or misreading of [1]_.\r\n for index in np.ndindex(shape):\r\n CA = np.dot(C, A[index])\r\n A[index] = np.dot(CA, CA.T)\r\n\r\n return A\r\n\r\n def rvs(self, df, scale, size=1, random_state=None):\r\n \"\"\"\r\n Draw random samples from a Wishart distribution.\r\n\r\n Parameters\r\n ----------\r\n %(_doc_default_callparams)s\r\n size : integer or iterable of integers, optional\r\n Number of samples to draw (default 1).\r\n %(_doc_random_state)s\r\n\r\n Returns\r\n -------\r\n rvs : ndarray\r\n Random variates of shape (`size`) + (`dim`, `dim), where `dim` is\r\n the dimension of the scale matrix.\r\n\r\n Notes\r\n -----\r\n %(_doc_callparams_note)s\r\n\r\n \"\"\"\r\n n, shape = self._process_size(size)\r\n dim, df, scale = self._process_parameters(df, scale)\r\n\r\n # Cholesky decomposition of scale\r\n C = scipy.linalg.cholesky(scale, lower=True)\r\n\r\n out = self._rvs(n, shape, dim, df, C, random_state)\r\n\r\n return _squeeze_output(out)\r\n\r\n def _entropy(self, dim, df, log_det_scale):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dim : int\r\n Dimension of the scale matrix\r\n df : int\r\n Degrees of freedom\r\n log_det_scale : float\r\n Logarithm of the determinant of the scale matrix\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'entropy' instead.\r\n\r\n \"\"\"\r\n return (\r\n 0.5 * (dim+1) * log_det_scale +\r\n 0.5 * dim * (dim+1) * _LOG_2 +\r\n multigammaln(0.5*df, dim) -\r\n 0.5 * (df - dim - 1) * np.sum(\r\n [psi(0.5*(df + 1 - (i+1))) for i in range(dim)]\r\n ) +\r\n 0.5 * df * dim\r\n )\r\n\r\n def entropy(self, df, scale):\r\n \"\"\"\r\n Compute the differential entropy of the Wishart.\r\n\r\n Parameters\r\n ----------\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n h : scalar\r\n Entropy of the Wishart distribution\r\n\r\n Notes\r\n -----\r\n %(_doc_callparams_note)s\r\n\r\n \"\"\"\r\n dim, df, scale = self._process_parameters(df, scale)\r\n _, log_det_scale = self._cholesky_logdet(scale)\r\n return self._entropy(dim, df, log_det_scale)\r\n\r\n def _cholesky_logdet(self, scale):\r\n \"\"\"\r\n Compute Cholesky decomposition and determine (log(det(scale)).\r\n\r\n Parameters\r\n ----------\r\n scale : ndarray\r\n Scale matrix.\r\n\r\n Returns\r\n -------\r\n c_decomp : ndarray\r\n The 
Cholesky decomposition of `scale`.\r\n logdet : scalar\r\n The log of the determinant of `scale`.\r\n\r\n Notes\r\n -----\r\n This computation of ``logdet`` is equivalent to\r\n ``np.linalg.slogdet(scale)``. It is ~2x faster though.\r\n\r\n \"\"\"\r\n c_decomp = scipy.linalg.cholesky(scale, lower=True)\r\n logdet = 2 * np.sum(np.log(c_decomp.diagonal()))\r\n return c_decomp, logdet\r\nwishart = wishart_gen()\r\n\r\n\r\nclass wishart_frozen(multi_rv_frozen):\r\n \"\"\"\r\n Create a frozen Wishart distribution.\r\n\r\n Parameters\r\n ----------\r\n df : array_like\r\n Degrees of freedom of the distribution\r\n scale : array_like\r\n Scale matrix of the distribution\r\n seed : None or int or np.random.RandomState instance, optional\r\n This parameter defines the RandomState object to use for drawing\r\n random variates.\r\n If None (or np.random), the global np.random state is used.\r\n If integer, it is used to seed the local RandomState instance\r\n Default is None.\r\n\r\n \"\"\"\r\n def __init__(self, df, scale, seed=None):\r\n self._dist = wishart_gen(seed)\r\n self.dim, self.df, self.scale = self._dist._process_parameters(\r\n df, scale)\r\n self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)\r\n\r\n def logpdf(self, x):\r\n x = self._dist._process_quantiles(x, self.dim)\r\n\r\n out = self._dist._logpdf(x, self.dim, self.df, self.scale,\r\n self.log_det_scale, self.C)\r\n return _squeeze_output(out)\r\n\r\n def pdf(self, x):\r\n return np.exp(self.logpdf(x))\r\n\r\n def mean(self):\r\n out = self._dist._mean(self.dim, self.df, self.scale)\r\n return _squeeze_output(out)\r\n\r\n def mode(self):\r\n out = self._dist._mode(self.dim, self.df, self.scale)\r\n return _squeeze_output(out) if out is not None else out\r\n\r\n def var(self):\r\n out = self._dist._var(self.dim, self.df, self.scale)\r\n return _squeeze_output(out)\r\n\r\n def rvs(self, size=1, random_state=None):\r\n n, shape = self._dist._process_size(size)\r\n out = self._dist._rvs(n, shape, self.dim, self.df,\r\n self.C, random_state)\r\n return _squeeze_output(out)\r\n\r\n def entropy(self):\r\n return self._dist._entropy(self.dim, self.df, self.log_det_scale)\r\n\r\n# Set frozen generator docstrings from corresponding docstrings in\r\n# Wishart and fill in default strings in class docstrings\r\nfor name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:\r\n method = wishart_gen.__dict__[name]\r\n method_frozen = wishart_frozen.__dict__[name]\r\n method_frozen.__doc__ = doccer.docformat(\r\n method.__doc__, wishart_docdict_noparams)\r\n method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)\r\n\r\n\r\nfrom numpy import asarray_chkfinite, asarray\r\nfrom scipy.linalg.misc import LinAlgError\r\nfrom scipy.linalg.lapack import get_lapack_funcs\r\ndef _cho_inv_batch(a, check_finite=True):\r\n \"\"\"\r\n Invert the matrices a_i, using a Cholesky factorization of A, where\r\n a_i resides in the last two dimensions of a and the other indices describe\r\n the index i.\r\n\r\n Overwrites the data in a.\r\n\r\n Parameters\r\n ----------\r\n a : array\r\n Array of matrices to invert, where the matrices themselves are stored\r\n in the last two dimensions.\r\n check_finite : bool, optional\r\n Whether to check that the input matrices contain only finite numbers.\r\n Disabling may give a performance gain, but may result in problems\r\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\r\n\r\n Returns\r\n -------\r\n x : array\r\n Array of inverses of the matrices 
``a_i``.\r\n\r\n See also\r\n --------\r\n scipy.linalg.cholesky : Cholesky factorization of a matrix\r\n\r\n \"\"\"\r\n if check_finite:\r\n a1 = asarray_chkfinite(a)\r\n else:\r\n a1 = asarray(a)\r\n if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:\r\n raise ValueError('expected square matrix in last two dimensions')\r\n\r\n potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))\r\n\r\n tril_idx = np.tril_indices(a.shape[-2], k=-1)\r\n triu_idx = np.triu_indices(a.shape[-2], k=1)\r\n for index in np.ndindex(a1.shape[:-2]):\r\n\r\n # Cholesky decomposition\r\n a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,\r\n clean=False)\r\n if info > 0:\r\n raise LinAlgError(\"%d-th leading minor not positive definite\"\r\n % info)\r\n if info < 0:\r\n raise ValueError('illegal value in %d-th argument of internal'\r\n ' potrf' % -info)\r\n # Inversion\r\n a1[index], info = potri(a1[index], lower=True, overwrite_c=False)\r\n if info > 0:\r\n raise LinAlgError(\"the inverse could not be computed\")\r\n if info < 0:\r\n raise ValueError('illegal value in %d-th argument of internal'\r\n ' potrf' % -info)\r\n\r\n # Make symmetric (dpotri only fills in the lower triangle)\r\n a1[index][triu_idx] = a1[index][tril_idx]\r\n\r\n return a1\r\n\r\n\r\nclass invwishart_gen(wishart_gen):\r\n r\"\"\"\r\n An inverse Wishart random variable.\r\n\r\n The `df` keyword specifies the degrees of freedom. The `scale` keyword\r\n specifies the scale matrix, which must be symmetric and positive definite.\r\n In this context, the scale matrix is often interpreted in terms of a\r\n multivariate normal covariance matrix.\r\n\r\n Methods\r\n -------\r\n ``pdf(x, df, scale)``\r\n Probability density function.\r\n ``logpdf(x, df, scale)``\r\n Log of the probability density function.\r\n ``rvs(df, scale, size=1, random_state=None)``\r\n Draw random samples from an inverse Wishart distribution.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n %(_doc_default_callparams)s\r\n %(_doc_random_state)s\r\n\r\n Alternatively, the object may be called (as a function) to fix the degrees\r\n of freedom and scale parameters, returning a \"frozen\" inverse Wishart\r\n random variable:\r\n\r\n rv = invwishart(df=1, scale=1)\r\n - Frozen object with the same methods but holding the given\r\n degrees of freedom and scale fixed.\r\n\r\n See Also\r\n --------\r\n wishart\r\n\r\n Notes\r\n -----\r\n %(_doc_callparams_note)s\r\n\r\n The scale matrix `scale` must be a symmetric positive definite\r\n matrix. Singular matrices, including the symmetric positive semi-definite\r\n case, are not supported.\r\n\r\n The inverse Wishart distribution is often denoted\r\n\r\n .. math::\r\n\r\n W_p^{-1}(\\nu, \\Psi)\r\n\r\n where :math:`\\nu` is the degrees of freedom and :math:`\\Psi` is the\r\n :math:`p \\times p` scale matrix.\r\n\r\n The probability density function for `invwishart` has support over positive\r\n definite matrices :math:`S`; if :math:`S \\sim W^{-1}_p(\\nu, \\Sigma)`,\r\n then its PDF is given by:\r\n\r\n .. 
math::\r\n\r\n f(S) = \\frac{|\\Sigma|^\\frac{\\nu}{2}}{2^{ \\frac{\\nu p}{2} }\r\n |S|^{\\frac{\\nu + p + 1}{2}} \\Gamma_p \\left(\\frac{\\nu}{2} \\right)}\r\n \\exp\\left( -tr(\\Sigma S^{-1}) / 2 \\right)\r\n\r\n If :math:`S \\sim W_p^{-1}(\\nu, \\Psi)` (inverse Wishart) then\r\n :math:`S^{-1} \\sim W_p(\\nu, \\Psi^{-1})` (Wishart).\r\n\r\n If the scale matrix is 1-dimensional and equal to one, then the inverse\r\n Wishart distribution :math:`W_1(\\nu, 1)` collapses to the\r\n inverse Gamma distribution with parameters shape = :math:`\\frac{\\nu}{2}`\r\n and scale = :math:`\\frac{1}{2}`.\r\n\r\n .. versionadded:: 0.16.0\r\n\r\n References\r\n ----------\r\n .. [1] M.L. Eaton, \"Multivariate Statistics: A Vector Space Approach\",\r\n Wiley, 1983.\r\n .. [2] M.C. Jones, \"Generating Inverse Wishart Matrices\", Communications in\r\n Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt\r\n >>> from scipy.stats import invwishart, invgamma\r\n >>> x = np.linspace(0.01, 1, 100)\r\n >>> iw = invwishart.pdf(x, df=6, scale=1)\r\n >>> iw[:3]\r\n array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])\r\n >>> ig = invgamma.pdf(x, 6/2., scale=1./2)\r\n >>> ig[:3]\r\n array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])\r\n >>> plt.plot(x, iw)\r\n\r\n The input quantiles can be any shape of array, as long as the last\r\n axis labels the components.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, seed=None):\r\n super(invwishart_gen, self).__init__(seed)\r\n self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)\r\n\r\n def __call__(self, df=None, scale=None, seed=None):\r\n \"\"\"\r\n Create a frozen inverse Wishart distribution.\r\n\r\n See `invwishart_frozen` for more information.\r\n\r\n \"\"\"\r\n return invwishart_frozen(df, scale, seed)\r\n\r\n def _logpdf(self, x, dim, df, scale, log_det_scale):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n x : ndarray\r\n Points at which to evaluate the log of the probability\r\n density function.\r\n dim : int\r\n Dimension of the scale matrix\r\n df : int\r\n Degrees of freedom\r\n scale : ndarray\r\n Scale matrix\r\n log_det_scale : float\r\n Logarithm of the determinant of the scale matrix\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'logpdf' instead.\r\n\r\n \"\"\"\r\n log_det_x = np.zeros(x.shape[-1])\r\n #scale_x_inv = np.zeros(x.shape)\r\n x_inv = np.copy(x).T\r\n if dim > 1:\r\n _cho_inv_batch(x_inv) # works in-place\r\n else:\r\n x_inv = 1./x_inv\r\n tr_scale_x_inv = np.zeros(x.shape[-1])\r\n\r\n for i in range(x.shape[-1]):\r\n C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)\r\n\r\n log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))\r\n\r\n #scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T\r\n tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()\r\n\r\n # Log PDF\r\n out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -\r\n (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -\r\n multigammaln(0.5*df, dim))\r\n\r\n return out\r\n\r\n def logpdf(self, x, df, scale):\r\n \"\"\"\r\n Log of the inverse Wishart probability density function.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n Each quantile must be a symmetric positive definite matrix.\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n pdf : ndarray\r\n Log of the probability density function evaluated at 
`x`\r\n\r\n Notes\r\n -----\r\n %(_doc_callparams_note)s\r\n\r\n \"\"\"\r\n dim, df, scale = self._process_parameters(df, scale)\r\n x = self._process_quantiles(x, dim)\r\n _, log_det_scale = self._cholesky_logdet(scale)\r\n out = self._logpdf(x, dim, df, scale, log_det_scale)\r\n return _squeeze_output(out)\r\n\r\n def pdf(self, x, df, scale):\r\n \"\"\"\r\n Inverse Wishart probability density function.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Quantiles, with the last axis of `x` denoting the components.\r\n Each quantile must be a symmetric positive definite matrix.\r\n\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n pdf : ndarray\r\n Probability density function evaluated at `x`\r\n\r\n Notes\r\n -----\r\n %(_doc_callparams_note)s\r\n\r\n \"\"\"\r\n return np.exp(self.logpdf(x, df, scale))\r\n\r\n def _mean(self, dim, df, scale):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dim : int\r\n Dimension of the scale matrix\r\n %(_doc_default_callparams)s\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'mean' instead.\r\n\r\n \"\"\"\r\n if df > dim + 1:\r\n out = scale / (df - dim - 1)\r\n else:\r\n out = None\r\n return out\r\n\r\n def mean(self, df, scale):\r\n \"\"\"\r\n Mean of the inverse Wishart distribution\r\n\r\n Only valid if the degrees of freedom are greater than the dimension of\r\n the scale matrix plus one.\r\n\r\n Parameters\r\n ----------\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n mean : float or None\r\n The mean of the distribution\r\n\r\n \"\"\"\r\n dim, df, scale = self._process_parameters(df, scale)\r\n out = self._mean(dim, df, scale)\r\n return _squeeze_output(out) if out is not None else out\r\n\r\n def _mode(self, dim, df, scale):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dim : int\r\n Dimension of the scale matrix\r\n %(_doc_default_callparams)s\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'mode' instead.\r\n\r\n \"\"\"\r\n return scale / (df + dim + 1)\r\n\r\n def mode(self, df, scale):\r\n \"\"\"\r\n Mode of the inverse Wishart distribution\r\n\r\n Parameters\r\n ----------\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n mode : float\r\n The Mode of the distribution\r\n\r\n \"\"\"\r\n dim, df, scale = self._process_parameters(df, scale)\r\n out = self._mode(dim, df, scale)\r\n return _squeeze_output(out)\r\n\r\n def _var(self, dim, df, scale):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dim : int\r\n Dimension of the scale matrix\r\n %(_doc_default_callparams)s\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'var' instead.\r\n\r\n \"\"\"\r\n if df > dim + 3:\r\n var = (df - dim + 1) * scale**2\r\n diag = scale.diagonal() # 1 x dim array\r\n var += (df - dim - 1) * np.outer(diag, diag)\r\n var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)\r\n else:\r\n var = None\r\n return var\r\n\r\n def var(self, df, scale):\r\n \"\"\"\r\n Variance of the inverse Wishart distribution\r\n\r\n Only valid if the degrees of freedom are greater than the dimension of\r\n the scale matrix plus three.\r\n\r\n Parameters\r\n ----------\r\n %(_doc_default_callparams)s\r\n\r\n Returns\r\n -------\r\n var : float\r\n The variance of the distribution\r\n \"\"\"\r\n dim, df, scale = self._process_parameters(df, scale)\r\n out = self._var(dim, df, scale)\r\n return _squeeze_output(out) if out is not None else out\r\n\r\n 
def _rvs(self, n, shape, dim, df, C, random_state):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n n : integer\r\n Number of variates to generate\r\n shape : iterable\r\n Shape of the variates to generate\r\n dim : int\r\n Dimension of the scale matrix\r\n df : int\r\n Degrees of freedom\r\n C : ndarray\r\n Cholesky factorization of the scale matrix, lower triagular.\r\n %(_doc_random_state)s\r\n\r\n Notes\r\n -----\r\n As this function does no argument checking, it should not be\r\n called directly; use 'rvs' instead.\r\n\r\n \"\"\"\r\n random_state = self._get_random_state(random_state)\r\n # Get random draws A such that A ~ W(df, I)\r\n A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,\r\n df, random_state)\r\n\r\n # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)\r\n eye = np.eye(dim)\r\n trtrs = get_lapack_funcs(('trtrs'), (A,))\r\n\r\n for index in np.ndindex(A.shape[:-2]):\r\n # Calculate CA\r\n CA = np.dot(C, A[index])\r\n # Get (C A)^{-1} via triangular solver\r\n if dim > 1:\r\n CA, info = trtrs(CA, eye, lower=True)\r\n if info > 0:\r\n raise LinAlgError(\"Singular matrix.\")\r\n if info < 0:\r\n raise ValueError('Illegal value in %d-th argument of'\r\n ' internal trtrs' % -info)\r\n else:\r\n CA = 1. / CA\r\n # Get SA\r\n A[index] = np.dot(CA.T, CA)\r\n\r\n return A\r\n\r\n def rvs(self, df, scale, size=1, random_state=None):\r\n \"\"\"\r\n Draw random samples from an inverse Wishart distribution.\r\n\r\n Parameters\r\n ----------\r\n %(_doc_default_callparams)s\r\n size : integer or iterable of integers, optional\r\n Number of samples to draw (default 1).\r\n %(_doc_random_state)s\r\n\r\n Returns\r\n -------\r\n rvs : ndarray\r\n Random variates of shape (`size`) + (`dim`, `dim), where `dim` is\r\n the dimension of the scale matrix.\r\n\r\n Notes\r\n -----\r\n %(_doc_callparams_note)s\r\n\r\n \"\"\"\r\n n, shape = self._process_size(size)\r\n dim, df, scale = self._process_parameters(df, scale)\r\n\r\n # Invert the scale\r\n eye = np.eye(dim)\r\n L, lower = scipy.linalg.cho_factor(scale, lower=True)\r\n inv_scale = scipy.linalg.cho_solve((L, lower), eye)\r\n # Cholesky decomposition of inverted scale\r\n C = scipy.linalg.cholesky(inv_scale, lower=True)\r\n\r\n out = self._rvs(n, shape, dim, df, C, random_state)\r\n\r\n return _squeeze_output(out)\r\n\r\n def entropy(self):\r\n # Need to find reference for inverse Wishart entropy\r\n raise AttributeError\r\n\r\ninvwishart = invwishart_gen()\r\n\r\nclass invwishart_frozen(multi_rv_frozen):\r\n def __init__(self, df, scale, seed=None):\r\n \"\"\"\r\n Create a frozen inverse Wishart distribution.\r\n\r\n Parameters\r\n ----------\r\n df : array_like\r\n Degrees of freedom of the distribution\r\n scale : array_like\r\n Scale matrix of the distribution\r\n seed : None or int or np.random.RandomState instance, optional\r\n This parameter defines the RandomState object to use for drawing\r\n random variates.\r\n If None (or np.random), the global np.random state is used.\r\n If integer, it is used to seed the local RandomState instance\r\n Default is None.\r\n\r\n \"\"\"\r\n self._dist = invwishart_gen(seed)\r\n self.dim, self.df, self.scale = self._dist._process_parameters(\r\n df, scale\r\n )\r\n\r\n # Get the determinant via Cholesky factorization\r\n C, lower = scipy.linalg.cho_factor(self.scale, lower=True)\r\n self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))\r\n\r\n # Get the inverse using the Cholesky factorization\r\n eye = np.eye(self.dim)\r\n self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)\r\n\r\n # 
Get the Cholesky factorization of the inverse scale\r\n self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)\r\n\r\n def logpdf(self, x):\r\n x = self._dist._process_quantiles(x, self.dim)\r\n out = self._dist._logpdf(x, self.dim, self.df, self.scale,\r\n self.log_det_scale)\r\n return _squeeze_output(out)\r\n\r\n def pdf(self, x):\r\n return np.exp(self.logpdf(x))\r\n\r\n def mean(self):\r\n out = self._dist._mean(self.dim, self.df, self.scale)\r\n return _squeeze_output(out) if out is not None else out\r\n\r\n def mode(self):\r\n out = self._dist._mode(self.dim, self.df, self.scale)\r\n return _squeeze_output(out)\r\n\r\n def var(self):\r\n out = self._dist._var(self.dim, self.df, self.scale)\r\n return _squeeze_output(out) if out is not None else out\r\n\r\n def rvs(self, size=1, random_state=None):\r\n n, shape = self._dist._process_size(size)\r\n\r\n out = self._dist._rvs(n, shape, self.dim, self.df,\r\n self.C, random_state)\r\n\r\n return _squeeze_output(out)\r\n\r\n def entropy(self):\r\n # Need to find reference for inverse Wishart entropy\r\n raise AttributeError\r\n\r\n# Set frozen generator docstrings from corresponding docstrings in\r\n# inverse Wishart and fill in default strings in class docstrings\r\nfor name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:\r\n method = invwishart_gen.__dict__[name]\r\n method_frozen = wishart_frozen.__dict__[name]\r\n method_frozen.__doc__ = doccer.docformat(\r\n method.__doc__, wishart_docdict_noparams)\r\n method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)\r\n\r\nclass special_ortho_group_gen(multi_rv_generic):\r\n r\"\"\"\r\n A matrix-valued SO(N) random variable.\r\n\r\n Return a random rotation matrix, drawn from the Haar distribution\r\n (the only uniform distribution on SO(n)).\r\n\r\n The `dim` keyword specifies the dimension N.\r\n\r\n Methods\r\n -------\r\n ``rvs(dim=None, size=1, random_state=None)``\r\n Draw random samples from SO(N).\r\n\r\n Parameters\r\n ----------\r\n dim : scalar\r\n Dimension of matrices\r\n\r\n Notes\r\n ----------\r\n This class is wrapping the random_rot code from the MDP Toolkit,\r\n https://github.com/mdp-toolkit/mdp-toolkit\r\n\r\n Return a random rotation matrix, drawn from the Haar distribution\r\n (the only uniform distribution on SO(n)).\r\n The algorithm is described in the paper\r\n Stewart, G.W., \"The efficient generation of random orthogonal\r\n matrices with an application to condition estimators\", SIAM Journal\r\n on Numerical Analysis, 17(3), pp. 403-409, 1980.\r\n For more information see\r\n http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization\r\n\r\n See also the similar `ortho_group`.\r\n\r\n Examples\r\n --------\r\n >>> from scipy.stats import special_ortho_group\r\n >>> x = special_ortho_group.rvs(3)\r\n\r\n >>> np.dot(x, x.T)\r\n array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],\r\n [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],\r\n [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])\r\n\r\n >>> import scipy.linalg\r\n >>> scipy.linalg.det(x)\r\n 1.0\r\n\r\n This generates one random matrix from SO(3). 
It is orthogonal and\r\n has a determinant of 1.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, seed=None):\r\n super(special_ortho_group_gen, self).__init__(seed)\r\n self.__doc__ = doccer.docformat(self.__doc__)\r\n\r\n def __call__(self, dim=None, seed=None):\r\n \"\"\"\r\n Create a frozen SO(N) distribution.\r\n\r\n See `special_ortho_group_frozen` for more information.\r\n\r\n \"\"\"\r\n return special_ortho_group_frozen(dim, seed=seed)\r\n\r\n def _process_parameters(self, dim):\r\n \"\"\"\r\n Dimension N must be specified; it cannot be inferred.\r\n \"\"\"\r\n\r\n if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):\r\n raise ValueError(\"\"\"Dimension of rotation must be specified,\r\n and must be a scalar greater than 1.\"\"\")\r\n\r\n return dim\r\n\r\n def rvs(self, dim, size=1, random_state=None):\r\n \"\"\"\r\n Draw random samples from SO(N).\r\n\r\n Parameters\r\n ----------\r\n dim : integer\r\n Dimension of rotation space (N).\r\n size : integer, optional\r\n Number of samples to draw (default 1).\r\n\r\n Returns\r\n -------\r\n rvs : ndarray or scalar\r\n Random size N-dimensional matrices, dimension (size, dim, dim)\r\n\r\n \"\"\"\r\n size = int(size)\r\n if size > 1:\r\n return np.array([self.rvs(dim, size=1, random_state=random_state)\r\n for i in range(size)])\r\n\r\n dim = self._process_parameters(dim)\r\n\r\n random_state = self._get_random_state(random_state)\r\n\r\n H = np.eye(dim)\r\n D = np.ones((dim,))\r\n for n in range(1, dim):\r\n x = random_state.normal(size=(dim-n+1,))\r\n\r\n D[n-1] = np.sign(x[0])\r\n x[0] -= D[n-1]*np.sqrt((x*x).sum())\r\n # Householder transformation\r\n Hx = (np.eye(dim-n+1)\r\n - 2.*np.outer(x, x)/(x*x).sum())\r\n mat = np.eye(dim)\r\n mat[n-1:, n-1:] = Hx\r\n H = np.dot(H, mat)\r\n # Fix the last sign such that the determinant is 1\r\n D[-1] = (-1)**(1-(dim % 2))*D.prod()\r\n # Equivalent to np.dot(np.diag(D), H) but faster, apparently\r\n H = (D*H.T).T\r\n return H\r\n\r\nspecial_ortho_group = special_ortho_group_gen()\r\n\r\nclass special_ortho_group_frozen(multi_rv_frozen):\r\n def __init__(self, dim=None, seed=None):\r\n \"\"\"\r\n Create a frozen SO(N) distribution.\r\n\r\n Parameters\r\n ----------\r\n dim : scalar\r\n Dimension of matrices\r\n seed : None or int or np.random.RandomState instance, optional\r\n This parameter defines the RandomState object to use for drawing\r\n random variates.\r\n If None (or np.random), the global np.random state is used.\r\n If integer, it is used to seed the local RandomState instance\r\n Default is None.\r\n\r\n Examples\r\n --------\r\n >>> from scipy.stats import special_ortho_group\r\n >>> g = special_ortho_group(5)\r\n >>> x = g.rvs()\r\n\r\n \"\"\"\r\n self._dist = special_ortho_group_gen(seed)\r\n self.dim = self._dist._process_parameters(dim)\r\n\r\n def rvs(self, size=1, random_state=None):\r\n return self._dist.rvs(self.dim, size, random_state)\r\n\r\nclass ortho_group_gen(multi_rv_generic):\r\n r\"\"\"\r\n A matrix-valued O(N) random variable.\r\n\r\n Return a random orthogonal matrix, drawn from the O(N) Haar\r\n distribution (the only uniform distribution on O(N)).\r\n\r\n The `dim` keyword specifies the dimension N.\r\n\r\n Methods\r\n -------\r\n ``rvs(dim=None, size=1, random_state=None)``\r\n Draw random samples from O(N).\r\n\r\n Parameters\r\n ----------\r\n dim : scalar\r\n Dimension of matrices\r\n\r\n Notes\r\n ----------\r\n This class is closely related to `special_ortho_group`.\r\n\r\n Some care is taken to avoid numerical error, as per the paper by 
Mezzadri.\r\n\r\n References\r\n ----------\r\n .. [1] F. Mezzadri, \"How to generate random matrices from the classical\r\n compact groups\", arXiv:math-ph/0609050v2.\r\n\r\n Examples\r\n --------\r\n >>> from scipy.stats import ortho_group\r\n >>> x = ortho_group.rvs(3)\r\n\r\n >>> np.dot(x, x.T)\r\n array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],\r\n [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],\r\n [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])\r\n\r\n >>> import scipy.linalg\r\n >>> np.fabs(scipy.linalg.det(x))\r\n 1.0\r\n\r\n This generates one random matrix from O(3). It is orthogonal and\r\n has a determinant of +1 or -1.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, seed=None):\r\n super(ortho_group_gen, self).__init__(seed)\r\n self.__doc__ = doccer.docformat(self.__doc__)\r\n\r\n def _process_parameters(self, dim):\r\n \"\"\"\r\n Dimension N must be specified; it cannot be inferred.\r\n \"\"\"\r\n\r\n if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):\r\n raise ValueError(\"Dimension of rotation must be specified,\"\r\n \"and must be a scalar greater than 1.\")\r\n\r\n return dim\r\n\r\n def rvs(self, dim, size=1, random_state=None):\r\n \"\"\"\r\n Draw random samples from O(N).\r\n\r\n Parameters\r\n ----------\r\n dim : integer\r\n Dimension of rotation space (N).\r\n size : integer, optional\r\n Number of samples to draw (default 1).\r\n\r\n Returns\r\n -------\r\n rvs : ndarray or scalar\r\n Random size N-dimensional matrices, dimension (size, dim, dim)\r\n\r\n \"\"\"\r\n size = int(size)\r\n if size > 1:\r\n return np.array([self.rvs(dim, size=1, random_state=random_state)\r\n for i in range(size)])\r\n\r\n dim = self._process_parameters(dim)\r\n\r\n random_state = self._get_random_state(random_state)\r\n\r\n H = np.eye(dim)\r\n for n in range(1, dim):\r\n x = random_state.normal(size=(dim-n+1,))\r\n # random sign, 50/50, but chosen carefully to avoid roundoff error\r\n D = np.sign(x[0])\r\n x[0] += D*np.sqrt((x*x).sum())\r\n # Householder transformation\r\n Hx = -D*(np.eye(dim-n+1)\r\n - 2.*np.outer(x, x)/(x*x).sum())\r\n mat = np.eye(dim)\r\n mat[n-1:, n-1:] = Hx\r\n H = np.dot(H, mat)\r\n return H\r\n\r\northo_group = ortho_group_gen()\r\n\r\nclass random_correlation_gen(multi_rv_generic):\r\n r\"\"\"\r\n A random correlation matrix.\r\n\r\n Return a random correlation matrix, given a vector of eigenvalues.\r\n\r\n The `eigs` keyword specifies the eigenvalues of the correlation matrix,\r\n and implies the dimension.\r\n\r\n Methods\r\n -------\r\n ``rvs(eigs=None, random_state=None)``\r\n Draw random correlation matrices, all with eigenvalues eigs.\r\n\r\n Parameters\r\n ----------\r\n eigs : 1d ndarray\r\n Eigenvalues of correlation matrix.\r\n\r\n Notes\r\n ----------\r\n\r\n Generates a random correlation matrix following a numerically stable\r\n algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)\r\n similarity transformation to construct a symmetric positive semi-definite\r\n matrix, and applies a series of Givens rotations to scale it to have ones\r\n on the diagonal.\r\n\r\n References\r\n ----------\r\n\r\n .. [1] Davies, Philip I; Higham, Nicholas J; \"Numerically stable generation\r\n of correlation matrices and their factors\", BIT 2000, Vol. 40,\r\n No. 4, pp. 640 651\r\n\r\n Examples\r\n --------\r\n >>> from scipy.stats import random_correlation\r\n >>> np.random.seed(514)\r\n >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5))\r\n >>> x\r\n array([[ 1. 
, -0.20387311, 0.18366501, -0.04953711],\r\n [-0.20387311, 1. , -0.24351129, 0.06703474],\r\n [ 0.18366501, -0.24351129, 1. , 0.38530195],\r\n [-0.04953711, 0.06703474, 0.38530195, 1. ]])\r\n\r\n >>> import scipy.linalg\r\n >>> e, v = scipy.linalg.eigh(x)\r\n >>> e\r\n array([ 0.5, 0.8, 1.2, 1.5])\r\n\r\n \"\"\"\r\n\r\n def __init__(self, seed=None):\r\n super(random_correlation_gen, self).__init__(seed)\r\n self.__doc__ = doccer.docformat(self.__doc__)\r\n\r\n def _process_parameters(self, eigs, tol):\r\n eigs = np.asarray(eigs, dtype=float)\r\n dim = eigs.size\r\n\r\n if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:\r\n raise ValueError(\"Array 'eigs' must be a vector of length greater than 1.\")\r\n\r\n if np.fabs(np.sum(eigs) - dim) > tol:\r\n raise ValueError(\"Sum of eigenvalues must equal dimensionality.\")\r\n\r\n for x in eigs:\r\n if x < -tol:\r\n raise ValueError(\"All eigenvalues must be non-negative.\")\r\n\r\n return dim, eigs\r\n\r\n def _givens_to_1(self, aii, ajj, aij):\r\n \"\"\"Computes a 2x2 Givens matrix to put 1's on the diagonal for the input matrix.\r\n\r\n The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].\r\n\r\n The output matrix g is a 2x2 anti-symmetric matrix of the form [ c s ; -s c ];\r\n the elements c and s are returned.\r\n\r\n Applying the output matrix to the input matrix (as b=g.T M g)\r\n results in a matrix with bii=1, provided tr(M) - det(M) >= 1\r\n and floating point issues do not occur. Otherwise, some other\r\n valid rotation is returned. When tr(M)==2, also bjj=1.\r\n\r\n \"\"\"\r\n aiid = aii - 1.\r\n ajjd = ajj - 1.\r\n\r\n if ajjd == 0:\r\n # ajj==1, so swap aii and ajj to avoid division by zero\r\n return 0., 1.\r\n\r\n dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))\r\n\r\n # The choice of t should be chosen to avoid cancellation [1]\r\n t = (aij + math.copysign(dd, aij)) / ajjd\r\n c = 1. / math.sqrt(1. + t*t)\r\n if c == 0:\r\n # Underflow\r\n s = 1.0\r\n else:\r\n s = c*t\r\n return c, s\r\n\r\n def _to_corr(self, m):\r\n \"\"\"\r\n Given a psd matrix m, rotate to put one's on the diagonal, turning it\r\n into a correlation matrix. This also requires the trace equal the\r\n dimensionality. Note: modifies input matrix\r\n \"\"\"\r\n # Check requirements for in-place Givens\r\n if not (m.flags.c_contiguous and m.dtype == np.float64 and m.shape[0] == m.shape[1]):\r\n raise ValueError()\r\n\r\n d = m.shape[0]\r\n for i in range(d-1):\r\n if m[i,i] == 1:\r\n continue\r\n elif m[i, i] > 1:\r\n for j in range(i+1, d):\r\n if m[j, j] < 1:\r\n break\r\n else:\r\n for j in range(i+1, d):\r\n if m[j, j] > 1:\r\n break\r\n\r\n c, s = self._givens_to_1(m[i,i], m[j,j], m[i,j])\r\n\r\n # Use BLAS to apply Givens rotations in-place. Equivalent to:\r\n # g = np.eye(d)\r\n # g[i, i] = g[j,j] = c\r\n # g[j, i] = -s; g[i, j] = s\r\n # m = np.dot(g.T, np.dot(m, g))\r\n mv = m.ravel()\r\n drot(mv, mv, c, -s, n=d,\r\n offx=i*d, incx=1, offy=j*d, incy=1,\r\n overwrite_x=True, overwrite_y=True)\r\n drot(mv, mv, c, -s, n=d,\r\n offx=i, incx=d, offy=j, incy=d,\r\n overwrite_x=True, overwrite_y=True)\r\n\r\n return m\r\n\r\n def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):\r\n \"\"\"\r\n Draw random correlation matrices\r\n\r\n Parameters\r\n ----------\r\n eigs : 1d ndarray\r\n Eigenvalues of correlation matrix\r\n tol : float, optional\r\n Tolerance for input parameter checks\r\n diag_tol : float, optional\r\n Tolerance for deviation of the diagonal of the resulting\r\n matrix. 
Default: 1e-7\r\n\r\n Raises\r\n ------\r\n RuntimeError\r\n Floating point error prevented generating a valid correlation\r\n matrix.\r\n\r\n Returns\r\n -------\r\n rvs : ndarray or scalar\r\n Random size N-dimensional matrices, dimension (size, dim, dim),\r\n each having eigenvalues eigs.\r\n\r\n \"\"\"\r\n dim, eigs = self._process_parameters(eigs, tol=tol)\r\n\r\n random_state = self._get_random_state(random_state)\r\n\r\n m = ortho_group.rvs(dim, random_state=random_state)\r\n m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m\r\n m = self._to_corr(m) # Carefully rotate to unit diagonal\r\n\r\n # Check diagonal\r\n if abs(m.diagonal() - 1).max() > diag_tol:\r\n raise RuntimeError(\"Failed to generate a valid correlation matrix\")\r\n\r\n return m\r\n\r\nrandom_correlation = random_correlation_gen()\r\n", "\"\"\"\r\nLinear Solvers\r\n==============\r\n\r\nThe default solver is SuperLU (included in the scipy distribution),\r\nwhich can solve real or complex linear systems in both single and\r\ndouble precisions. It is automatically replaced by UMFPACK, if\r\navailable. Note that UMFPACK works in double precision only, so\r\nswitch it off by::\r\n\r\n >>> use_solver(useUmfpack=False)\r\n\r\nto solve in the single precision. See also use_solver documentation.\r\n\r\nExample session::\r\n\r\n >>> from scipy.sparse import csc_matrix, spdiags\r\n >>> from numpy import array\r\n >>> from scipy.sparse.linalg import spsolve, use_solver\r\n >>>\r\n >>> print \"Inverting a sparse linear system:\"\r\n >>> print \"The sparse matrix (constructed from diagonals):\"\r\n >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)\r\n >>> b = array([1, 2, 3, 4, 5])\r\n >>> print \"Solve: single precision complex:\"\r\n >>> use_solver( useUmfpack = False )\r\n >>> a = a.astype('F')\r\n >>> x = spsolve(a, b)\r\n >>> print x\r\n >>> print \"Error: \", a*x-b\r\n >>>\r\n >>> print \"Solve: double precision complex:\"\r\n >>> use_solver( useUmfpack = True )\r\n >>> a = a.astype('D')\r\n >>> x = spsolve(a, b)\r\n >>> print x\r\n >>> print \"Error: \", a*x-b\r\n >>>\r\n >>> print \"Solve: double precision:\"\r\n >>> a = a.astype('d')\r\n >>> x = spsolve(a, b)\r\n >>> print x\r\n >>> print \"Error: \", a*x-b\r\n >>>\r\n >>> print \"Solve: single precision:\"\r\n >>> use_solver( useUmfpack = False )\r\n >>> a = a.astype('f')\r\n >>> x = spsolve(a, b.astype('f'))\r\n >>> print x\r\n >>> print \"Error: \", a*x-b\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\n#import umfpack\r\n#__doc__ = '\\n\\n'.join( (__doc__, umfpack.__doc__) )\r\n#del umfpack\r\n\r\nfrom .linsolve import *\r\nfrom ._superlu import SuperLU\r\nfrom . 
import _add_newdocs\r\n\r\n__all__ = [s for s in dir() if not s.startswith('_')]\r\nfrom numpy.testing import Tester\r\ntest = Tester().test\r\nbench = Tester().bench\r\n", "\"\"\"\r\nUnit tests for the differential global minimization algorithm.\r\n\"\"\"\r\nfrom scipy.optimize import _differentialevolution\r\nfrom scipy.optimize._differentialevolution import DifferentialEvolutionSolver\r\nfrom scipy.optimize import differential_evolution\r\nimport numpy as np\r\nfrom scipy.optimize import rosen\r\nfrom numpy.testing import (assert_equal, TestCase, assert_allclose,\r\n run_module_suite, assert_almost_equal,\r\n assert_string_equal, assert_raises, assert_)\r\n\r\n\r\nclass TestDifferentialEvolutionSolver(TestCase):\r\n\r\n def setUp(self):\r\n self.old_seterr = np.seterr(invalid='raise')\r\n self.limits = np.array([[0., 0.],\r\n [2., 2.]])\r\n self.bounds = [(0., 2.), (0., 2.)]\r\n\r\n self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,\r\n [(0, 100)])\r\n\r\n # dummy_solver2 will be used to test mutation strategies\r\n self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,\r\n [(0, 1)],\r\n popsize=7,\r\n mutation=0.5)\r\n # create a population that's only 7 members long\r\n # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\r\n population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T\r\n self.dummy_solver2.population = population\r\n\r\n def tearDown(self):\r\n np.seterr(**self.old_seterr)\r\n\r\n def quadratic(self, x):\r\n return x[0]**2\r\n\r\n def test__strategy_resolves(self):\r\n # test that the correct mutation function is resolved by\r\n # different requested strategy arguments\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='best1exp')\r\n assert_equal(solver.strategy, 'best1exp')\r\n assert_equal(solver.mutation_func.__name__, '_best1')\r\n\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='best1bin')\r\n assert_equal(solver.strategy, 'best1bin')\r\n assert_equal(solver.mutation_func.__name__, '_best1')\r\n\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='rand1bin')\r\n assert_equal(solver.strategy, 'rand1bin')\r\n assert_equal(solver.mutation_func.__name__, '_rand1')\r\n\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='rand1exp')\r\n assert_equal(solver.strategy, 'rand1exp')\r\n assert_equal(solver.mutation_func.__name__, '_rand1')\r\n\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='rand2exp')\r\n assert_equal(solver.strategy, 'rand2exp')\r\n assert_equal(solver.mutation_func.__name__, '_rand2')\r\n\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='best2bin')\r\n assert_equal(solver.strategy, 'best2bin')\r\n assert_equal(solver.mutation_func.__name__, '_best2')\r\n\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='rand2bin')\r\n assert_equal(solver.strategy, 'rand2bin')\r\n assert_equal(solver.mutation_func.__name__, '_rand2')\r\n\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='rand2exp')\r\n assert_equal(solver.strategy, 'rand2exp')\r\n assert_equal(solver.mutation_func.__name__, '_rand2')\r\n\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='randtobest1bin')\r\n assert_equal(solver.strategy, 'randtobest1bin')\r\n assert_equal(solver.mutation_func.__name__, '_randtobest1')\r\n\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='randtobest1exp')\r\n assert_equal(solver.strategy, 
'randtobest1exp')\r\n assert_equal(solver.mutation_func.__name__, '_randtobest1')\r\n\r\n def test__mutate1(self):\r\n # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.\r\n result = np.array([0.05])\r\n trial = self.dummy_solver2._best1((2, 3, 4, 5, 6))\r\n assert_allclose(trial, result)\r\n\r\n result = np.array([0.25])\r\n trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6))\r\n assert_allclose(trial, result)\r\n\r\n def test__mutate2(self):\r\n # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.\r\n # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\r\n\r\n result = np.array([-0.1])\r\n trial = self.dummy_solver2._best2((2, 3, 4, 5, 6))\r\n assert_allclose(trial, result)\r\n\r\n result = np.array([0.1])\r\n trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6))\r\n assert_allclose(trial, result)\r\n\r\n def test__randtobest1(self):\r\n # strategies randtobest/1/*\r\n result = np.array([0.1])\r\n trial = self.dummy_solver2._randtobest1(1, (2, 3, 4, 5, 6))\r\n assert_allclose(trial, result)\r\n\r\n def test_can_init_with_dithering(self):\r\n mutation = (0.5, 1)\r\n solver = DifferentialEvolutionSolver(self.quadratic,\r\n self.bounds,\r\n mutation=mutation)\r\n\r\n self.assertEqual(solver.dither, list(mutation))\r\n\r\n def test_invalid_mutation_values_arent_accepted(self):\r\n func = rosen\r\n mutation = (0.5, 3)\r\n self.assertRaises(ValueError,\r\n DifferentialEvolutionSolver,\r\n func,\r\n self.bounds,\r\n mutation=mutation)\r\n\r\n mutation = (-1, 1)\r\n self.assertRaises(ValueError,\r\n DifferentialEvolutionSolver,\r\n func,\r\n self.bounds,\r\n mutation=mutation)\r\n\r\n mutation = (0.1, np.nan)\r\n self.assertRaises(ValueError,\r\n DifferentialEvolutionSolver,\r\n func,\r\n self.bounds,\r\n mutation=mutation)\r\n\r\n mutation = 0.5\r\n solver = DifferentialEvolutionSolver(func,\r\n self.bounds,\r\n mutation=mutation)\r\n assert_equal(0.5, solver.scale)\r\n assert_equal(None, solver.dither)\r\n\r\n def test__scale_parameters(self):\r\n trial = np.array([0.3])\r\n assert_equal(30, self.dummy_solver._scale_parameters(trial))\r\n\r\n # it should also work with the limits reversed\r\n self.dummy_solver.limits = np.array([[100], [0.]])\r\n assert_equal(30, self.dummy_solver._scale_parameters(trial))\r\n\r\n def test__unscale_parameters(self):\r\n trial = np.array([30])\r\n assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))\r\n\r\n # it should also work with the limits reversed\r\n self.dummy_solver.limits = np.array([[100], [0.]])\r\n assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))\r\n\r\n def test__ensure_constraint(self):\r\n trial = np.array([1.1, -100, 2., 300., -0.00001])\r\n self.dummy_solver._ensure_constraint(trial)\r\n assert_equal(np.all(trial <= 1), True)\r\n\r\n def test_differential_evolution(self):\r\n # test that the Jmin of DifferentialEvolutionSolver\r\n # is the same as the function evaluation\r\n solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])\r\n result = solver.solve()\r\n assert_almost_equal(result.fun, self.quadratic(result.x))\r\n\r\n def test_best_solution_retrieval(self):\r\n # test that the getter property method for the best solution works.\r\n solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])\r\n result = solver.solve()\r\n assert_equal(result.x, solver.x)\r\n\r\n def test_callback_terminates(self):\r\n # test that if the callback returns true, then the minimization halts\r\n bounds = [(0, 2), (0, 2)]\r\n\r\n def callback(param, convergence=0.):\r\n return True\r\n\r\n result = differential_evolution(rosen, 
bounds, callback=callback)\r\n\r\n assert_string_equal(result.message,\r\n 'callback function requested stop early '\r\n 'by returning True')\r\n\r\n def test_args_tuple_is_passed(self):\r\n # test that the args tuple is passed to the cost function properly.\r\n bounds = [(-10, 10)]\r\n args = (1., 2., 3.)\r\n\r\n def quadratic(x, *args):\r\n if type(args) != tuple:\r\n raise ValueError('args should be a tuple')\r\n return args[0] + args[1] * x + args[2] * x**2.\r\n\r\n result = differential_evolution(quadratic,\r\n bounds,\r\n args=args,\r\n polish=True)\r\n assert_almost_equal(result.fun, 2 / 3.)\r\n\r\n def test_init_with_invalid_strategy(self):\r\n # test that passing an invalid strategy raises ValueError\r\n func = rosen\r\n bounds = [(-3, 3)]\r\n self.assertRaises(ValueError,\r\n differential_evolution,\r\n func,\r\n bounds,\r\n strategy='abc')\r\n\r\n def test_bounds_checking(self):\r\n # test that the bounds checking works\r\n func = rosen\r\n bounds = [(-3, None)]\r\n self.assertRaises(ValueError,\r\n differential_evolution,\r\n func,\r\n bounds)\r\n bounds = [(-3)]\r\n self.assertRaises(ValueError,\r\n differential_evolution,\r\n func,\r\n bounds)\r\n bounds = [(-3, 3), (3, 4, 5)]\r\n self.assertRaises(ValueError,\r\n differential_evolution,\r\n func,\r\n bounds)\r\n\r\n def test_select_samples(self):\r\n # select_samples should return 5 separate random numbers.\r\n limits = np.arange(12., dtype='float64').reshape(2, 6)\r\n bounds = list(zip(limits[0, :], limits[1, :]))\r\n solver = DifferentialEvolutionSolver(None, bounds, popsize=1)\r\n candidate = 0\r\n r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)\r\n assert_equal(\r\n len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)\r\n\r\n def test_maxiter_stops_solve(self):\r\n # test that if the maximum number of iterations is exceeded\r\n # the solver stops.\r\n solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)\r\n result = solver.solve()\r\n assert_equal(result.success, False)\r\n assert_equal(result.message,\r\n 'Maximum number of iterations has been exceeded.')\r\n\r\n def test_maxfun_stops_solve(self):\r\n # test that if the maximum number of function evaluations is exceeded\r\n # during initialisation the solver stops\r\n solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,\r\n polish=False)\r\n result = solver.solve()\r\n\r\n assert_equal(result.nfev, 2)\r\n assert_equal(result.success, False)\r\n assert_equal(result.message,\r\n 'Maximum number of function evaluations has '\r\n 'been exceeded.')\r\n\r\n # test that if the maximum number of function evaluations is exceeded\r\n # during the actual minimisation, then the solver stops.\r\n # Have to turn polishing off, as this will still occur even if maxfun\r\n # is reached. 
For popsize=5 and len(bounds)=2, then there are only 10\r\n # function evaluations during initialisation.\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n popsize=5,\r\n polish=False,\r\n maxfun=40)\r\n result = solver.solve()\r\n\r\n assert_equal(result.nfev, 41)\r\n assert_equal(result.success, False)\r\n assert_equal(result.message,\r\n 'Maximum number of function evaluations has '\r\n 'been exceeded.')\r\n\r\n def test_quadratic(self):\r\n # test the quadratic function from object\r\n solver = DifferentialEvolutionSolver(self.quadratic,\r\n [(-100, 100)],\r\n tol=0.02)\r\n solver.solve()\r\n assert_equal(np.argmin(solver.population_energies), 0)\r\n\r\n def test_quadratic_from_diff_ev(self):\r\n # test the quadratic function from differential_evolution function\r\n differential_evolution(self.quadratic,\r\n [(-100, 100)],\r\n tol=0.02)\r\n\r\n def test_seed_gives_repeatability(self):\r\n result = differential_evolution(self.quadratic,\r\n [(-100, 100)],\r\n polish=False,\r\n seed=1,\r\n tol=0.5)\r\n result2 = differential_evolution(self.quadratic,\r\n [(-100, 100)],\r\n polish=False,\r\n seed=1,\r\n tol=0.5)\r\n assert_equal(result.x, result2.x)\r\n\r\n def test_exp_runs(self):\r\n # test whether exponential mutation loop runs\r\n solver = DifferentialEvolutionSolver(rosen,\r\n self.bounds,\r\n strategy='best1exp',\r\n maxiter=1)\r\n\r\n solver.solve()\r\n\r\n def test__make_random_gen(self):\r\n # If seed is None, return the RandomState singleton used by np.random.\r\n # If seed is an int, return a new RandomState instance seeded with seed.\r\n # If seed is already a RandomState instance, return it.\r\n # Otherwise raise ValueError.\r\n rsi = _differentialevolution._make_random_gen(1)\r\n assert_equal(type(rsi), np.random.RandomState)\r\n rsi = _differentialevolution._make_random_gen(rsi)\r\n assert_equal(type(rsi), np.random.RandomState)\r\n rsi = _differentialevolution._make_random_gen(None)\r\n assert_equal(type(rsi), np.random.RandomState)\r\n self.assertRaises(\r\n ValueError, _differentialevolution._make_random_gen, 'a')\r\n\r\n def test_gh_4511_regression(self):\r\n # This modification of the differential evolution docstring example\r\n # uses a custom popsize that had triggered an off-by-one error.\r\n # Because we do not care about solving the optimization problem in\r\n # this test, we use maxiter=1 to reduce the testing time.\r\n bounds = [(-5, 5), (-5, 5)]\r\n result = differential_evolution(rosen, bounds, popsize=1815, maxiter=1)\r\n\r\n def test_calculate_population_energies(self):\r\n # if popsize is 2 then the overall generation has size (4,)\r\n solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=2)\r\n solver._calculate_population_energies()\r\n\r\n assert_equal(np.argmin(solver.population_energies), 0)\r\n\r\n # initial calculation of the energies should require 4 nfev.\r\n assert_equal(solver._nfev, 4)\r\n\r\n def test_iteration(self):\r\n # test that DifferentialEvolutionSolver is iterable\r\n # if popsize is 2 then the overall generation has size (4,)\r\n solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=2,\r\n maxfun=8)\r\n x, fun = next(solver)\r\n assert_equal(np.size(x, 0), 2)\r\n\r\n # 4 nfev are required for initial calculation of energies, 4 nfev are\r\n # required for the evolution of the 4 population members.\r\n assert_equal(solver._nfev, 8)\r\n\r\n # the next generation should halt because it exceeds maxfun\r\n assert_raises(StopIteration, next, solver)\r\n\r\n # check a proper minimisation can be done by 
an iterable solver\r\n solver = DifferentialEvolutionSolver(rosen, self.bounds)\r\n for i, soln in enumerate(solver):\r\n x_current, fun_current = soln\r\n # need to have this otherwise the solver would never stop.\r\n if i == 1000:\r\n break\r\n\r\n assert_almost_equal(fun_current, 0)\r\n\r\n def test_convergence(self):\r\n solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,\r\n polish=False)\r\n solver.solve()\r\n assert_(solver.convergence < 0.2)\r\n\r\n def test_maxiter_none_GH5731(self):\r\n # Pre 0.17 the previous default for maxiter and maxfun was None.\r\n # the numerical defaults are now 1000 and np.inf. However, some scripts\r\n # will still supply None for both of those, this will raise a TypeError\r\n # in the solve method.\r\n solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,\r\n maxfun=None)\r\n solver.solve()\r\n\r\n def test_population_initiation(self):\r\n # test the different modes of population initiation\r\n\r\n # init must be either 'latinhypercube' or 'random'\r\n # raising ValueError is something else is passed in\r\n assert_raises(ValueError,\r\n DifferentialEvolutionSolver,\r\n *(rosen, self.bounds),\r\n **{'init': 'rubbish'})\r\n\r\n solver = DifferentialEvolutionSolver(rosen, self.bounds)\r\n\r\n # check that population initiation:\r\n # 1) resets _nfev to 0\r\n # 2) all population energies are np.inf\r\n solver.init_population_random()\r\n assert_equal(solver._nfev, 0)\r\n assert_(np.all(np.isinf(solver.population_energies)))\r\n\r\n solver.init_population_lhs()\r\n assert_equal(solver._nfev, 0)\r\n assert_(np.all(np.isinf(solver.population_energies)))\r\n\r\n\r\nif __name__ == '__main__':\r\n run_module_suite()\r\n", "\"\"\"\r\nTests for the stats.mstats module (support for masked arrays)\r\n\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\nimport warnings\r\n\r\nimport numpy as np\r\nfrom numpy import nan\r\nimport numpy.ma as ma\r\nfrom numpy.ma import masked, nomask\r\n\r\nimport scipy.stats.mstats as mstats\r\nfrom scipy import stats\r\nfrom common_tests import check_named_results\r\nfrom numpy.testing import TestCase, run_module_suite\r\nfrom numpy.testing.decorators import skipif\r\nfrom numpy.ma.testutils import (assert_equal, assert_almost_equal,\r\n assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,\r\n assert_allclose, assert_raises, assert_array_equal)\r\n\r\n\r\nclass TestMquantiles(TestCase):\r\n def test_mquantiles_limit_keyword(self):\r\n # Regression test for Trac ticket #867\r\n data = np.array([[6., 7., 1.],\r\n [47., 15., 2.],\r\n [49., 36., 3.],\r\n [15., 39., 4.],\r\n [42., 40., -999.],\r\n [41., 41., -999.],\r\n [7., -999., -999.],\r\n [39., -999., -999.],\r\n [43., -999., -999.],\r\n [40., -999., -999.],\r\n [36., -999., -999.]])\r\n desired = [[19.2, 14.6, 1.45],\r\n [40.0, 37.5, 2.5],\r\n [42.8, 40.05, 3.55]]\r\n quants = mstats.mquantiles(data, axis=0, limit=(0, 50))\r\n assert_almost_equal(quants, desired)\r\n\r\n\r\nclass TestGMean(TestCase):\r\n def test_1D(self):\r\n a = (1,2,3,4)\r\n actual = mstats.gmean(a)\r\n desired = np.power(1*2*3*4,1./4.)\r\n assert_almost_equal(actual, desired, decimal=14)\r\n\r\n desired1 = mstats.gmean(a,axis=-1)\r\n assert_almost_equal(actual, desired1, decimal=14)\r\n assert_(not isinstance(desired1, ma.MaskedArray))\r\n\r\n a = ma.array((1,2,3,4),mask=(0,0,0,1))\r\n actual = mstats.gmean(a)\r\n desired = np.power(1*2*3,1./3.)\r\n assert_almost_equal(actual, desired,decimal=14)\r\n\r\n desired1 = 
mstats.gmean(a,axis=-1)\r\n assert_almost_equal(actual, desired1, decimal=14)\r\n\r\n @skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')\r\n def test_1D_float96(self):\r\n a = ma.array((1,2,3,4), mask=(0,0,0,1))\r\n actual_dt = mstats.gmean(a, dtype=np.float96)\r\n desired_dt = np.power(1 * 2 * 3, 1. / 3.).astype(np.float96)\r\n assert_almost_equal(actual_dt, desired_dt, decimal=14)\r\n assert_(actual_dt.dtype == desired_dt.dtype)\r\n\r\n def test_2D(self):\r\n a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)),\r\n mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0)))\r\n actual = mstats.gmean(a)\r\n desired = np.array((1,2,3,4))\r\n assert_array_almost_equal(actual, desired, decimal=14)\r\n\r\n desired1 = mstats.gmean(a,axis=0)\r\n assert_array_almost_equal(actual, desired1, decimal=14)\r\n\r\n actual = mstats.gmean(a, -1)\r\n desired = ma.array((np.power(1*2*3*4,1./4.),\r\n np.power(2*3,1./2.),\r\n np.power(1*4,1./2.)))\r\n assert_array_almost_equal(actual, desired, decimal=14)\r\n\r\n\r\nclass TestHMean(TestCase):\r\n def test_1D(self):\r\n a = (1,2,3,4)\r\n actual = mstats.hmean(a)\r\n desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)\r\n assert_almost_equal(actual, desired, decimal=14)\r\n desired1 = mstats.hmean(ma.array(a),axis=-1)\r\n assert_almost_equal(actual, desired1, decimal=14)\r\n\r\n a = ma.array((1,2,3,4),mask=(0,0,0,1))\r\n actual = mstats.hmean(a)\r\n desired = 3. / (1./1 + 1./2 + 1./3)\r\n assert_almost_equal(actual, desired,decimal=14)\r\n desired1 = mstats.hmean(a,axis=-1)\r\n assert_almost_equal(actual, desired1, decimal=14)\r\n\r\n @skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')\r\n def test_1D_float96(self):\r\n a = ma.array((1,2,3,4), mask=(0,0,0,1))\r\n actual_dt = mstats.hmean(a, dtype=np.float96)\r\n desired_dt = np.asarray(3. 
/ (1./1 + 1./2 + 1./3),\r\n dtype=np.float96)\r\n assert_almost_equal(actual_dt, desired_dt, decimal=14)\r\n assert_(actual_dt.dtype == desired_dt.dtype)\r\n\r\n def test_2D(self):\r\n a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)),\r\n mask=((0,0,0,0),(1,0,0,1),(0,1,1,0)))\r\n actual = mstats.hmean(a)\r\n desired = ma.array((1,2,3,4))\r\n assert_array_almost_equal(actual, desired, decimal=14)\r\n\r\n actual1 = mstats.hmean(a,axis=-1)\r\n desired = (4./(1/1.+1/2.+1/3.+1/4.),\r\n 2./(1/2.+1/3.),\r\n 2./(1/1.+1/4.)\r\n )\r\n assert_array_almost_equal(actual1, desired, decimal=14)\r\n\r\n\r\nclass TestRanking(TestCase):\r\n def __init__(self, *args, **kwargs):\r\n TestCase.__init__(self, *args, **kwargs)\r\n\r\n def test_ranking(self):\r\n x = ma.array([0,1,1,1,2,3,4,5,5,6,])\r\n assert_almost_equal(mstats.rankdata(x),\r\n [1,3,3,3,5,6,7,8.5,8.5,10])\r\n x[[3,4]] = masked\r\n assert_almost_equal(mstats.rankdata(x),\r\n [1,2.5,2.5,0,0,4,5,6.5,6.5,8])\r\n assert_almost_equal(mstats.rankdata(x, use_missing=True),\r\n [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])\r\n x = ma.array([0,1,5,1,2,4,3,5,1,6,])\r\n assert_almost_equal(mstats.rankdata(x),\r\n [1,3,8.5,3,5,7,6,8.5,3,10])\r\n x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])\r\n assert_almost_equal(mstats.rankdata(x),\r\n [[1,3,3,3,5], [6,7,8.5,8.5,10]])\r\n assert_almost_equal(mstats.rankdata(x, axis=1),\r\n [[1,3,3,3,5], [1,2,3.5,3.5,5]])\r\n assert_almost_equal(mstats.rankdata(x,axis=0),\r\n [[1,1,1,1,1], [2,2,2,2,2,]])\r\n\r\n\r\nclass TestCorr(TestCase):\r\n def test_pearsonr(self):\r\n # Tests some computations of Pearson's r\r\n x = ma.arange(10)\r\n with warnings.catch_warnings():\r\n # The tests in this context are edge cases, with perfect\r\n # correlation or anticorrelation, or totally masked data.\r\n # None of these should trigger a RuntimeWarning.\r\n warnings.simplefilter(\"error\", RuntimeWarning)\r\n\r\n assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)\r\n assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)\r\n\r\n x = ma.array(x, mask=True)\r\n pr = mstats.pearsonr(x, x)\r\n assert_(pr[0] is masked)\r\n assert_(pr[1] is masked)\r\n\r\n x1 = ma.array([-1.0, 0.0, 1.0])\r\n y1 = ma.array([0, 0, 3])\r\n r, p = mstats.pearsonr(x1, y1)\r\n assert_almost_equal(r, np.sqrt(3)/2)\r\n assert_almost_equal(p, 1.0/3)\r\n\r\n # (x2, y2) have the same unmasked data as (x1, y1).\r\n mask = [False, False, False, True]\r\n x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)\r\n y2 = ma.array([0, 0, 3, -1], mask=mask)\r\n r, p = mstats.pearsonr(x2, y2)\r\n assert_almost_equal(r, np.sqrt(3)/2)\r\n assert_almost_equal(p, 1.0/3)\r\n\r\n def test_spearmanr(self):\r\n # Tests some computations of Spearman's rho\r\n (x, y) = ([5.05,6.75,3.21,2.66],[1.65,2.64,2.64,6.95])\r\n assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)\r\n (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])\r\n (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))\r\n assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)\r\n\r\n x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,\r\n 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]\r\n y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,\r\n 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]\r\n assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)\r\n x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,\r\n 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]\r\n y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,\r\n 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]\r\n (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))\r\n assert_almost_equal(mstats.spearmanr(x,y)[0], 
0.6887299)\r\n\r\n # test for namedtuple attributes\r\n res = mstats.spearmanr(x, y)\r\n attributes = ('correlation', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n def test_kendalltau(self):\r\n # Tests some computations of Kendall's tau\r\n x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan])\r\n y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])\r\n z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])\r\n assert_almost_equal(np.asarray(mstats.kendalltau(x,y)),\r\n [+0.3333333,0.4969059])\r\n assert_almost_equal(np.asarray(mstats.kendalltau(x,z)),\r\n [-0.5477226,0.2785987])\r\n #\r\n x = ma.fix_invalid([0, 0, 0, 0,20,20, 0,60, 0,20,\r\n 10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan])\r\n y = ma.fix_invalid([0,80,80,80,10,33,60, 0,67,27,\r\n 25,80,80,80,80,80,80, 0,10,45, np.nan, 0])\r\n result = mstats.kendalltau(x,y)\r\n assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])\r\n\r\n # test for namedtuple attributes\r\n res = mstats.kendalltau(x, y)\r\n attributes = ('correlation', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n def test_kendalltau_seasonal(self):\r\n # Tests the seasonal Kendall tau.\r\n x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],\r\n [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],\r\n [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],\r\n [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]\r\n x = ma.fix_invalid(x).T\r\n output = mstats.kendalltau_seasonal(x)\r\n assert_almost_equal(output['global p-value (indep)'], 0.008, 3)\r\n assert_almost_equal(output['seasonal p-value'].round(2),\r\n [0.18,0.53,0.20,0.04])\r\n\r\n def test_pointbiserial(self):\r\n x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,\r\n 0,0,0,0,1,-1]\r\n y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,\r\n 2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,\r\n 0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]\r\n assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)\r\n\r\n # test for namedtuple attributes\r\n res = mstats.pointbiserialr(x, y)\r\n attributes = ('correlation', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n\r\nclass TestTrimming(TestCase):\r\n\r\n def test_trim(self):\r\n a = ma.arange(10)\r\n assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])\r\n a = ma.arange(10)\r\n assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])\r\n a = ma.arange(10)\r\n assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),\r\n [None,None,None,3,4,5,6,7,None,None])\r\n a = ma.arange(10)\r\n assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),\r\n [None,1,2,3,4,5,6,7,None,None])\r\n\r\n a = ma.arange(12)\r\n a[[0,-1]] = a[5] = masked\r\n assert_equal(mstats.trim(a, (2,8)),\r\n [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])\r\n\r\n x = ma.arange(100).reshape(10, 10)\r\n expected = [1]*10 + [0]*70 + [1]*20\r\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)\r\n assert_equal(trimx._mask.ravel(), expected)\r\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)\r\n assert_equal(trimx._mask.ravel(), expected)\r\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)\r\n assert_equal(trimx._mask.T.ravel(), expected)\r\n\r\n # same as above, but with an extra masked row inserted\r\n x = ma.arange(110).reshape(11, 10)\r\n x[1] = masked\r\n expected = [1]*20 + [0]*70 + [1]*20\r\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)\r\n assert_equal(trimx._mask.ravel(), expected)\r\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)\r\n 
assert_equal(trimx._mask.ravel(), expected)\r\n trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)\r\n assert_equal(trimx.T._mask.ravel(), expected)\r\n\r\n def test_trim_old(self):\r\n x = ma.arange(100)\r\n assert_equal(mstats.trimboth(x).count(), 60)\r\n assert_equal(mstats.trimtail(x,tail='r').count(), 80)\r\n x[50:70] = masked\r\n trimx = mstats.trimboth(x)\r\n assert_equal(trimx.count(), 48)\r\n assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)\r\n x._mask = nomask\r\n x.shape = (10,10)\r\n assert_equal(mstats.trimboth(x).count(), 60)\r\n assert_equal(mstats.trimtail(x).count(), 80)\r\n\r\n def test_trimmedmean(self):\r\n data = ma.array([77, 87, 88,114,151,210,219,246,253,262,\r\n 296,299,306,376,428,515,666,1310,2611])\r\n assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)\r\n assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)\r\n assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)\r\n\r\n def test_trimmed_stde(self):\r\n data = ma.array([77, 87, 88,114,151,210,219,246,253,262,\r\n 296,299,306,376,428,515,666,1310,2611])\r\n assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)\r\n assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)\r\n\r\n def test_winsorization(self):\r\n data = ma.array([77, 87, 88,114,151,210,219,246,253,262,\r\n 296,299,306,376,428,515,666,1310,2611])\r\n assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),\r\n 21551.4, 1)\r\n data[5] = masked\r\n winsorized = mstats.winsorize(data)\r\n assert_equal(winsorized.mask, data.mask)\r\n\r\n\r\nclass TestMoments(TestCase):\r\n # Comparison numbers are found using R v.1.5.1\r\n # note that length(testcase) = 4\r\n # testmathworks comes from documentation for the\r\n # Statistics Toolbox for Matlab and can be found at both\r\n # http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml\r\n # http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml\r\n # Note that both test cases came from here.\r\n testcase = [1,2,3,4]\r\n testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,\r\n np.nan])\r\n testcase_2d = ma.array(\r\n np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],\r\n [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],\r\n [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],\r\n [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],\r\n [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),\r\n mask=np.array([[True, False, False, True, False],\r\n [True, True, True, False, True],\r\n [False, False, False, False, False],\r\n [True, True, True, True, True],\r\n [False, False, True, False, False]], dtype=bool))\r\n\r\n def test_moment(self):\r\n y = mstats.moment(self.testcase,1)\r\n assert_almost_equal(y,0.0,10)\r\n y = mstats.moment(self.testcase,2)\r\n assert_almost_equal(y,1.25)\r\n y = mstats.moment(self.testcase,3)\r\n assert_almost_equal(y,0.0)\r\n y = mstats.moment(self.testcase,4)\r\n assert_almost_equal(y,2.5625)\r\n\r\n def test_variation(self):\r\n y = mstats.variation(self.testcase)\r\n assert_almost_equal(y,0.44721359549996, 10)\r\n\r\n def test_skewness(self):\r\n y = mstats.skew(self.testmathworks)\r\n assert_almost_equal(y,-0.29322304336607,10)\r\n y = mstats.skew(self.testmathworks,bias=0)\r\n assert_almost_equal(y,-0.437111105023940,10)\r\n y = mstats.skew(self.testcase)\r\n assert_almost_equal(y,0.0,10)\r\n\r\n def test_kurtosis(self):\r\n # Set flags for axis = 0 and fisher=0 (Pearson's 
definition of kurtosis\r\n # for compatibility with Matlab)\r\n y = mstats.kurtosis(self.testmathworks,0,fisher=0,bias=1)\r\n assert_almost_equal(y, 2.1658856802973,10)\r\n # Note that MATLAB has confusing docs for the following case\r\n # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness\r\n # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)\r\n # The MATLAB docs imply that both should give Fisher's\r\n y = mstats.kurtosis(self.testmathworks,fisher=0, bias=0)\r\n assert_almost_equal(y, 3.663542721189047,10)\r\n y = mstats.kurtosis(self.testcase,0,0)\r\n assert_almost_equal(y,1.64)\r\n\r\n # test that kurtosis works on multidimensional masked arrays\r\n correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,\r\n -1.26979517952]),\r\n mask=np.array([False, False, False, True,\r\n False], dtype=bool))\r\n assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),\r\n correct_2d)\r\n for i, row in enumerate(self.testcase_2d):\r\n assert_almost_equal(mstats.kurtosis(row), correct_2d[i])\r\n\r\n correct_2d_bias_corrected = ma.array(\r\n np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),\r\n mask=np.array([False, False, False, True, False], dtype=bool))\r\n assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,\r\n bias=False),\r\n correct_2d_bias_corrected)\r\n for i, row in enumerate(self.testcase_2d):\r\n assert_almost_equal(mstats.kurtosis(row, bias=False),\r\n correct_2d_bias_corrected[i])\r\n\r\n # Check consistency between stats and mstats implementations\r\n assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),\r\n stats.kurtosis(self.testcase_2d[2, :]),\r\n nulp=4)\r\n\r\n def test_mode(self):\r\n a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]\r\n a2 = np.reshape(a1, (3,5))\r\n a3 = np.array([1,2,3,4,5,6])\r\n a4 = np.reshape(a3, (3,2))\r\n ma1 = ma.masked_where(ma.array(a1) > 2, a1)\r\n ma2 = ma.masked_where(a2 > 2, a2)\r\n ma3 = ma.masked_where(a3 < 2, a3)\r\n ma4 = ma.masked_where(ma.array(a4) < 2, a4)\r\n assert_equal(mstats.mode(a1, axis=None), (3,4))\r\n assert_equal(mstats.mode(a1, axis=0), (3,4))\r\n assert_equal(mstats.mode(ma1, axis=None), (0,3))\r\n assert_equal(mstats.mode(a2, axis=None), (3,4))\r\n assert_equal(mstats.mode(ma2, axis=None), (0,3))\r\n assert_equal(mstats.mode(a3, axis=None), (1,1))\r\n assert_equal(mstats.mode(ma3, axis=None), (2,1))\r\n assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))\r\n assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))\r\n assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))\r\n assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))\r\n assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))\r\n assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))\r\n\r\n a1_res = mstats.mode(a1, axis=None)\r\n\r\n # test for namedtuple attributes\r\n attributes = ('mode', 'count')\r\n check_named_results(a1_res, attributes, ma=True)\r\n\r\nclass TestPercentile(TestCase):\r\n def setUp(self):\r\n self.a1 = [3,4,5,10,-3,-5,6]\r\n self.a2 = [3,-6,-2,8,7,4,2,1]\r\n self.a3 = [3.,4,5,10,-3,-5,-6,7.0]\r\n\r\n def test_percentile(self):\r\n x = np.arange(8) * 0.5\r\n assert_equal(mstats.scoreatpercentile(x, 0), 0.)\r\n assert_equal(mstats.scoreatpercentile(x, 100), 3.5)\r\n assert_equal(mstats.scoreatpercentile(x, 50), 1.75)\r\n\r\n def test_2D(self):\r\n x = ma.array([[1, 1, 1],\r\n [1, 1, 1],\r\n [4, 4, 3],\r\n [1, 1, 1],\r\n [1, 1, 1]])\r\n assert_equal(mstats.scoreatpercentile(x,50), 
[1,1,1])\r\n\r\n\r\nclass TestVariability(TestCase):\r\n \"\"\" Comparison numbers are found using R v.1.5.1\r\n note that length(testcase) = 4\r\n \"\"\"\r\n testcase = ma.fix_invalid([1,2,3,4,np.nan])\r\n\r\n def test_signaltonoise(self):\r\n # This is not in R, so used:\r\n # mean(testcase, axis=0) / (sqrt(var(testcase)*3/4))\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n y = mstats.signaltonoise(self.testcase)\r\n assert_almost_equal(y, 2.236067977)\r\n\r\n def test_sem(self):\r\n # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)\r\n y = mstats.sem(self.testcase)\r\n assert_almost_equal(y, 0.6454972244)\r\n n = self.testcase.count()\r\n assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),\r\n mstats.sem(self.testcase, ddof=2))\r\n\r\n def test_zmap(self):\r\n # This is not in R, so tested by using:\r\n # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)\r\n y = mstats.zmap(self.testcase, self.testcase)\r\n desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,\r\n 0.44721359549996, 1.3416407864999])\r\n assert_array_almost_equal(desired_unmaskedvals,\r\n y.data[y.mask == False], decimal=12)\r\n\r\n def test_zscore(self):\r\n # This is not in R, so tested by using:\r\n # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)\r\n y = mstats.zscore(self.testcase)\r\n desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,\r\n 0.44721359549996, 1.3416407864999, np.nan])\r\n assert_almost_equal(desired, y, decimal=12)\r\n\r\n\r\nclass TestMisc(TestCase):\r\n\r\n def test_obrientransform(self):\r\n args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,\r\n [6]+[7]*2+[8]*4+[9]*9+[10]*16]\r\n result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],\r\n [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]\r\n assert_almost_equal(np.round(mstats.obrientransform(*args).T,4),\r\n result,4)\r\n\r\n def test_kstwosamp(self):\r\n x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],\r\n [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],\r\n [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],\r\n [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]\r\n x = ma.fix_invalid(x).T\r\n (winter,spring,summer,fall) = x.T\r\n\r\n assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring),4),\r\n (0.1818,0.9892))\r\n assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'g'),4),\r\n (0.1469,0.7734))\r\n assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'l'),4),\r\n (0.1818,0.6744))\r\n\r\n def test_friedmanchisq(self):\r\n # No missing values\r\n args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],\r\n [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],\r\n [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])\r\n result = mstats.friedmanchisquare(*args)\r\n assert_almost_equal(result[0], 10.4737, 4)\r\n assert_almost_equal(result[1], 0.005317, 6)\r\n # Missing values\r\n x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],\r\n [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],\r\n [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],\r\n [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]\r\n x = ma.fix_invalid(x)\r\n result = mstats.friedmanchisquare(*x)\r\n assert_almost_equal(result[0], 2.0156, 4)\r\n assert_almost_equal(result[1], 0.5692, 4)\r\n\r\n # test for namedtuple attributes\r\n attributes = ('statistic', 'pvalue')\r\n check_named_results(result, attributes, ma=True)\r\n\r\n\r\ndef test_regress_simple():\r\n # Regress a line with sinusoidal noise. 
Test for #1273.\r\n x = np.linspace(0, 100, 100)\r\n y = 0.2 * np.linspace(0, 100, 100) + 10\r\n y += np.sin(np.linspace(0, 20, 100))\r\n\r\n slope, intercept, r_value, p_value, sterr = mstats.linregress(x, y)\r\n assert_almost_equal(slope, 0.19644990055858422)\r\n assert_almost_equal(intercept, 10.211269918932341)\r\n\r\n # test for namedtuple attributes\r\n res = mstats.linregress(x, y)\r\n attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n\r\ndef test_theilslopes():\r\n # Test for basic slope and intercept.\r\n slope, intercept, lower, upper = mstats.theilslopes([0,1,1])\r\n assert_almost_equal(slope, 0.5)\r\n assert_almost_equal(intercept, 0.5)\r\n\r\n # Test for correct masking.\r\n y = np.ma.array([0,1,100,1], mask=[False, False, True, False])\r\n slope, intercept, lower, upper = mstats.theilslopes(y)\r\n assert_almost_equal(slope, 1./3)\r\n assert_almost_equal(intercept, 2./3)\r\n\r\n # Test of confidence intervals from example in Sen (1968).\r\n x = [1, 2, 3, 4, 10, 12, 18]\r\n y = [9, 15, 19, 20, 45, 55, 78]\r\n slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)\r\n assert_almost_equal(slope, 4)\r\n assert_almost_equal(upper, 4.38, decimal=2)\r\n assert_almost_equal(lower, 3.71, decimal=2)\r\n\r\n\r\ndef test_plotting_positions():\r\n # Regression test for #1256\r\n pos = mstats.plotting_positions(np.arange(3), 0, 0)\r\n assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75]))\r\n\r\n\r\nclass TestNormalitytests():\r\n\r\n def test_vs_nonmasked(self):\r\n x = np.array((-2,-1,0,1,2,3)*4)**2\r\n assert_array_almost_equal(mstats.normaltest(x),\r\n stats.normaltest(x))\r\n assert_array_almost_equal(mstats.skewtest(x),\r\n stats.skewtest(x))\r\n assert_array_almost_equal(mstats.kurtosistest(x),\r\n stats.kurtosistest(x))\r\n\r\n funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]\r\n mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]\r\n x = [1, 2, 3, 4]\r\n for func, mfunc in zip(funcs, mfuncs):\r\n assert_raises(ValueError, func, x)\r\n assert_raises(ValueError, mfunc, x)\r\n\r\n def test_axis_None(self):\r\n # Test axis=None (equal to axis=0 for 1-D input)\r\n x = np.array((-2,-1,0,1,2,3)*4)**2\r\n assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))\r\n assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))\r\n assert_allclose(mstats.kurtosistest(x, axis=None),\r\n mstats.kurtosistest(x))\r\n\r\n def test_maskedarray_input(self):\r\n # Add some masked values, test result doesn't change\r\n x = np.array((-2,-1,0,1,2,3)*4)**2\r\n xm = np.ma.array(np.r_[np.inf, x, 10],\r\n mask=np.r_[True, [False] * x.size, True])\r\n assert_allclose(mstats.normaltest(xm), stats.normaltest(x))\r\n assert_allclose(mstats.skewtest(xm), stats.skewtest(x))\r\n assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))\r\n\r\n def test_nd_input(self):\r\n x = np.array((-2,-1,0,1,2,3)*4)**2\r\n x_2d = np.vstack([x] * 2).T\r\n for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:\r\n res_1d = func(x)\r\n res_2d = func(x_2d)\r\n assert_allclose(res_2d[0], [res_1d[0]] * 2)\r\n assert_allclose(res_2d[1], [res_1d[1]] * 2)\r\n\r\n def test_normaltest_result_attributes(self):\r\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\r\n res = mstats.normaltest(x)\r\n attributes = ('statistic', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n def test_kurtosistest_result_attributes(self):\r\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\r\n res 
= mstats.kurtosistest(x)\r\n attributes = ('statistic', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n\r\nclass TestFOneway():\r\n def test_result_attributes(self):\r\n a = np.array([655, 788], dtype=np.uint16)\r\n b = np.array([789, 772], dtype=np.uint16)\r\n res = mstats.f_oneway(a, b)\r\n attributes = ('statistic', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n\r\nclass TestMannwhitneyu():\r\n def test_result_attributes(self):\r\n x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1.])\r\n\r\n y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,\r\n 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,\r\n 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,\r\n 2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,\r\n 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\r\n 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,\r\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,\r\n 1., 1., 1., 1.])\r\n\r\n res = mstats.mannwhitneyu(x, y)\r\n attributes = ('statistic', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n\r\nclass TestKruskal():\r\n def test_result_attributes(self):\r\n x = [1, 3, 5, 7, 9]\r\n y = [2, 4, 6, 8, 10]\r\n\r\n res = mstats.kruskal(x, y)\r\n attributes = ('statistic', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n\r\n#TODO: for all ttest functions, add tests with masked array inputs\r\nclass TestTtest_rel():\r\n def test_vs_nonmasked(self):\r\n np.random.seed(1234567)\r\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\r\n\r\n # 1-D inputs\r\n res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])\r\n res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])\r\n assert_allclose(res1, res2)\r\n\r\n # 2-D inputs\r\n res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)\r\n res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)\r\n assert_allclose(res1, res2)\r\n res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)\r\n res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)\r\n assert_allclose(res1, res2)\r\n\r\n # Check default is axis=0\r\n res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])\r\n assert_allclose(res2, res3)\r\n\r\n def test_fully_masked(self):\r\n np.random.seed(1234567)\r\n 
outcome = ma.masked_array(np.random.randn(3, 2),\r\n mask=[[1, 1, 1], [0, 0, 0]])\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n assert_array_equal(mstats.ttest_rel(outcome[:, 0], outcome[:, 1]),\r\n (np.nan, np.nan))\r\n assert_array_equal(mstats.ttest_rel([np.nan, np.nan], [1.0, 2.0]),\r\n (np.nan, np.nan))\r\n\r\n def test_result_attributes(self):\r\n np.random.seed(1234567)\r\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\r\n\r\n res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])\r\n attributes = ('statistic', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n def test_invalid_input_size(self):\r\n assert_raises(ValueError, mstats.ttest_rel,\r\n np.arange(10), np.arange(11))\r\n x = np.arange(24)\r\n assert_raises(ValueError, mstats.ttest_rel,\r\n x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)\r\n assert_raises(ValueError, mstats.ttest_rel,\r\n x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)\r\n\r\n def test_empty(self):\r\n res1 = mstats.ttest_rel([], [])\r\n assert_(np.all(np.isnan(res1)))\r\n\r\n def test_zero_division(self):\r\n t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n assert_equal((np.abs(t), p), (np.inf, 0))\r\n assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0]),\r\n (np.nan, np.nan))\r\n\r\nclass TestTtest_ind():\r\n def test_vs_nonmasked(self):\r\n np.random.seed(1234567)\r\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\r\n\r\n # 1-D inputs\r\n res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])\r\n res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])\r\n assert_allclose(res1, res2)\r\n\r\n # 2-D inputs\r\n res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)\r\n res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)\r\n assert_allclose(res1, res2)\r\n res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)\r\n res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)\r\n assert_allclose(res1, res2)\r\n\r\n # Check default is axis=0\r\n res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])\r\n assert_allclose(res2, res3)\r\n\r\n # Check equal_var\r\n res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)\r\n res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)\r\n assert_allclose(res4, res5)\r\n res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)\r\n res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)\r\n assert_allclose(res4, res5)\r\n\r\n def test_fully_masked(self):\r\n np.random.seed(1234567)\r\n outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]])\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n assert_array_equal(mstats.ttest_ind(outcome[:, 0], outcome[:, 1]),\r\n (np.nan, np.nan))\r\n assert_array_equal(mstats.ttest_ind([np.nan, np.nan], [1.0, 2.0]),\r\n (np.nan, np.nan))\r\n\r\n def test_result_attributes(self):\r\n np.random.seed(1234567)\r\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\r\n\r\n res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])\r\n attributes = ('statistic', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n def test_empty(self):\r\n res1 = mstats.ttest_ind([], [])\r\n assert_(np.all(np.isnan(res1)))\r\n\r\n def test_zero_division(self):\r\n t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore')\r\n assert_equal((np.abs(t), p), (np.inf, 0))\r\n 
assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0]),\r\n                               (np.nan, np.nan))\r\n\r\n        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)\r\n        with warnings.catch_warnings():\r\n            warnings.filterwarnings('ignore')\r\n            assert_equal((np.abs(t), p), (np.inf, 0))\r\n            assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],\r\n                                                equal_var=False),\r\n                               (np.nan, np.nan))\r\n\r\n\r\nclass TestTtest_1samp():\r\n    def test_vs_nonmasked(self):\r\n        np.random.seed(1234567)\r\n        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\r\n\r\n        # 1-D inputs\r\n        res1 = stats.ttest_1samp(outcome[:, 0], 1)\r\n        res2 = mstats.ttest_1samp(outcome[:, 0], 1)\r\n        assert_allclose(res1, res2)\r\n\r\n        # 2-D inputs\r\n        res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)\r\n        res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)\r\n        assert_allclose(res1, res2)\r\n        res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)\r\n        res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)\r\n        assert_allclose(res1, res2)\r\n\r\n        # Check default is axis=0\r\n        res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:])\r\n        assert_allclose(res2, res3)\r\n\r\n    def test_fully_masked(self):\r\n        np.random.seed(1234567)\r\n        outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])\r\n        with warnings.catch_warnings():\r\n            warnings.filterwarnings('ignore')\r\n            assert_array_equal(mstats.ttest_1samp(outcome, 0.0),\r\n                               (np.nan, np.nan))\r\n            assert_array_equal(mstats.ttest_1samp((np.nan, np.nan), 0.0),\r\n                               (np.nan, np.nan))\r\n\r\n    def test_result_attributes(self):\r\n        np.random.seed(1234567)\r\n        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\r\n\r\n        res = mstats.ttest_1samp(outcome[:, 0], 1)\r\n        attributes = ('statistic', 'pvalue')\r\n        check_named_results(res, attributes, ma=True)\r\n\r\n    def test_empty(self):\r\n        res1 = mstats.ttest_1samp([], 1)\r\n        assert_(np.all(np.isnan(res1)))\r\n\r\n    def test_zero_division(self):\r\n        t, p = mstats.ttest_1samp([0, 0, 0], 1)\r\n        with warnings.catch_warnings():\r\n            warnings.filterwarnings('ignore')\r\n            assert_equal((np.abs(t), p), (np.inf, 0))\r\n            assert_array_equal(mstats.ttest_1samp([0, 0, 0], 0),\r\n                               (np.nan, np.nan))\r\n\r\n\r\nclass TestCompareWithStats(TestCase):\r\n    \"\"\"\r\n    Class to compare mstats results with stats results.\r\n\r\n    It is in general assumed that scipy.stats is at a more mature stage than\r\n    stats.mstats. If a routine in mstats produces results similar to those in\r\n    scipy.stats, this is also considered a proper validation of the\r\n    scipy.mstats routine.\r\n\r\n    Different sample sizes are used for testing, as some problems between stats\r\n    and mstats are dependent on sample size.\r\n\r\n    Author: Alexander Loew\r\n\r\n    NOTE that some tests fail. This might be caused by\r\n    a) actual differences or bugs between stats and mstats\r\n    b) numerical inaccuracies\r\n    c) different definitions of routine interfaces\r\n\r\n    These failures need to be checked. The current workaround is to disable\r\n    these tests and to report the issues on scipy-dev.\r\n\r\n    \"\"\"\r\n    def get_n(self):\r\n        \"\"\" Returns list of sample sizes to be used for comparison. 
\"\"\"\r\n return [1000, 100, 10, 5]\r\n\r\n def generate_xy_sample(self, n):\r\n # This routine generates numpy arrays and corresponding masked arrays\r\n # with the same data, but additional masked values\r\n np.random.seed(1234567)\r\n x = np.random.randn(n)\r\n y = x + np.random.randn(n)\r\n xm = np.ones(len(x) + 5) * 1e16\r\n ym = np.ones(len(y) + 5) * 1e16\r\n xm[0:len(x)] = x\r\n ym[0:len(y)] = y\r\n mask = xm > 9e15\r\n xm = np.ma.array(xm, mask=mask)\r\n ym = np.ma.array(ym, mask=mask)\r\n return x, y, xm, ym\r\n\r\n def generate_xy_sample2D(self, n, nx):\r\n x = np.ones((n, nx)) * np.nan\r\n y = np.ones((n, nx)) * np.nan\r\n xm = np.ones((n+5, nx)) * np.nan\r\n ym = np.ones((n+5, nx)) * np.nan\r\n\r\n for i in range(nx):\r\n x[:,i], y[:,i], dx, dy = self.generate_xy_sample(n)\r\n\r\n xm[0:n, :] = x[0:n]\r\n ym[0:n, :] = y[0:n]\r\n xm = np.ma.array(xm, mask=np.isnan(xm))\r\n ym = np.ma.array(ym, mask=np.isnan(ym))\r\n return x, y, xm, ym\r\n\r\n def test_linregress(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n res1 = stats.linregress(x, y)\r\n res2 = stats.mstats.linregress(xm, ym)\r\n assert_allclose(np.asarray(res1), np.asarray(res2))\r\n\r\n def test_pearsonr(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r, p = stats.pearsonr(x, y)\r\n rm, pm = stats.mstats.pearsonr(xm, ym)\r\n\r\n assert_almost_equal(r, rm, decimal=14)\r\n assert_almost_equal(p, pm, decimal=14)\r\n\r\n def test_spearmanr(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r, p = stats.spearmanr(x, y)\r\n rm, pm = stats.mstats.spearmanr(xm, ym)\r\n assert_almost_equal(r, rm, 14)\r\n assert_almost_equal(p, pm, 14)\r\n\r\n def test_gmean(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r = stats.gmean(abs(x))\r\n rm = stats.mstats.gmean(abs(xm))\r\n assert_allclose(r, rm, rtol=1e-13)\r\n\r\n r = stats.gmean(abs(y))\r\n rm = stats.mstats.gmean(abs(ym))\r\n assert_allclose(r, rm, rtol=1e-13)\r\n\r\n def test_hmean(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n\r\n r = stats.hmean(abs(x))\r\n rm = stats.mstats.hmean(abs(xm))\r\n assert_almost_equal(r, rm, 10)\r\n\r\n r = stats.hmean(abs(y))\r\n rm = stats.mstats.hmean(abs(ym))\r\n assert_almost_equal(r, rm, 10)\r\n\r\n def test_skew(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n\r\n r = stats.skew(x)\r\n rm = stats.mstats.skew(xm)\r\n assert_almost_equal(r, rm, 10)\r\n\r\n r = stats.skew(y)\r\n rm = stats.mstats.skew(ym)\r\n assert_almost_equal(r, rm, 10)\r\n\r\n def test_moment(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n\r\n r = stats.moment(x)\r\n rm = stats.mstats.moment(xm)\r\n assert_almost_equal(r, rm, 10)\r\n\r\n r = stats.moment(y)\r\n rm = stats.mstats.moment(ym)\r\n assert_almost_equal(r, rm, 10)\r\n\r\n def test_signaltonoise(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n\r\n r = stats.signaltonoise(x)\r\n rm = stats.mstats.signaltonoise(xm)\r\n assert_almost_equal(r, rm, 10)\r\n\r\n r = stats.signaltonoise(y)\r\n rm = stats.mstats.signaltonoise(ym)\r\n assert_almost_equal(r, rm, 10)\r\n\r\n def test_betai(self):\r\n np.random.seed(12345)\r\n for i in range(10):\r\n a = np.random.rand() * 5.\r\n b = np.random.rand() * 200.\r\n\r\n with warnings.catch_warnings():\r\n 
warnings.filterwarnings('ignore', category=DeprecationWarning)\r\n assert_equal(stats.betai(a, b, 0.), 0.)\r\n assert_equal(stats.betai(a, b, 1.), 1.)\r\n assert_equal(stats.mstats.betai(a, b, 0.), 0.)\r\n assert_equal(stats.mstats.betai(a, b, 1.), 1.)\r\n x = np.random.rand()\r\n assert_almost_equal(stats.betai(a, b, x),\r\n stats.mstats.betai(a, b, x), decimal=13)\r\n\r\n def test_zscore(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n\r\n #reference solution\r\n zx = (x - x.mean()) / x.std()\r\n zy = (y - y.mean()) / y.std()\r\n\r\n #validate stats\r\n assert_allclose(stats.zscore(x), zx, rtol=1e-10)\r\n assert_allclose(stats.zscore(y), zy, rtol=1e-10)\r\n\r\n #compare stats and mstats\r\n assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),\r\n rtol=1e-10)\r\n assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),\r\n rtol=1e-10)\r\n\r\n def test_kurtosis(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r = stats.kurtosis(x)\r\n rm = stats.mstats.kurtosis(xm)\r\n assert_almost_equal(r, rm, 10)\r\n\r\n r = stats.kurtosis(y)\r\n rm = stats.mstats.kurtosis(ym)\r\n assert_almost_equal(r, rm, 10)\r\n\r\n def test_sem(self):\r\n # example from stats.sem doc\r\n a = np.arange(20).reshape(5,4)\r\n am = np.ma.array(a)\r\n r = stats.sem(a,ddof=1)\r\n rm = stats.mstats.sem(am, ddof=1)\r\n\r\n assert_allclose(r, 2.82842712, atol=1e-5)\r\n assert_allclose(rm, 2.82842712, atol=1e-5)\r\n\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),\r\n stats.sem(x, axis=None, ddof=0), decimal=13)\r\n assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),\r\n stats.sem(y, axis=None, ddof=0), decimal=13)\r\n assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),\r\n stats.sem(x, axis=None, ddof=1), decimal=13)\r\n assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),\r\n stats.sem(y, axis=None, ddof=1), decimal=13)\r\n\r\n def test_describe(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r = stats.describe(x, ddof=1)\r\n rm = stats.mstats.describe(xm, ddof=1)\r\n for ii in range(6):\r\n assert_almost_equal(np.asarray(r[ii]),\r\n np.asarray(rm[ii]),\r\n decimal=12)\r\n\r\n def test_describe_result_attributes(self):\r\n actual = mstats.describe(np.arange(5))\r\n attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',\r\n 'kurtosis')\r\n check_named_results(actual, attributes, ma=True)\r\n\r\n def test_rankdata(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r = stats.rankdata(x)\r\n rm = stats.mstats.rankdata(x)\r\n assert_allclose(r, rm)\r\n\r\n def test_tmean(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)\r\n assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)\r\n\r\n def test_tmax(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n assert_almost_equal(stats.tmax(x,2.),\r\n stats.mstats.tmax(xm,2.), 10)\r\n assert_almost_equal(stats.tmax(y,2.),\r\n stats.mstats.tmax(ym,2.), 10)\r\n\r\n assert_almost_equal(stats.tmax(x, upperlimit=3.),\r\n stats.mstats.tmax(xm, upperlimit=3.), 10)\r\n assert_almost_equal(stats.tmax(y, upperlimit=3.),\r\n stats.mstats.tmax(ym, upperlimit=3.), 10)\r\n\r\n def test_tmin(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n 
assert_equal(stats.tmin(x),stats.mstats.tmin(xm))\r\n assert_equal(stats.tmin(y),stats.mstats.tmin(ym))\r\n\r\n assert_almost_equal(stats.tmin(x,lowerlimit=-1.),\r\n stats.mstats.tmin(xm,lowerlimit=-1.), 10)\r\n assert_almost_equal(stats.tmin(y,lowerlimit=-1.),\r\n stats.mstats.tmin(ym,lowerlimit=-1.), 10)\r\n\r\n def test_zmap(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n z = stats.zmap(x,y)\r\n zm = stats.mstats.zmap(xm,ym)\r\n assert_allclose(z, zm[0:len(z)], atol=1e-10)\r\n\r\n def test_variation(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),\r\n decimal=12)\r\n assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),\r\n decimal=12)\r\n\r\n def test_tvar(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),\r\n decimal=12)\r\n assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),\r\n decimal=12)\r\n\r\n def test_trimboth(self):\r\n a = np.arange(20)\r\n b = stats.trimboth(a, 0.1)\r\n bm = stats.mstats.trimboth(a, 0.1)\r\n assert_allclose(np.sort(b), bm.data[~bm.mask])\r\n\r\n def test_tsem(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n assert_almost_equal(stats.tsem(x),stats.mstats.tsem(xm), decimal=14)\r\n assert_almost_equal(stats.tsem(y),stats.mstats.tsem(ym), decimal=14)\r\n assert_almost_equal(stats.tsem(x,limits=(-2.,2.)),\r\n stats.mstats.tsem(xm,limits=(-2.,2.)),\r\n decimal=14)\r\n\r\n def test_skewtest(self):\r\n # this test is for 1D data\r\n for n in self.get_n():\r\n if n > 8:\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r = stats.skewtest(x)\r\n rm = stats.mstats.skewtest(xm)\r\n assert_allclose(r[0], rm[0], rtol=1e-15)\r\n # TODO this test is not performed as it is a known issue that\r\n # mstats returns a slightly different p-value what is a bit\r\n # strange is that other tests like test_maskedarray_input don't\r\n # fail!\r\n #~ assert_almost_equal(r[1], rm[1])\r\n\r\n def test_skewtest_result_attributes(self):\r\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\r\n res = mstats.skewtest(x)\r\n attributes = ('statistic', 'pvalue')\r\n check_named_results(res, attributes, ma=True)\r\n\r\n def test_skewtest_2D_notmasked(self):\r\n # a normal ndarray is passed to the masked function\r\n x = np.random.random((20, 2)) * 20.\r\n r = stats.skewtest(x)\r\n rm = stats.mstats.skewtest(x)\r\n assert_allclose(np.asarray(r), np.asarray(rm))\r\n\r\n def test_skewtest_2D_WithMask(self):\r\n nx = 2\r\n for n in self.get_n():\r\n if n > 8:\r\n x, y, xm, ym = self.generate_xy_sample2D(n, nx)\r\n r = stats.skewtest(x)\r\n rm = stats.mstats.skewtest(xm)\r\n\r\n assert_equal(r[0][0],rm[0][0])\r\n assert_equal(r[0][1],rm[0][1])\r\n\r\n def test_normaltest(self):\r\n np.seterr(over='raise')\r\n for n in self.get_n():\r\n if n > 8:\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings('ignore', category=UserWarning)\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r = stats.normaltest(x)\r\n rm = stats.mstats.normaltest(xm)\r\n assert_allclose(np.asarray(r), np.asarray(rm))\r\n\r\n def test_find_repeats(self):\r\n x = np.asarray([1,1,2,2,3,3,3,4,4,4,4]).astype('float')\r\n tmp = np.asarray([1,1,2,2,3,3,3,4,4,4,4,5,5,5,5]).astype('float')\r\n mask = (tmp == 5.)\r\n xm = np.ma.array(tmp, mask=mask)\r\n x_orig, xm_orig = x.copy(), xm.copy()\r\n\r\n r = stats.find_repeats(x)\r\n rm = 
stats.mstats.find_repeats(xm)\r\n\r\n assert_equal(r, rm)\r\n assert_equal(x, x_orig)\r\n assert_equal(xm, xm_orig)\r\n\r\n # This crazy behavior is expected by count_tied_groups, but is not\r\n # in the docstring...\r\n _, counts = stats.mstats.find_repeats([])\r\n assert_equal(counts, np.array(0, dtype=np.intp))\r\n\r\n def test_kendalltau(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r = stats.kendalltau(x, y)\r\n rm = stats.mstats.kendalltau(xm, ym)\r\n assert_almost_equal(r[0], rm[0], decimal=10)\r\n assert_almost_equal(r[1], rm[1], decimal=7)\r\n\r\n def test_obrientransform(self):\r\n for n in self.get_n():\r\n x, y, xm, ym = self.generate_xy_sample(n)\r\n r = stats.obrientransform(x)\r\n rm = stats.mstats.obrientransform(xm)\r\n assert_almost_equal(r.T, rm[0:len(x)])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n", "# -*- encoding:utf-8 -*-\r\n\"\"\"\r\n==================================\r\nInput and output (:mod:`scipy.io`)\r\n==================================\r\n\r\n.. currentmodule:: scipy.io\r\n\r\nSciPy has many modules, classes, and functions available to read data\r\nfrom and write data to a variety of file formats.\r\n\r\n.. seealso:: :ref:`numpy-reference.routines.io` (in Numpy)\r\n\r\nMATLAB® files\r\n=============\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n loadmat - Read a MATLAB style mat file (version 4 through 7.1)\r\n savemat - Write a MATLAB style mat file (version 4 through 7.1)\r\n whosmat - List contents of a MATLAB style mat file (version 4 through 7.1)\r\n\r\nIDL® files\r\n==========\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n readsav - Read an IDL 'save' file\r\n\r\nMatrix Market files\r\n===================\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n mminfo - Query matrix info from Matrix Market formatted file\r\n mmread - Read matrix from Matrix Market formatted file\r\n mmwrite - Write matrix to Matrix Market formatted file\r\n\r\nUnformatted Fortran files\r\n===============================\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n FortranFile - A file object for unformatted sequential Fortran files\r\n\r\nNetcdf\r\n======\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n netcdf_file - A file object for NetCDF data\r\n netcdf_variable - A data object for the netcdf module\r\n\r\nHarwell-Boeing files\r\n====================\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n hb_read -- read H-B file\r\n hb_write -- write H-B file\r\n\r\nWav sound files (:mod:`scipy.io.wavfile`)\r\n=========================================\r\n\r\n.. module:: scipy.io.wavfile\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n read\r\n write\r\n WavFileWarning\r\n\r\nArff files (:mod:`scipy.io.arff`)\r\n=================================\r\n\r\n.. module:: scipy.io.arff\r\n\r\n.. 
autosummary::\r\n :toctree: generated/\r\n\r\n loadarff\r\n MetaData\r\n ArffError\r\n ParseArffError\r\n\r\n\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\n# matfile read and write\r\nfrom .matlab import loadmat, savemat, whosmat, byteordercodes\r\n\r\n# netCDF file support\r\nfrom .netcdf import netcdf_file, netcdf_variable\r\n\r\n# Fortran file support\r\nfrom ._fortran import FortranFile\r\n\r\nfrom .mmio import mminfo, mmread, mmwrite\r\nfrom .idl import readsav\r\nfrom .harwell_boeing import hb_read, hb_write\r\n\r\n__all__ = [s for s in dir() if not s.startswith('_')]\r\nfrom numpy.testing import Tester\r\ntest = Tester().test\r\n", "\"\"\"\r\nUtilities helpful for handling the vehicle model: find floor/ceiling, convert coordinates to\r\nvoxel grid points, label regions (and based on a specific point), etc.\r\n\"\"\"\r\nimport logging\r\nimport numpy as np\r\nfrom scipy.ndimage import measurements as meas\r\n\r\n\r\nclass Component(object):\r\n \"\"\"Object to store information about a specific vehicle component, manikin, etc.\"\"\"\r\n def __init__(self, voxel_data_dict, vehicle_csys=None, voxel_size=None):\r\n \"\"\"\r\n :param voxel_data_dict: Output of voxelization routine, usually read in from file.\r\n :param voxel_size: The spacing between adjacent voxels.\r\n :param vehicle_csys: The transform matrix to go to the vehicle csys; usually ignored\r\n \"\"\"\r\n # Allow occupied voxels to be a string (file location) or numpy array (data)\r\n\r\n self.voxel_size = voxel_size or voxel_data_dict['vox_size']\r\n self.vehicle_csys = vehicle_csys if vehicle_csys is not None else np.eye(4)\r\n\r\n # Hack so that we can use old manikins where voxels are stored as occupied_voxels\r\n try:\r\n self.occupied_voxels = voxel_data_dict[\"value\"]\r\n except KeyError:\r\n self.occupied_voxels = voxel_data_dict[\"occupied_voxels\"]\r\n\r\n self.shape = self.occupied_voxels.shape\r\n\r\n self.labels = None\r\n self.labels_binary = None\r\n\r\n try:\r\n # Load in x,y,z grid if available. Otherwise can create arbitrary grid on demand.\r\n self.x_grid = voxel_data_dict[\"x_grid\"]\r\n self.y_grid = voxel_data_dict[\"y_grid\"]\r\n self.z_grid = voxel_data_dict[\"z_grid\"]\r\n except KeyError:\r\n logging.debug(\"No predefined gridpoints.\")\r\n self.x_grid = None\r\n self.y_grid = None\r\n self.z_grid = None\r\n\r\n def _make_labeled_regions(self, mask_from_voxel=None):\r\n \"\"\"\r\n Label regions of EMPTY space in the voxelated grid. Can be used for binarization to find\r\n vehicle interior (as well as floor and ceiling, based on vehicle interior)\r\n\r\n Optionally creates true/false mask based on the ijk voxel indices of a selected point.\r\n \"\"\"\r\n # Labeled image is converted to boolean, because ndimage thinks 0=background\r\n\r\n self.labels, _ = meas.label((self.occupied_voxels == 0))\r\n # Store whether labels are a binary mask (desired region/not), or a list of indiv regions\r\n self.labels_binary = mask_from_voxel\r\n if mask_from_voxel is not None:\r\n # (sans a buffer region, this method of empty space detection may cause problems)\r\n label_desired = self.labels[mask_from_voxel]\r\n self.labels = (self.labels == label_desired)\r\n self.labels_binary = mask_from_voxel\r\n return self.labels\r\n\r\n def _make_coord_grid(self, csys=None):\r\n \"\"\"\r\n Originating at the given coord system, provide a coordinate grid to match voxel numbers\r\n (giving xyz positions of voxel[i,j,k]). 
(so doesn't give range such as -x...+x)\r\n\r\n :param csys: Defaults to origin; else 4x4 transformation matrix representing the\r\n coordinate system in which to rotate the coordinates and make the grid\r\n \"\"\"\r\n if csys is None:\r\n csys = np.identity(4)\r\n\r\n origin = csys[:-1, 3]\r\n self.x_grid = np.arange(origin[0] * self.voxel_size,\r\n origin[0] * self.voxel_size + self.shape[0] * self.voxel_size,\r\n self.voxel_size)\r\n self.y_grid = np.arange(origin[1] * self.voxel_size,\r\n origin[1] * self.voxel_size + self.shape[1] * self.voxel_size,\r\n self.voxel_size)\r\n self.z_grid = np.arange(origin[2] * self.voxel_size,\r\n origin[2] * self.voxel_size + self.shape[2] * self.voxel_size,\r\n self.voxel_size)\r\n\r\n return self.x_grid, self.y_grid, self.z_grid\r\n\r\n def get_vox_from_coord(self, coord_vec):\r\n \"\"\"\r\n Turn xyz coordinate vector (or list) into tuple of the ijk voxel #s (array indices in\r\n occupied_voxels).\r\n \"\"\"\r\n assert len(coord_vec) == 3, \"Specified coordinate vector of incorrect size\"\r\n\r\n # Correct for diff in alignment between vehicle coords and occ voxel grid\r\n coord_vec = np.dot(np.array(coord_vec), self.vehicle_csys[:3, :3])\r\n x_pos = (i for i, c in enumerate(self.x_grid) if coord_vec[0] <= c)\r\n y_pos = (i for i, c in enumerate(self.y_grid) if coord_vec[1] <= c)\r\n z_pos = (i for i, c in enumerate(self.z_grid) if coord_vec[2] <= c)\r\n\r\n return x_pos.next(), y_pos.next(), z_pos.next()\r\n\r\n def get_labels(self, mask_from_voxel=None):\r\n \"\"\"\r\n Return or create labels as needed\r\n \"\"\"\r\n if self.labels is not None and self.labels_binary == mask_from_voxel:\r\n return self.labels\r\n # If the labels don't exist or don't match what we want, regenerate\r\n return self._make_labeled_regions(mask_from_voxel=mask_from_voxel)\r\n\r\n def get_coord_grid(self):\r\n \"\"\"\r\n Return or create the coordinates along a voxelated grid, as needed\r\n \"\"\"\r\n if self.x_grid is not None and self.y_grid is not None and self.z_grid is not None:\r\n return self.x_grid, self.y_grid, self.z_grid\r\n else:\r\n return self._make_coord_grid()\r\n\r\n\r\nclass Vehicle(Component):\r\n \"\"\"\r\n Add specific methods for finding floor and ceiling\r\n \"\"\"\r\n def __init__(self, voxel_data_dict, vehicle_csys=None, voxel_size=None):\r\n \"\"\"\r\n Call constructor of superclass and then add on two more empty parameters\r\n \"\"\"\r\n super(Vehicle, self).__init__(voxel_data_dict,\r\n vehicle_csys=vehicle_csys,\r\n voxel_size=voxel_size)\r\n self.floor = None\r\n self.ceiling = None\r\n\r\n def _make_floor_ceil(self, cabin_voxel):\r\n \"\"\"\r\n Alternate method of ceiling detection: get the label in a region containing troops (\r\n assumed to be the cabin interior volume), then find min and max points where that label\r\n is found, everywhere in the vehicle. Floor and ceiling are the endpoints of the longest\r\n continuous gap between floor and ceiling.\r\n\r\n :param cabin_voxel: 3-tuple containing the ijk indices of a voxel known to be cabin-\r\n determine this from the position of a troop manikin in the vehicle model\r\n \"\"\"\r\n # Default value = bottom of vehicle box. 
Easy to spot meaningless ceiling points.\r\n\r\n labels = self.get_labels(mask_from_voxel=cabin_voxel)\r\n\r\n self.ceiling = np.zeros((labels.shape[0], labels.shape[1]), dtype=np.int16)\r\n self.floor = np.zeros((labels.shape[0], labels.shape[1]), dtype=np.int16)\r\n for i in xrange(labels.shape[0]):\r\n for j in xrange(labels.shape[1]):\r\n labs, isl = meas.label(labels[i, j, :])\r\n if isl == 0:\r\n continue\r\n slices = meas.find_objects(labs)\r\n lrgst = np.argmax(np.array([sli[0].stop - sli[0].start for sli in slices]))\r\n \r\n self.floor[i, j] = slices[lrgst][0].start - 1\r\n self.ceiling[i, j] = slices[lrgst][0].stop\r\n # Hack: postprocess so that floor and ceiling arrays have the default values assumed\r\n # by rest of test bench\r\n self.floor[self.floor == -1] = 0\r\n self.ceiling[self.ceiling == labels.shape[2]] = 0\r\n\r\n def get_floor(self, cabin_voxel=None):\r\n \"\"\"\r\n Return vehicle floor. Calculate if necessary.\r\n \"\"\"\r\n if self.floor is None:\r\n self._make_floor_ceil(cabin_voxel)\r\n return self.floor\r\n\r\n def get_ceil(self, cabin_voxel=None):\r\n \"\"\"\r\n Return vehicle ceiling. Calculate for the first time if necessary.\r\n \"\"\"\r\n if self.ceiling is None:\r\n self._make_floor_ceil(cabin_voxel) \r\n return self.ceiling\r\n", "from __future__ import division, print_function, absolute_import\r\n\r\n__docformat__ = \"restructuredtext en\"\r\n\r\n__all__ = []\r\n\r\nfrom warnings import warn\r\n\r\nfrom numpy import asanyarray, asarray, asmatrix, array, matrix, zeros\r\n\r\nfrom scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \\\r\n IdentityOperator\r\n\r\n_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',\r\n ('f','D'):'D', ('d','f'):'d', ('d','d'):'d',\r\n ('d','F'):'D', ('d','D'):'D', ('F','f'):'F',\r\n ('F','d'):'D', ('F','F'):'F', ('F','D'):'D',\r\n ('D','f'):'D', ('D','d'):'D', ('D','F'):'D',\r\n ('D','D'):'D'}\r\n\r\n\r\ndef coerce(x,y):\r\n if x not in 'fdFD':\r\n x = 'd'\r\n if y not in 'fdFD':\r\n y = 'd'\r\n return _coerce_rules[x,y]\r\n\r\n\r\ndef id(x):\r\n return x\r\n\r\n\r\ndef make_system(A, M, x0, b, xtype=None):\r\n \"\"\"Make a linear system Ax=b\r\n\r\n Parameters\r\n ----------\r\n A : LinearOperator\r\n sparse or dense matrix (or any valid input to aslinearoperator)\r\n M : {LinearOperator, Nones}\r\n preconditioner\r\n sparse or dense matrix (or any valid input to aslinearoperator)\r\n x0 : {array_like, None}\r\n initial guess to iterative method\r\n b : array_like\r\n right hand side\r\n xtype : {'f', 'd', 'F', 'D', None}, optional\r\n dtype of the x vector\r\n\r\n Returns\r\n -------\r\n (A, M, x, b, postprocess)\r\n A : LinearOperator\r\n matrix of the linear system\r\n M : LinearOperator\r\n preconditioner\r\n x : rank 1 ndarray\r\n initial guess\r\n b : rank 1 ndarray\r\n right hand side\r\n postprocess : function\r\n converts the solution vector to the appropriate\r\n type and dimensions (e.g. 
(N,1) matrix)\r\n\r\n \"\"\"\r\n A_ = A\r\n A = aslinearoperator(A)\r\n\r\n if A.shape[0] != A.shape[1]:\r\n raise ValueError('expected square matrix, but got shape=%s' % (A.shape,))\r\n\r\n N = A.shape[0]\r\n\r\n b = asanyarray(b)\r\n\r\n if not (b.shape == (N,1) or b.shape == (N,)):\r\n raise ValueError('A and b have incompatible dimensions')\r\n\r\n if b.dtype.char not in 'fdFD':\r\n b = b.astype('d') # upcast non-FP types to double\r\n\r\n def postprocess(x):\r\n if isinstance(b,matrix):\r\n x = asmatrix(x)\r\n return x.reshape(b.shape)\r\n\r\n if xtype is None:\r\n if hasattr(A,'dtype'):\r\n xtype = A.dtype.char\r\n else:\r\n xtype = A.matvec(b).dtype.char\r\n xtype = coerce(xtype, b.dtype.char)\r\n else:\r\n warn('Use of xtype argument is deprecated. '\r\n 'Use LinearOperator( ... , dtype=xtype) instead.',\r\n DeprecationWarning)\r\n if xtype == 0:\r\n xtype = b.dtype.char\r\n else:\r\n if xtype not in 'fdFD':\r\n raise ValueError(\"xtype must be 'f', 'd', 'F', or 'D'\")\r\n\r\n b = asarray(b,dtype=xtype) # make b the same type as x\r\n b = b.ravel()\r\n\r\n if x0 is None:\r\n x = zeros(N, dtype=xtype)\r\n else:\r\n x = array(x0, dtype=xtype)\r\n if not (x.shape == (N,1) or x.shape == (N,)):\r\n raise ValueError('A and x have incompatible dimensions')\r\n x = x.ravel()\r\n\r\n # process preconditioner\r\n if M is None:\r\n if hasattr(A_,'psolve'):\r\n psolve = A_.psolve\r\n else:\r\n psolve = id\r\n if hasattr(A_,'rpsolve'):\r\n rpsolve = A_.rpsolve\r\n else:\r\n rpsolve = id\r\n if psolve is id and rpsolve is id:\r\n M = IdentityOperator(shape=A.shape, dtype=A.dtype)\r\n else:\r\n M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,\r\n dtype=A.dtype)\r\n else:\r\n M = aslinearoperator(M)\r\n if A.shape != M.shape:\r\n raise ValueError('matrix and preconditioner have different shapes')\r\n\r\n return A, M, x, b, postprocess\r\n", "\"\"\"\r\nUnit tests for optimization routines from _root.py.\r\n\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\nfrom numpy.testing import assert_\r\nimport numpy as np\r\n\r\nfrom scipy.optimize import root\r\n\r\n\r\nclass TestRoot(object):\r\n def test_tol_parameter(self):\r\n # Check that the minimize() tol= argument does something\r\n def func(z):\r\n x, y = z\r\n return np.array([x**3 - 1, y**3 - 1])\r\n\r\n def dfunc(z):\r\n x, y = z\r\n return np.array([[3*x**2, 0], [0, 3*y**2]])\r\n\r\n for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',\r\n 'diagbroyden', 'krylov']:\r\n if method in ('linearmixing', 'excitingmixing'):\r\n # doesn't converge\r\n continue\r\n\r\n if method in ('hybr', 'lm'):\r\n jac = dfunc\r\n else:\r\n jac = None\r\n\r\n sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method)\r\n sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method)\r\n msg = \"%s: %s vs. %s\" % (method, func(sol1.x), func(sol2.x))\r\n assert_(sol1.success, msg)\r\n assert_(sol2.success, msg)\r\n assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(),\r\n msg)\r\n\r\n def test_minimize_scalar_coerce_args_param(self):\r\n # github issue #3503\r\n def func(z, f=1):\r\n x, y = z\r\n return np.array([x**3 - 1, y**3 - f])\r\n root(func, [1.1, 1.1], args=1.5)\r\n", "# this program corresponds to special.py\r\n\r\n### Means test is not done yet\r\n# E Means test is giving error (E)\r\n# F Means test is failing (F)\r\n# EF Means test is giving error and Failing\r\n#! 
Means test is segfaulting\r\n# 8 Means test runs forever\r\n\r\n### test_besselpoly\r\n### test_mathieu_a\r\n### test_mathieu_even_coef\r\n### test_mathieu_odd_coef\r\n### test_modfresnelp\r\n### test_modfresnelm\r\n# test_pbdv_seq\r\n### test_pbvv_seq\r\n### test_sph_harm\r\n# test_sph_in\r\n# test_sph_jn\r\n# test_sph_kn\r\n\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\nimport itertools\r\nimport warnings\r\n\r\nimport numpy as np\r\nfrom numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,\r\n log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)\r\n\r\nfrom numpy.testing import (assert_equal, assert_almost_equal,\r\n assert_array_equal, assert_array_almost_equal, assert_approx_equal,\r\n assert_, dec, TestCase, run_module_suite, assert_allclose,\r\n assert_raises, assert_array_almost_equal_nulp)\r\n\r\nfrom scipy import special\r\nimport scipy.special._ufuncs as cephes\r\nfrom scipy.special import ellipk, zeta\r\n\r\nfrom scipy.special._testutils import assert_tol_equal, with_special_errors, \\\r\n assert_func_equal\r\n\r\nfrom scipy._lib._version import NumpyVersion\r\n\r\nimport math\r\n\r\n\r\nclass TestCephes(TestCase):\r\n def test_airy(self):\r\n cephes.airy(0)\r\n\r\n def test_airye(self):\r\n cephes.airye(0)\r\n\r\n def test_binom(self):\r\n n = np.array([0.264, 4, 5.2, 17])\r\n k = np.array([2, 0.4, 7, 3.3])\r\n nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])\r\n ).reshape(2, -1).T\r\n rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,\r\n -0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],\r\n [10.92, 2.22993515861399, -0.00585728, 10.468891352063146],\r\n [136, 3.5252179590758828, 19448, 1024.5526916174495]])\r\n assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)\r\n\r\n # Test branches in implementation\r\n np.random.seed(1234)\r\n n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]\r\n k = np.arange(0, 102)\r\n nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])\r\n ).reshape(2, -1).T\r\n\r\n assert_func_equal(cephes.binom,\r\n cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),\r\n nk,\r\n atol=1e-10, rtol=1e-10)\r\n\r\n def test_binom_2(self):\r\n # Test branches in implementation\r\n np.random.seed(1234)\r\n n = np.r_[np.logspace(1, 300, 20)]\r\n k = np.arange(0, 102)\r\n nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])\r\n ).reshape(2, -1).T\r\n\r\n assert_func_equal(cephes.binom,\r\n cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),\r\n nk,\r\n atol=1e-10, rtol=1e-10)\r\n\r\n def test_binom_exact(self):\r\n @np.vectorize\r\n def binom_int(n, k):\r\n n = int(n)\r\n k = int(k)\r\n num = int(1)\r\n den = int(1)\r\n for i in range(1, k+1):\r\n num *= i + n - k\r\n den *= i\r\n return float(num/den)\r\n\r\n np.random.seed(1234)\r\n n = np.arange(1, 15)\r\n k = np.arange(0, 15)\r\n nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])\r\n ).reshape(2, -1).T\r\n nk = nk[nk[:,0] >= nk[:,1]]\r\n assert_func_equal(cephes.binom,\r\n binom_int(nk[:,0], nk[:,1]),\r\n nk,\r\n atol=0, rtol=0)\r\n\r\n def test_bdtr(self):\r\n assert_equal(cephes.bdtr(1,1,0.5),1.0)\r\n\r\n def test_bdtri(self):\r\n assert_equal(cephes.bdtri(1,3,0.5),0.5)\r\n\r\n def test_bdtrc(self):\r\n assert_equal(cephes.bdtrc(1,3,0.5),0.5)\r\n\r\n def test_bdtrin(self):\r\n assert_equal(cephes.bdtrin(1,0,1),5.0)\r\n\r\n def test_bdtrik(self):\r\n cephes.bdtrik(1,3,0.5)\r\n\r\n def test_bei(self):\r\n assert_equal(cephes.bei(0),0.0)\r\n\r\n def test_beip(self):\r\n 
assert_equal(cephes.beip(0),0.0)\r\n\r\n def test_ber(self):\r\n assert_equal(cephes.ber(0),1.0)\r\n\r\n def test_berp(self):\r\n assert_equal(cephes.berp(0),0.0)\r\n\r\n def test_besselpoly(self):\r\n assert_equal(cephes.besselpoly(0,0,0),1.0)\r\n\r\n def test_beta(self):\r\n assert_equal(cephes.beta(1,1),1.0)\r\n assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))\r\n assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,\r\n rtol=1e-13, atol=0)\r\n\r\n def test_betainc(self):\r\n assert_equal(cephes.betainc(1,1,1),1.0)\r\n assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)\r\n\r\n def test_betaln(self):\r\n assert_equal(cephes.betaln(1,1),0.0)\r\n assert_allclose(cephes.betaln(-100.3, 1e-200), cephes._gammaln(1e-200))\r\n assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,\r\n rtol=1e-14, atol=0)\r\n\r\n def test_betaincinv(self):\r\n assert_equal(cephes.betaincinv(1,1,1),1.0)\r\n assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),\r\n 8.4231316935498957e-21, rtol=3e-12, atol=0)\r\n\r\n def test_beta_inf(self):\r\n assert_(np.isinf(special.beta(-1, 2)))\r\n\r\n def test_btdtr(self):\r\n assert_equal(cephes.btdtr(1,1,1),1.0)\r\n\r\n def test_btdtri(self):\r\n assert_equal(cephes.btdtri(1,1,1),1.0)\r\n\r\n def test_btdtria(self):\r\n assert_equal(cephes.btdtria(1,1,1),5.0)\r\n\r\n def test_btdtrib(self):\r\n assert_equal(cephes.btdtrib(1,1,1),5.0)\r\n\r\n def test_cbrt(self):\r\n assert_approx_equal(cephes.cbrt(1),1.0)\r\n\r\n def test_chdtr(self):\r\n assert_equal(cephes.chdtr(1,0),0.0)\r\n\r\n def test_chdtrc(self):\r\n assert_equal(cephes.chdtrc(1,0),1.0)\r\n\r\n def test_chdtri(self):\r\n assert_equal(cephes.chdtri(1,1),0.0)\r\n\r\n def test_chdtriv(self):\r\n assert_equal(cephes.chdtriv(0,0),5.0)\r\n\r\n def test_chndtr(self):\r\n assert_equal(cephes.chndtr(0,1,0),0.0)\r\n p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)\r\n assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,\r\n 1.33520017e-08, 2.74909967e-08],\r\n rtol=1e-6, atol=0)\r\n assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)\r\n assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)\r\n assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))\r\n assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))\r\n assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))\r\n\r\n def test_chndtridf(self):\r\n assert_equal(cephes.chndtridf(0,0,1),5.0)\r\n\r\n def test_chndtrinc(self):\r\n assert_equal(cephes.chndtrinc(0,1,0),5.0)\r\n\r\n def test_chndtrix(self):\r\n assert_equal(cephes.chndtrix(0,1,0),0.0)\r\n\r\n def test_cosdg(self):\r\n assert_equal(cephes.cosdg(0),1.0)\r\n\r\n def test_cosm1(self):\r\n assert_equal(cephes.cosm1(0),0.0)\r\n\r\n def test_cotdg(self):\r\n assert_almost_equal(cephes.cotdg(45),1.0)\r\n\r\n def test_dawsn(self):\r\n assert_equal(cephes.dawsn(0),0.0)\r\n assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)\r\n\r\n def test_diric(self):\r\n # Test behavior near multiples of 2pi. 
Regression test for issue\r\n # described in gh-4001.\r\n n_odd = [1, 5, 25]\r\n x = np.array(2*np.pi + 5e-5).astype(np.float32)\r\n assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)\r\n x = np.array(2*np.pi + 1e-9).astype(np.float64)\r\n assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)\r\n x = np.array(2*np.pi + 1e-15).astype(np.float64)\r\n assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)\r\n if hasattr(np, 'float128'):\r\n # No float128 available in 32-bit numpy\r\n x = np.array(2*np.pi + 1e-12).astype(np.float128)\r\n assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)\r\n\r\n n_even = [2, 4, 24]\r\n x = np.array(2*np.pi + 1e-9).astype(np.float64)\r\n assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)\r\n\r\n # Test at some values not near a multiple of pi\r\n x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)\r\n octave_result = [0.872677996249965, 0.539344662916632,\r\n 0.127322003750035, -0.206011329583298]\r\n assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)\r\n\r\n def test_diric_broadcasting(self):\r\n x = np.arange(5)\r\n n = np.array([1, 3, 7])\r\n assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))\r\n\r\n def test_ellipe(self):\r\n assert_equal(cephes.ellipe(1),1.0)\r\n\r\n def test_ellipeinc(self):\r\n assert_equal(cephes.ellipeinc(0,1),0.0)\r\n\r\n def test_ellipj(self):\r\n cephes.ellipj(0,1)\r\n\r\n def test_ellipk(self):\r\n assert_allclose(ellipk(0), pi/2)\r\n\r\n def test_ellipkinc(self):\r\n assert_equal(cephes.ellipkinc(0,0),0.0)\r\n\r\n def test_erf(self):\r\n assert_equal(cephes.erf(0),0.0)\r\n\r\n def test_erfc(self):\r\n assert_equal(cephes.erfc(0),1.0)\r\n\r\n def test_exp1(self):\r\n cephes.exp1(1)\r\n\r\n def test_expi(self):\r\n cephes.expi(1)\r\n\r\n def test_expn(self):\r\n cephes.expn(1,1)\r\n\r\n def test_exp1_reg(self):\r\n # Regression for #834\r\n a = cephes.exp1(-complex(19.9999990))\r\n b = cephes.exp1(-complex(19.9999991))\r\n assert_array_almost_equal(a.imag, b.imag)\r\n\r\n def test_exp10(self):\r\n assert_approx_equal(cephes.exp10(2),100.0)\r\n\r\n def test_exp2(self):\r\n assert_equal(cephes.exp2(2),4.0)\r\n\r\n def test_expm1(self):\r\n assert_equal(cephes.expm1(0),0.0)\r\n assert_equal(cephes.expm1(np.inf), np.inf)\r\n assert_equal(cephes.expm1(-np.inf), -1)\r\n assert_equal(cephes.expm1(np.nan), np.nan)\r\n\r\n # Earlier numpy version don't guarantee that npy_cexp conforms to C99.\r\n @dec.skipif(NumpyVersion(np.__version__) < '1.9.0')\r\n def test_expm1_complex(self):\r\n expm1 = cephes.expm1\r\n assert_equal(expm1(0 + 0j), 0 + 0j)\r\n assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))\r\n assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))\r\n assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))\r\n assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))\r\n assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))\r\n assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))\r\n assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))\r\n assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))\r\n assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))\r\n assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))\r\n assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))\r\n assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))\r\n assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))\r\n 
assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))\r\n assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))\r\n\r\n @dec.knownfailureif(True, 'The real part of expm1(z) bad at these points')\r\n def test_expm1_complex_hard(self):\r\n # The real part of this function is difficult to evaluate when\r\n # z.real = -log(cos(z.imag)).\r\n y = np.array([0.1, 0.2, 0.3, 5, 11, 20])\r\n x = -np.log(np.cos(y))\r\n z = x + 1j*y\r\n\r\n # evaluate using mpmath.expm1 with dps=1000\r\n expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,\r\n 2.4289354732893695e-18+0.20271003550867248j,\r\n 4.5235500262585768e-17+0.30933624960962319j,\r\n 7.8234305217489006e-17-3.3805150062465863j,\r\n -1.3685191953697676e-16-225.95084645419513j,\r\n 8.7175620481291045e-17+2.2371609442247422j])\r\n found = cephes.expm1(z)\r\n # this passes.\r\n assert_array_almost_equal_nulp(found.imag, expected.imag, 3)\r\n # this fails.\r\n assert_array_almost_equal_nulp(found.real, expected.real, 20)\r\n\r\n def test_fdtr(self):\r\n assert_equal(cephes.fdtr(1,1,0),0.0)\r\n\r\n def test_fdtrc(self):\r\n assert_equal(cephes.fdtrc(1,1,0),1.0)\r\n\r\n def test_fdtri(self):\r\n # cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1\r\n assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),\r\n array([0.9937365, 1.00630298]), rtol=1e-6)\r\n\r\n def test_fdtridfd(self):\r\n assert_equal(cephes.fdtridfd(1,0,0),5.0)\r\n\r\n def test_fresnel(self):\r\n assert_equal(cephes.fresnel(0),(0.0,0.0))\r\n\r\n def test_gamma(self):\r\n assert_equal(cephes.gamma(5),24.0)\r\n\r\n def test_gammainc(self):\r\n assert_equal(cephes.gammainc(5,0),0.0)\r\n\r\n def test_gammaincc(self):\r\n assert_equal(cephes.gammaincc(5,0),1.0)\r\n\r\n def test_gammainccinv(self):\r\n assert_equal(cephes.gammainccinv(5,1),0.0)\r\n\r\n def test_gammaln(self):\r\n cephes._gammaln(10)\r\n\r\n def test_gammasgn(self):\r\n vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)\r\n assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))\r\n\r\n def test_gdtr(self):\r\n assert_equal(cephes.gdtr(1,1,0),0.0)\r\n\r\n def test_gdtr_inf(self):\r\n assert_equal(cephes.gdtr(1,1,np.inf),1.0)\r\n\r\n def test_gdtrc(self):\r\n assert_equal(cephes.gdtrc(1,1,0),1.0)\r\n\r\n def test_gdtria(self):\r\n assert_equal(cephes.gdtria(0,1,1),0.0)\r\n\r\n def test_gdtrib(self):\r\n cephes.gdtrib(1,0,1)\r\n # assert_equal(cephes.gdtrib(1,0,1),5.0)\r\n\r\n def test_gdtrix(self):\r\n cephes.gdtrix(1,1,.1)\r\n\r\n def test_hankel1(self):\r\n cephes.hankel1(1,1)\r\n\r\n def test_hankel1e(self):\r\n cephes.hankel1e(1,1)\r\n\r\n def test_hankel2(self):\r\n cephes.hankel2(1,1)\r\n\r\n def test_hankel2e(self):\r\n cephes.hankel2e(1,1)\r\n\r\n def test_hyp1f1(self):\r\n assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))\r\n assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)\r\n cephes.hyp1f1(1,1,1)\r\n\r\n def test_hyp1f2(self):\r\n cephes.hyp1f2(1,1,1,1)\r\n\r\n def test_hyp2f0(self):\r\n cephes.hyp2f0(1,1,1,1)\r\n\r\n def test_hyp2f1(self):\r\n assert_equal(cephes.hyp2f1(1,1,1,0),1.0)\r\n\r\n def test_hyp3f0(self):\r\n assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))\r\n\r\n def test_hyperu(self):\r\n assert_equal(cephes.hyperu(0,1,1),1.0)\r\n\r\n def test_i0(self):\r\n assert_equal(cephes.i0(0),1.0)\r\n\r\n def test_i0e(self):\r\n assert_equal(cephes.i0e(0),1.0)\r\n\r\n def test_i1(self):\r\n assert_equal(cephes.i1(0),0.0)\r\n\r\n def test_i1e(self):\r\n assert_equal(cephes.i1e(0),0.0)\r\n\r\n def test_it2i0k0(self):\r\n 
cephes.it2i0k0(1)\r\n\r\n def test_it2j0y0(self):\r\n cephes.it2j0y0(1)\r\n\r\n def test_it2struve0(self):\r\n cephes.it2struve0(1)\r\n\r\n def test_itairy(self):\r\n cephes.itairy(1)\r\n\r\n def test_iti0k0(self):\r\n assert_equal(cephes.iti0k0(0),(0.0,0.0))\r\n\r\n def test_itj0y0(self):\r\n assert_equal(cephes.itj0y0(0),(0.0,0.0))\r\n\r\n def test_itmodstruve0(self):\r\n assert_equal(cephes.itmodstruve0(0),0.0)\r\n\r\n def test_itstruve0(self):\r\n assert_equal(cephes.itstruve0(0),0.0)\r\n\r\n def test_iv(self):\r\n assert_equal(cephes.iv(1,0),0.0)\r\n\r\n def _check_ive(self):\r\n assert_equal(cephes.ive(1,0),0.0)\r\n\r\n def test_j0(self):\r\n assert_equal(cephes.j0(0),1.0)\r\n\r\n def test_j1(self):\r\n assert_equal(cephes.j1(0),0.0)\r\n\r\n def test_jn(self):\r\n assert_equal(cephes.jn(0,0),1.0)\r\n\r\n def test_jv(self):\r\n assert_equal(cephes.jv(0,0),1.0)\r\n\r\n def _check_jve(self):\r\n assert_equal(cephes.jve(0,0),1.0)\r\n\r\n def test_k0(self):\r\n cephes.k0(2)\r\n\r\n def test_k0e(self):\r\n cephes.k0e(2)\r\n\r\n def test_k1(self):\r\n cephes.k1(2)\r\n\r\n def test_k1e(self):\r\n cephes.k1e(2)\r\n\r\n def test_kei(self):\r\n cephes.kei(2)\r\n\r\n def test_keip(self):\r\n assert_equal(cephes.keip(0),0.0)\r\n\r\n def test_ker(self):\r\n cephes.ker(2)\r\n\r\n def test_kerp(self):\r\n cephes.kerp(2)\r\n\r\n def _check_kelvin(self):\r\n cephes.kelvin(2)\r\n\r\n def test_kn(self):\r\n cephes.kn(1,1)\r\n\r\n def test_kolmogi(self):\r\n assert_equal(cephes.kolmogi(1),0.0)\r\n assert_(np.isnan(cephes.kolmogi(np.nan)))\r\n\r\n def test_kolmogorov(self):\r\n assert_equal(cephes.kolmogorov(0),1.0)\r\n\r\n def _check_kv(self):\r\n cephes.kv(1,1)\r\n\r\n def _check_kve(self):\r\n cephes.kve(1,1)\r\n\r\n def test_log1p(self):\r\n log1p = cephes.log1p\r\n assert_equal(log1p(0), 0.0)\r\n assert_equal(log1p(-1), -np.inf)\r\n assert_equal(log1p(-2), np.nan)\r\n assert_equal(log1p(np.inf), np.inf)\r\n\r\n # earlier numpy version don't guarantee that npy_clog conforms to C99\r\n @dec.skipif(NumpyVersion(np.__version__) < '1.9.0')\r\n def test_log1p_complex(self):\r\n log1p = cephes.log1p\r\n c = complex\r\n assert_equal(log1p(0 + 0j), 0 + 0j)\r\n assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))\r\n assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))\r\n assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))\r\n assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))\r\n assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))\r\n assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))\r\n assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))\r\n assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))\r\n assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))\r\n assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))\r\n assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))\r\n assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))\r\n\r\n def test_lpmv(self):\r\n assert_equal(cephes.lpmv(0,0,1),1.0)\r\n\r\n def test_mathieu_a(self):\r\n assert_equal(cephes.mathieu_a(1,0),1.0)\r\n\r\n def test_mathieu_b(self):\r\n assert_equal(cephes.mathieu_b(1,0),1.0)\r\n\r\n def test_mathieu_cem(self):\r\n assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))\r\n\r\n # Test AMS 20.2.27\r\n @np.vectorize\r\n def ce_smallq(m, q, z):\r\n z *= np.pi/180\r\n if m == 0:\r\n return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)\r\n elif m == 1:\r\n return cos(z) - q/8 * cos(3*z) # + O(q^2)\r\n elif m == 2:\r\n return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)\r\n else:\r\n return cos(m*z) - 
q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)\r\n m = np.arange(0, 100)\r\n q = np.r_[0, np.logspace(-30, -9, 10)]\r\n assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],\r\n ce_smallq(m[:,None], q[None,:], 0.123),\r\n rtol=1e-14, atol=0)\r\n\r\n def test_mathieu_sem(self):\r\n assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))\r\n\r\n # Test AMS 20.2.27\r\n @np.vectorize\r\n def se_smallq(m, q, z):\r\n z *= np.pi/180\r\n if m == 1:\r\n return sin(z) - q/8 * sin(3*z) # + O(q^2)\r\n elif m == 2:\r\n return sin(2*z) - q*sin(4*z)/12 # + O(q^2)\r\n else:\r\n return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)\r\n m = np.arange(1, 100)\r\n q = np.r_[0, np.logspace(-30, -9, 10)]\r\n assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],\r\n se_smallq(m[:,None], q[None,:], 0.123),\r\n rtol=1e-14, atol=0)\r\n\r\n def test_mathieu_modcem1(self):\r\n assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))\r\n\r\n def test_mathieu_modcem2(self):\r\n cephes.mathieu_modcem2(1,1,1)\r\n\r\n # Test reflection relation AMS 20.6.19\r\n m = np.arange(0, 4)[:,None,None]\r\n q = np.r_[np.logspace(-2, 2, 10)][None,:,None]\r\n z = np.linspace(0, 1, 7)[None,None,:]\r\n\r\n y1 = cephes.mathieu_modcem2(m, q, -z)[0]\r\n\r\n fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]\r\n y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]\r\n\r\n assert_allclose(y1, y2, rtol=1e-10)\r\n\r\n def test_mathieu_modsem1(self):\r\n assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))\r\n\r\n def test_mathieu_modsem2(self):\r\n cephes.mathieu_modsem2(1,1,1)\r\n\r\n # Test reflection relation AMS 20.6.20\r\n m = np.arange(1, 4)[:,None,None]\r\n q = np.r_[np.logspace(-2, 2, 10)][None,:,None]\r\n z = np.linspace(0, 1, 7)[None,None,:]\r\n\r\n y1 = cephes.mathieu_modsem2(m, q, -z)[0]\r\n fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]\r\n y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]\r\n assert_allclose(y1, y2, rtol=1e-10)\r\n\r\n def test_mathieu_overflow(self):\r\n # Check that these return NaNs instead of causing a SEGV\r\n assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))\r\n assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))\r\n assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))\r\n assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))\r\n assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))\r\n assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))\r\n assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))\r\n assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))\r\n\r\n def test_mathieu_ticket_1847(self):\r\n # Regression test --- this call had some out-of-bounds access\r\n # and could return nan occasionally\r\n for k in range(60):\r\n v = cephes.mathieu_modsem2(2, 100, -1)\r\n # Values from ACM TOMS 804 (derivate by numerical differentiation)\r\n assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)\r\n assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)\r\n\r\n def test_modfresnelm(self):\r\n cephes.modfresnelm(0)\r\n\r\n def test_modfresnelp(self):\r\n cephes.modfresnelp(0)\r\n\r\n def _check_modstruve(self):\r\n assert_equal(cephes.modstruve(1,0),0.0)\r\n\r\n def test_nbdtr(self):\r\n assert_equal(cephes.nbdtr(1,1,1),1.0)\r\n\r\n def test_nbdtrc(self):\r\n 
assert_equal(cephes.nbdtrc(1,1,1),0.0)\r\n\r\n def test_nbdtri(self):\r\n assert_equal(cephes.nbdtri(1,1,1),1.0)\r\n\r\n def __check_nbdtrik(self):\r\n cephes.nbdtrik(1,.4,.5)\r\n\r\n def test_nbdtrin(self):\r\n assert_equal(cephes.nbdtrin(1,0,0),5.0)\r\n\r\n def test_ncfdtr(self):\r\n assert_equal(cephes.ncfdtr(1,1,1,0),0.0)\r\n\r\n def test_ncfdtri(self):\r\n assert_equal(cephes.ncfdtri(1,1,1,0),0.0)\r\n\r\n def test_ncfdtridfd(self):\r\n cephes.ncfdtridfd(1,0.5,0,1)\r\n\r\n def __check_ncfdtridfn(self):\r\n cephes.ncfdtridfn(1,0.5,0,1)\r\n\r\n def __check_ncfdtrinc(self):\r\n cephes.ncfdtrinc(1,0.5,0,1)\r\n\r\n def test_nctdtr(self):\r\n assert_equal(cephes.nctdtr(1,0,0),0.5)\r\n assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)\r\n\r\n assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)\r\n assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))\r\n assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)\r\n\r\n assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))\r\n assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))\r\n assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))\r\n\r\n def __check_nctdtridf(self):\r\n cephes.nctdtridf(1,0.5,0)\r\n\r\n def test_nctdtrinc(self):\r\n cephes.nctdtrinc(1,0,0)\r\n\r\n def test_nctdtrit(self):\r\n cephes.nctdtrit(.1,0.2,.5)\r\n\r\n def test_ndtr(self):\r\n assert_equal(cephes.ndtr(0), 0.5)\r\n assert_almost_equal(cephes.ndtr(1), 0.84134474606)\r\n\r\n def test_ndtri(self):\r\n assert_equal(cephes.ndtri(0.5),0.0)\r\n\r\n def test_nrdtrimn(self):\r\n assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)\r\n\r\n def test_nrdtrisd(self):\r\n assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,\r\n atol=0, rtol=0)\r\n\r\n def test_obl_ang1(self):\r\n cephes.obl_ang1(1,1,1,0)\r\n\r\n def test_obl_ang1_cv(self):\r\n result = cephes.obl_ang1_cv(1,1,1,1,0)\r\n assert_almost_equal(result[0],1.0)\r\n assert_almost_equal(result[1],0.0)\r\n\r\n def _check_obl_cv(self):\r\n assert_equal(cephes.obl_cv(1,1,0),2.0)\r\n\r\n def test_obl_rad1(self):\r\n cephes.obl_rad1(1,1,1,0)\r\n\r\n def test_obl_rad1_cv(self):\r\n cephes.obl_rad1_cv(1,1,1,1,0)\r\n\r\n def test_obl_rad2(self):\r\n cephes.obl_rad2(1,1,1,0)\r\n\r\n def test_obl_rad2_cv(self):\r\n cephes.obl_rad2_cv(1,1,1,1,0)\r\n\r\n def test_pbdv(self):\r\n assert_equal(cephes.pbdv(1,0),(0.0,1.0))\r\n\r\n def test_pbvv(self):\r\n cephes.pbvv(1,0)\r\n\r\n def test_pbwa(self):\r\n cephes.pbwa(1,0)\r\n\r\n def test_pdtr(self):\r\n val = cephes.pdtr(0, 1)\r\n assert_almost_equal(val, np.exp(-1))\r\n # Edge case: m = 0.\r\n val = cephes.pdtr([0, 1, 2], 0.0)\r\n assert_array_equal(val, [1, 1, 1])\r\n\r\n def test_pdtrc(self):\r\n val = cephes.pdtrc(0, 1)\r\n assert_almost_equal(val, 1 - np.exp(-1))\r\n # Edge case: m = 0.\r\n val = cephes.pdtrc([0, 1, 2], 0.0)\r\n assert_array_equal(val, [0, 0, 0])\r\n\r\n def test_pdtri(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", RuntimeWarning)\r\n cephes.pdtri(0.5,0.5)\r\n\r\n def test_pdtrik(self):\r\n k = cephes.pdtrik(0.5, 1)\r\n assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)\r\n # Edge case: m = 0 or very small.\r\n k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])\r\n assert_array_equal(k, np.zeros((3, 3)))\r\n\r\n def test_pro_ang1(self):\r\n cephes.pro_ang1(1,1,1,0)\r\n\r\n def test_pro_ang1_cv(self):\r\n assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),\r\n array((1.0,0.0)))\r\n\r\n def _check_pro_cv(self):\r\n assert_equal(cephes.pro_cv(1,1,0),2.0)\r\n\r\n def test_pro_rad1(self):\r\n cephes.pro_rad1(1,1,1,0.1)\r\n\r\n def 
test_pro_rad1_cv(self):\r\n cephes.pro_rad1_cv(1,1,1,1,0)\r\n\r\n def test_pro_rad2(self):\r\n cephes.pro_rad2(1,1,1,0)\r\n\r\n def test_pro_rad2_cv(self):\r\n cephes.pro_rad2_cv(1,1,1,1,0)\r\n\r\n def test_psi(self):\r\n cephes.psi(1)\r\n\r\n def test_radian(self):\r\n assert_equal(cephes.radian(0,0,0),0)\r\n\r\n def test_rgamma(self):\r\n assert_equal(cephes.rgamma(1),1.0)\r\n\r\n def test_round(self):\r\n assert_equal(cephes.round(3.4),3.0)\r\n assert_equal(cephes.round(-3.4),-3.0)\r\n assert_equal(cephes.round(3.6),4.0)\r\n assert_equal(cephes.round(-3.6),-4.0)\r\n assert_equal(cephes.round(3.5),4.0)\r\n assert_equal(cephes.round(-3.5),-4.0)\r\n\r\n def test_shichi(self):\r\n cephes.shichi(1)\r\n\r\n def test_sici(self):\r\n cephes.sici(1)\r\n\r\n s, c = cephes.sici(np.inf)\r\n assert_almost_equal(s, np.pi * 0.5)\r\n assert_almost_equal(c, 0)\r\n\r\n s, c = cephes.sici(-np.inf)\r\n assert_almost_equal(s, -np.pi * 0.5)\r\n assert_(np.isnan(c), \"cosine integral(-inf) is not nan\")\r\n\r\n def test_sindg(self):\r\n assert_equal(cephes.sindg(90),1.0)\r\n\r\n def test_smirnov(self):\r\n assert_equal(cephes.smirnov(1,.1),0.9)\r\n assert_(np.isnan(cephes.smirnov(1,np.nan)))\r\n\r\n def test_smirnovi(self):\r\n assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)\r\n assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)\r\n assert_(np.isnan(cephes.smirnovi(1,np.nan)))\r\n\r\n def test_spence(self):\r\n assert_equal(cephes.spence(1),0.0)\r\n\r\n def test_stdtr(self):\r\n assert_equal(cephes.stdtr(1,0),0.5)\r\n assert_almost_equal(cephes.stdtr(1,1), 0.75)\r\n assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)\r\n\r\n def test_stdtridf(self):\r\n cephes.stdtridf(0.7,1)\r\n\r\n def test_stdtrit(self):\r\n cephes.stdtrit(1,0.7)\r\n\r\n def test_struve(self):\r\n assert_equal(cephes.struve(0,0),0.0)\r\n\r\n def test_tandg(self):\r\n assert_equal(cephes.tandg(45),1.0)\r\n\r\n def test_tklmbda(self):\r\n assert_almost_equal(cephes.tklmbda(1,1),1.0)\r\n\r\n def test_y0(self):\r\n cephes.y0(1)\r\n\r\n def test_y1(self):\r\n cephes.y1(1)\r\n\r\n def test_yn(self):\r\n cephes.yn(1,1)\r\n\r\n def test_yv(self):\r\n cephes.yv(1,1)\r\n\r\n def _check_yve(self):\r\n cephes.yve(1,1)\r\n\r\n def test_zeta(self):\r\n assert_allclose(zeta(2,2), pi**2/6 - 1, rtol=1e-12)\r\n\r\n def test_zetac(self):\r\n assert_equal(cephes.zetac(0),-1.5)\r\n\r\n def test_zeta_1arg(self):\r\n assert_allclose(zeta(2), pi**2/6, rtol=1e-12)\r\n assert_allclose(zeta(4), pi**4/90, rtol=1e-12)\r\n\r\n def test_wofz(self):\r\n z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),\r\n complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),\r\n complex(-0.0000000234545,1.1234), complex(-3.,5.1),\r\n complex(-53,30.1), complex(0.0,0.12345),\r\n complex(11,1), complex(-22,-2), complex(9,-28),\r\n complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)\r\n ]\r\n w = [\r\n complex(-3.78270245518980507452677445620103199303131110e-7,\r\n 0.000903861276433172057331093754199933411710053155),\r\n complex(0.1764906227004816847297495349730234591778719532788,\r\n -0.02146550539468457616788719893991501311573031095617),\r\n complex(0.2410250715772692146133539023007113781272362309451,\r\n 0.06087579663428089745895459735240964093522265589350),\r\n complex(0.30474420525691259245713884106959496013413834051768,\r\n -0.20821893820283162728743734725471561394145872072738),\r\n complex(7.317131068972378096865595229600561710140617977e34,\r\n 8.321873499714402777186848353320412813066170427e34),\r\n 
complex(0.0615698507236323685519612934241429530190806818395,\r\n -0.00676005783716575013073036218018565206070072304635),\r\n complex(0.3960793007699874918961319170187598400134746631,\r\n -5.593152259116644920546186222529802777409274656e-9),\r\n complex(0.08217199226739447943295069917990417630675021771804,\r\n -0.04701291087643609891018366143118110965272615832184),\r\n complex(0.00457246000350281640952328010227885008541748668738,\r\n -0.00804900791411691821818731763401840373998654987934),\r\n complex(0.8746342859608052666092782112565360755791467973338452,\r\n 0.),\r\n complex(0.00468190164965444174367477874864366058339647648741,\r\n 0.0510735563901306197993676329845149741675029197050),\r\n complex(-0.0023193175200187620902125853834909543869428763219,\r\n -0.025460054739731556004902057663500272721780776336),\r\n complex(9.11463368405637174660562096516414499772662584e304,\r\n 3.97101807145263333769664875189354358563218932e305),\r\n complex(-4.4927207857715598976165541011143706155432296e281,\r\n -2.8019591213423077494444700357168707775769028e281),\r\n complex(2.820947917809305132678577516325951485807107151e-6,\r\n 2.820947917668257736791638444590253942253354058e-6),\r\n complex(2.82094791773878143474039725787438662716372268e-15,\r\n 2.82094791773878143474039725773333923127678361e-15)\r\n ]\r\n assert_func_equal(cephes.wofz, w, z, rtol=1e-13)\r\n\r\n\r\nclass TestAiry(TestCase):\r\n def test_airy(self):\r\n # This tests the airy function to ensure 8 place accuracy in computation\r\n\r\n x = special.airy(.99)\r\n assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)\r\n x = special.airy(.41)\r\n assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)\r\n x = special.airy(-.36)\r\n assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)\r\n\r\n def test_airye(self):\r\n a = special.airye(0.01)\r\n b = special.airy(0.01)\r\n b1 = [None]*4\r\n for n in range(2):\r\n b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))\r\n for n in range(2,4):\r\n b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))\r\n assert_array_almost_equal(a,b1,6)\r\n\r\n def test_bi_zeros(self):\r\n bi = special.bi_zeros(2)\r\n bia = (array([-1.17371322, -3.2710930]),\r\n array([-2.29443968, -4.07315509]),\r\n array([-0.45494438, 0.39652284]),\r\n array([0.60195789, -0.76031014]))\r\n assert_array_almost_equal(bi,bia,4)\r\n\r\n bi = special.bi_zeros(5)\r\n assert_array_almost_equal(bi[0],array([-1.173713222709127,\r\n -3.271093302836352,\r\n -4.830737841662016,\r\n -6.169852128310251,\r\n -7.376762079367764]),11)\r\n\r\n assert_array_almost_equal(bi[1],array([-2.294439682614122,\r\n -4.073155089071828,\r\n -5.512395729663599,\r\n -6.781294445990305,\r\n -7.940178689168587]),10)\r\n\r\n assert_array_almost_equal(bi[2],array([-0.454944383639657,\r\n 0.396522836094465,\r\n -0.367969161486959,\r\n 0.349499116831805,\r\n -0.336026240133662]),11)\r\n\r\n assert_array_almost_equal(bi[3],array([0.601957887976239,\r\n -0.760310141492801,\r\n 0.836991012619261,\r\n -0.88947990142654,\r\n 0.929983638568022]),10)\r\n\r\n def test_ai_zeros(self):\r\n ai = special.ai_zeros(1)\r\n assert_array_almost_equal(ai,(array([-2.33810741]),\r\n array([-1.01879297]),\r\n array([0.5357]),\r\n array([0.7012])),4)\r\n\r\n def test_ai_zeros_big(self):\r\n z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)\r\n ai_z, aip_z, _, _ = special.airy(z)\r\n ai_zp, aip_zp, _, _ = special.airy(zp)\r\n\r\n ai_envelope = 1/abs(z)**(1./4)\r\n aip_envelope = abs(zp)**(1./4)\r\n\r\n # Check 
values\r\n assert_allclose(ai_zpx, ai_zp, rtol=1e-10)\r\n assert_allclose(aip_zx, aip_z, rtol=1e-10)\r\n\r\n # Check they are zeros\r\n assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)\r\n assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)\r\n\r\n # Check first zeros, DLMF 9.9.1\r\n assert_allclose(z[:6],\r\n [-2.3381074105, -4.0879494441, -5.5205598281,\r\n -6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)\r\n assert_allclose(zp[:6],\r\n [-1.0187929716, -3.2481975822, -4.8200992112,\r\n -6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)\r\n\r\n def test_bi_zeros_big(self):\r\n z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)\r\n _, _, bi_z, bip_z = special.airy(z)\r\n _, _, bi_zp, bip_zp = special.airy(zp)\r\n\r\n bi_envelope = 1/abs(z)**(1./4)\r\n bip_envelope = abs(zp)**(1./4)\r\n\r\n # Check values\r\n assert_allclose(bi_zpx, bi_zp, rtol=1e-10)\r\n assert_allclose(bip_zx, bip_z, rtol=1e-10)\r\n\r\n # Check they are zeros\r\n assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)\r\n assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)\r\n\r\n # Check first zeros, DLMF 9.9.2\r\n assert_allclose(z[:6],\r\n [-1.1737132227, -3.2710933028, -4.8307378417,\r\n -6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)\r\n assert_allclose(zp[:6],\r\n [-2.2944396826, -4.0731550891, -5.5123957297,\r\n -6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)\r\n\r\n\r\nclass TestAssocLaguerre(TestCase):\r\n def test_assoc_laguerre(self):\r\n a1 = special.genlaguerre(11,1)\r\n a2 = special.assoc_laguerre(.2,11,1)\r\n assert_array_almost_equal(a2,a1(.2),8)\r\n a2 = special.assoc_laguerre(1,11,1)\r\n assert_array_almost_equal(a2,a1(1),8)\r\n\r\n\r\nclass TestBesselpoly(TestCase):\r\n def test_besselpoly(self):\r\n pass\r\n\r\n\r\nclass TestKelvin(TestCase):\r\n def test_bei(self):\r\n mbei = special.bei(2)\r\n assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact\r\n\r\n def test_beip(self):\r\n mbeip = special.beip(2)\r\n assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact\r\n\r\n def test_ber(self):\r\n mber = special.ber(2)\r\n assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact\r\n\r\n def test_berp(self):\r\n mberp = special.berp(2)\r\n assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact\r\n\r\n def test_bei_zeros(self):\r\n # Abramowitz & Stegun, Table 9.12\r\n bi = special.bei_zeros(5)\r\n assert_array_almost_equal(bi,array([5.02622,\r\n 9.45541,\r\n 13.89349,\r\n 18.33398,\r\n 22.77544]),4)\r\n\r\n def test_beip_zeros(self):\r\n bip = special.beip_zeros(5)\r\n assert_array_almost_equal(bip,array([3.772673304934953,\r\n 8.280987849760042,\r\n 12.742147523633703,\r\n 17.193431752512542,\r\n 21.641143941167325]),8)\r\n\r\n def test_ber_zeros(self):\r\n ber = special.ber_zeros(5)\r\n assert_array_almost_equal(ber,array([2.84892,\r\n 7.23883,\r\n 11.67396,\r\n 16.11356,\r\n 20.55463]),4)\r\n\r\n def test_berp_zeros(self):\r\n brp = special.berp_zeros(5)\r\n assert_array_almost_equal(brp,array([6.03871,\r\n 10.51364,\r\n 14.96844,\r\n 19.41758,\r\n 23.86430]),4)\r\n\r\n def test_kelvin(self):\r\n mkelv = special.kelvin(2)\r\n assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,\r\n special.ker(2) + special.kei(2)*1j,\r\n special.berp(2) + special.beip(2)*1j,\r\n special.kerp(2) + special.keip(2)*1j),8)\r\n\r\n def test_kei(self):\r\n mkei = special.kei(2)\r\n assert_almost_equal(mkei,-0.20240006776470432,5)\r\n\r\n def test_keip(self):\r\n mkeip = 
special.keip(2)\r\n assert_almost_equal(mkeip,0.21980790991960536,5)\r\n\r\n def test_ker(self):\r\n mker = special.ker(2)\r\n assert_almost_equal(mker,-0.041664513991509472,5)\r\n\r\n def test_kerp(self):\r\n mkerp = special.kerp(2)\r\n assert_almost_equal(mkerp,-0.10660096588105264,5)\r\n\r\n def test_kei_zeros(self):\r\n kei = special.kei_zeros(5)\r\n assert_array_almost_equal(kei,array([3.91467,\r\n 8.34422,\r\n 12.78256,\r\n 17.22314,\r\n 21.66464]),4)\r\n\r\n def test_keip_zeros(self):\r\n keip = special.keip_zeros(5)\r\n assert_array_almost_equal(keip,array([4.93181,\r\n 9.40405,\r\n 13.85827,\r\n 18.30717,\r\n 22.75379]),4)\r\n\r\n # numbers come from 9.9 of A&S pg. 381\r\n def test_kelvin_zeros(self):\r\n tmp = special.kelvin_zeros(5)\r\n berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp\r\n assert_array_almost_equal(berz,array([2.84892,\r\n 7.23883,\r\n 11.67396,\r\n 16.11356,\r\n 20.55463]),4)\r\n assert_array_almost_equal(beiz,array([5.02622,\r\n 9.45541,\r\n 13.89349,\r\n 18.33398,\r\n 22.77544]),4)\r\n assert_array_almost_equal(kerz,array([1.71854,\r\n 6.12728,\r\n 10.56294,\r\n 15.00269,\r\n 19.44382]),4)\r\n assert_array_almost_equal(keiz,array([3.91467,\r\n 8.34422,\r\n 12.78256,\r\n 17.22314,\r\n 21.66464]),4)\r\n assert_array_almost_equal(berpz,array([6.03871,\r\n 10.51364,\r\n 14.96844,\r\n 19.41758,\r\n 23.86430]),4)\r\n assert_array_almost_equal(beipz,array([3.77267,\r\n # table from 1927 had 3.77320\r\n # but this is more accurate\r\n 8.28099,\r\n 12.74215,\r\n 17.19343,\r\n 21.64114]),4)\r\n assert_array_almost_equal(kerpz,array([2.66584,\r\n 7.17212,\r\n 11.63218,\r\n 16.08312,\r\n 20.53068]),4)\r\n assert_array_almost_equal(keipz,array([4.93181,\r\n 9.40405,\r\n 13.85827,\r\n 18.30717,\r\n 22.75379]),4)\r\n\r\n def test_ker_zeros(self):\r\n ker = special.ker_zeros(5)\r\n assert_array_almost_equal(ker,array([1.71854,\r\n 6.12728,\r\n 10.56294,\r\n 15.00269,\r\n 19.44381]),4)\r\n\r\n def test_kerp_zeros(self):\r\n kerp = special.kerp_zeros(5)\r\n assert_array_almost_equal(kerp,array([2.66584,\r\n 7.17212,\r\n 11.63218,\r\n 16.08312,\r\n 20.53068]),4)\r\n\r\n\r\nclass TestBernoulli(TestCase):\r\n def test_bernoulli(self):\r\n brn = special.bernoulli(5)\r\n assert_array_almost_equal(brn,array([1.0000,\r\n -0.5000,\r\n 0.1667,\r\n 0.0000,\r\n -0.0333,\r\n 0.0000]),4)\r\n\r\n\r\nclass TestBeta(TestCase):\r\n def test_beta(self):\r\n bet = special.beta(2,4)\r\n betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)\r\n assert_almost_equal(bet,betg,8)\r\n\r\n def test_betaln(self):\r\n betln = special.betaln(2,4)\r\n bet = log(abs(special.beta(2,4)))\r\n assert_almost_equal(betln,bet,8)\r\n\r\n def test_betainc(self):\r\n btinc = special.betainc(1,1,.2)\r\n assert_almost_equal(btinc,0.2,8)\r\n\r\n def test_betaincinv(self):\r\n y = special.betaincinv(2,4,.5)\r\n comp = special.betainc(2,4,y)\r\n assert_almost_equal(comp,.5,5)\r\n\r\n\r\nclass TestCombinatorics(TestCase):\r\n def test_comb(self):\r\n assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])\r\n assert_almost_equal(special.comb(10, 3), 120.)\r\n assert_equal(special.comb(10, 3, exact=True), 120)\r\n assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)\r\n\r\n assert_allclose([special.comb(20, k, exact=True) for k in range(21)],\r\n special.comb(20, list(range(21))), atol=1e-15)\r\n\r\n ii = np.iinfo(int).max + 1\r\n assert_equal(special.comb(ii, ii-1, exact=True), ii)\r\n\r\n expected = 100891344545564193334812497256\r\n assert_equal(special.comb(100, 50, exact=True), 
expected)\r\n\r\n def test_comb_with_np_int64(self):\r\n n = 70\r\n k = 30\r\n np_n = np.int64(n)\r\n np_k = np.int64(k)\r\n assert_equal(special.comb(np_n, np_k, exact=True),\r\n special.comb(n, k, exact=True))\r\n\r\n def test_comb_zeros(self):\r\n assert_equal(special.comb(2, 3, exact=True), 0)\r\n assert_equal(special.comb(-1, 3, exact=True), 0)\r\n assert_equal(special.comb(2, -1, exact=True), 0)\r\n assert_equal(special.comb(2, -1, exact=False), 0)\r\n assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),\r\n [0., 0., 0., 120.])\r\n\r\n def test_perm(self):\r\n assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])\r\n assert_almost_equal(special.perm(10, 3), 720.)\r\n assert_equal(special.perm(10, 3, exact=True), 720)\r\n\r\n def test_perm_zeros(self):\r\n assert_equal(special.perm(2, 3, exact=True), 0)\r\n assert_equal(special.perm(-1, 3, exact=True), 0)\r\n assert_equal(special.perm(2, -1, exact=True), 0)\r\n assert_equal(special.perm(2, -1, exact=False), 0)\r\n assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),\r\n [0., 0., 0., 720.])\r\n\r\n\r\nclass TestTrigonometric(TestCase):\r\n def test_cbrt(self):\r\n cb = special.cbrt(27)\r\n cbrl = 27**(1.0/3.0)\r\n assert_approx_equal(cb,cbrl)\r\n\r\n def test_cbrtmore(self):\r\n cb1 = special.cbrt(27.9)\r\n cbrl1 = 27.9**(1.0/3.0)\r\n assert_almost_equal(cb1,cbrl1,8)\r\n\r\n def test_cosdg(self):\r\n cdg = special.cosdg(90)\r\n cdgrl = cos(pi/2.0)\r\n assert_almost_equal(cdg,cdgrl,8)\r\n\r\n def test_cosdgmore(self):\r\n cdgm = special.cosdg(30)\r\n cdgmrl = cos(pi/6.0)\r\n assert_almost_equal(cdgm,cdgmrl,8)\r\n\r\n def test_cosm1(self):\r\n cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))\r\n csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)\r\n assert_array_almost_equal(cs,csrl,8)\r\n\r\n def test_cotdg(self):\r\n ct = special.cotdg(30)\r\n ctrl = tan(pi/6.0)**(-1)\r\n assert_almost_equal(ct,ctrl,8)\r\n\r\n def test_cotdgmore(self):\r\n ct1 = special.cotdg(45)\r\n ctrl1 = tan(pi/4.0)**(-1)\r\n assert_almost_equal(ct1,ctrl1,8)\r\n\r\n def test_specialpoints(self):\r\n assert_almost_equal(special.cotdg(45), 1.0, 14)\r\n assert_almost_equal(special.cotdg(-45), -1.0, 14)\r\n assert_almost_equal(special.cotdg(90), 0.0, 14)\r\n assert_almost_equal(special.cotdg(-90), 0.0, 14)\r\n assert_almost_equal(special.cotdg(135), -1.0, 14)\r\n assert_almost_equal(special.cotdg(-135), 1.0, 14)\r\n assert_almost_equal(special.cotdg(225), 1.0, 14)\r\n assert_almost_equal(special.cotdg(-225), -1.0, 14)\r\n assert_almost_equal(special.cotdg(270), 0.0, 14)\r\n assert_almost_equal(special.cotdg(-270), 0.0, 14)\r\n assert_almost_equal(special.cotdg(315), -1.0, 14)\r\n assert_almost_equal(special.cotdg(-315), 1.0, 14)\r\n assert_almost_equal(special.cotdg(765), 1.0, 14)\r\n\r\n def test_sinc(self):\r\n # the sinc implementation and more extensive sinc tests are in numpy\r\n assert_array_equal(special.sinc([0]), 1)\r\n assert_equal(special.sinc(0.0), 1.0)\r\n\r\n def test_sindg(self):\r\n sn = special.sindg(90)\r\n assert_equal(sn,1.0)\r\n\r\n def test_sindgmore(self):\r\n snm = special.sindg(30)\r\n snmrl = sin(pi/6.0)\r\n assert_almost_equal(snm,snmrl,8)\r\n snm1 = special.sindg(45)\r\n snmrl1 = sin(pi/4.0)\r\n assert_almost_equal(snm1,snmrl1,8)\r\n\r\n\r\nclass TestTandg(TestCase):\r\n\r\n def test_tandg(self):\r\n tn = special.tandg(30)\r\n tnrl = tan(pi/6.0)\r\n assert_almost_equal(tn,tnrl,8)\r\n\r\n def test_tandgmore(self):\r\n tnm = special.tandg(45)\r\n tnmrl = tan(pi/4.0)\r\n 
assert_almost_equal(tnm,tnmrl,8)\r\n tnm1 = special.tandg(60)\r\n tnmrl1 = tan(pi/3.0)\r\n assert_almost_equal(tnm1,tnmrl1,8)\r\n\r\n def test_specialpoints(self):\r\n assert_almost_equal(special.tandg(0), 0.0, 14)\r\n assert_almost_equal(special.tandg(45), 1.0, 14)\r\n assert_almost_equal(special.tandg(-45), -1.0, 14)\r\n assert_almost_equal(special.tandg(135), -1.0, 14)\r\n assert_almost_equal(special.tandg(-135), 1.0, 14)\r\n assert_almost_equal(special.tandg(180), 0.0, 14)\r\n assert_almost_equal(special.tandg(-180), 0.0, 14)\r\n assert_almost_equal(special.tandg(225), 1.0, 14)\r\n assert_almost_equal(special.tandg(-225), -1.0, 14)\r\n assert_almost_equal(special.tandg(315), -1.0, 14)\r\n assert_almost_equal(special.tandg(-315), 1.0, 14)\r\n\r\n\r\nclass TestEllip(TestCase):\r\n def test_ellipj_nan(self):\r\n \"\"\"Regression test for #912.\"\"\"\r\n special.ellipj(0.5, np.nan)\r\n\r\n def test_ellipj(self):\r\n el = special.ellipj(0.2,0)\r\n rel = [sin(0.2),cos(0.2),1.0,0.20]\r\n assert_array_almost_equal(el,rel,13)\r\n\r\n def test_ellipk(self):\r\n elk = special.ellipk(.2)\r\n assert_almost_equal(elk,1.659623598610528,11)\r\n\r\n assert_equal(special.ellipkm1(0.0), np.inf)\r\n assert_equal(special.ellipkm1(1.0), pi/2)\r\n assert_equal(special.ellipkm1(np.inf), 0.0)\r\n assert_equal(special.ellipkm1(np.nan), np.nan)\r\n assert_equal(special.ellipkm1(-1), np.nan)\r\n assert_allclose(special.ellipk(-10), 0.7908718902387385)\r\n\r\n def test_ellipkinc(self):\r\n elkinc = special.ellipkinc(pi/2,.2)\r\n elk = special.ellipk(0.2)\r\n assert_almost_equal(elkinc,elk,15)\r\n alpha = 20*pi/180\r\n phi = 45*pi/180\r\n m = sin(alpha)**2\r\n elkinc = special.ellipkinc(phi,m)\r\n assert_almost_equal(elkinc,0.79398143,8)\r\n # From pg. 614 of A & S\r\n\r\n assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)\r\n assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)\r\n assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)\r\n assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)\r\n assert_equal(special.ellipkinc(pi/2, 2), np.nan)\r\n assert_equal(special.ellipkinc(0, 0.5), 0.0)\r\n assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)\r\n assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)\r\n assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)\r\n assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)\r\n assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)\r\n assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)\r\n assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)\r\n assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)\r\n\r\n assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)\r\n assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)\r\n\r\n def test_ellipkinc_2(self):\r\n # Regression test for gh-3550\r\n # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value\r\n mbad = 0.68359375000000011\r\n phi = 0.9272952180016123\r\n m = np.nextafter(mbad, 0)\r\n mvals = []\r\n for j in range(10):\r\n mvals.append(m)\r\n m = np.nextafter(m, 1)\r\n f = special.ellipkinc(phi, mvals)\r\n assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)\r\n # this bug also appears at phi + n * pi for at least small n\r\n f1 = special.ellipkinc(phi + pi, mvals)\r\n assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)\r\n\r\n def test_ellipkinc_singular(self):\r\n # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)\r\n xlog = np.logspace(-300, -17, 25)\r\n xlin = 
np.linspace(1e-17, 0.1, 25)\r\n xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)\r\n\r\n assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e14)\r\n assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e14)\r\n assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e14)\r\n assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)\r\n assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e14)\r\n assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e14)\r\n assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e14)\r\n assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)\r\n\r\n def test_ellipe(self):\r\n ele = special.ellipe(.2)\r\n assert_almost_equal(ele,1.4890350580958529,8)\r\n\r\n assert_equal(special.ellipe(0.0), pi/2)\r\n assert_equal(special.ellipe(1.0), 1.0)\r\n assert_equal(special.ellipe(-np.inf), np.inf)\r\n assert_equal(special.ellipe(np.nan), np.nan)\r\n assert_equal(special.ellipe(2), np.nan)\r\n assert_allclose(special.ellipe(-10), 3.6391380384177689)\r\n\r\n def test_ellipeinc(self):\r\n eleinc = special.ellipeinc(pi/2,.2)\r\n ele = special.ellipe(0.2)\r\n assert_almost_equal(eleinc,ele,14)\r\n # pg 617 of A & S\r\n alpha, phi = 52*pi/180,35*pi/180\r\n m = sin(alpha)**2\r\n eleinc = special.ellipeinc(phi,m)\r\n assert_almost_equal(eleinc, 0.58823065, 8)\r\n\r\n assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)\r\n assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)\r\n assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)\r\n assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)\r\n assert_equal(special.ellipeinc(pi/2, 2), np.nan)\r\n assert_equal(special.ellipeinc(0, 0.5), 0.0)\r\n assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)\r\n assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)\r\n assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)\r\n assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)\r\n assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)\r\n assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)\r\n assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)\r\n assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)\r\n assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)\r\n\r\n def test_ellipeinc_2(self):\r\n # Regression test for gh-3550\r\n # ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value\r\n mbad = 0.68359375000000011\r\n phi = 0.9272952180016123\r\n m = np.nextafter(mbad, 0)\r\n mvals = []\r\n for j in range(10):\r\n mvals.append(m)\r\n m = np.nextafter(m, 1)\r\n f = special.ellipeinc(phi, mvals)\r\n assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)\r\n # this bug also appears at phi + n * pi for at least small n\r\n f1 = special.ellipeinc(phi + pi, mvals)\r\n assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)\r\n\r\n\r\nclass TestErf(TestCase):\r\n\r\n def test_erf(self):\r\n er = special.erf(.25)\r\n assert_almost_equal(er,0.2763263902,8)\r\n\r\n def test_erf_zeros(self):\r\n erz = special.erf_zeros(5)\r\n erzr = array([1.45061616+1.88094300j,\r\n 2.24465928+2.61657514j,\r\n 2.83974105+3.17562810j,\r\n 3.33546074+3.64617438j,\r\n 3.76900557+4.06069723j])\r\n assert_array_almost_equal(erz,erzr,4)\r\n\r\n def _check_variant_func(self, func, other_func, rtol, atol=0):\r\n np.random.seed(1234)\r\n n = 10000\r\n x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)\r\n y = np.random.pareto(0.02, n) 
* (2*np.random.randint(0, 2, n) - 1)\r\n z = x + 1j*y\r\n\r\n old_errors = np.seterr(all='ignore')\r\n try:\r\n w = other_func(z)\r\n w_real = other_func(x).real\r\n\r\n mask = np.isfinite(w)\r\n w = w[mask]\r\n z = z[mask]\r\n\r\n mask = np.isfinite(w_real)\r\n w_real = w_real[mask]\r\n x = x[mask]\r\n\r\n # test both real and complex variants\r\n assert_func_equal(func, w, z, rtol=rtol, atol=atol)\r\n assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)\r\n finally:\r\n np.seterr(**old_errors)\r\n\r\n def test_erfc_consistent(self):\r\n self._check_variant_func(\r\n cephes.erfc,\r\n lambda z: 1 - cephes.erf(z),\r\n rtol=1e-12,\r\n atol=1e-14 # <- the test function loses precision\r\n )\r\n\r\n def test_erfcx_consistent(self):\r\n self._check_variant_func(\r\n cephes.erfcx,\r\n lambda z: np.exp(z*z) * cephes.erfc(z),\r\n rtol=1e-12\r\n )\r\n\r\n def test_erfi_consistent(self):\r\n self._check_variant_func(\r\n cephes.erfi,\r\n lambda z: -1j * cephes.erf(1j*z),\r\n rtol=1e-12\r\n )\r\n\r\n def test_dawsn_consistent(self):\r\n self._check_variant_func(\r\n cephes.dawsn,\r\n lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),\r\n rtol=1e-12\r\n )\r\n\r\n def test_erfcinv(self):\r\n i = special.erfcinv(1)\r\n # Use assert_array_equal instead of assert_equal, so the comparsion\r\n # of -0.0 and 0.0 doesn't fail.\r\n assert_array_equal(i, 0)\r\n\r\n def test_erfinv(self):\r\n i = special.erfinv(0)\r\n assert_equal(i,0)\r\n\r\n def test_errprint(self):\r\n a = special.errprint()\r\n b = 1-a # a is the state 1-a inverts state\r\n c = special.errprint(b) # returns last state 'a'\r\n assert_equal(a,c)\r\n d = special.errprint(a) # returns to original state\r\n assert_equal(d,b) # makes sure state was returned\r\n # assert_equal(d,1-a)\r\n\r\n def test_erf_nan_inf(self):\r\n vals = [np.nan, -np.inf, np.inf]\r\n expected = [np.nan, -1, 1]\r\n assert_allclose(special.erf(vals), expected, rtol=1e-15)\r\n\r\n def test_erfc_nan_inf(self):\r\n vals = [np.nan, -np.inf, np.inf]\r\n expected = [np.nan, 2, 0]\r\n assert_allclose(special.erfc(vals), expected, rtol=1e-15)\r\n\r\n def test_erfcx_nan_inf(self):\r\n vals = [np.nan, -np.inf, np.inf]\r\n expected = [np.nan, np.inf, 0]\r\n assert_allclose(special.erfcx(vals), expected, rtol=1e-15)\r\n\r\n def test_erfi_nan_inf(self):\r\n vals = [np.nan, -np.inf, np.inf]\r\n expected = [np.nan, -np.inf, np.inf]\r\n assert_allclose(special.erfi(vals), expected, rtol=1e-15)\r\n\r\n def test_dawsn_nan_inf(self):\r\n vals = [np.nan, -np.inf, np.inf]\r\n expected = [np.nan, -0.0, 0.0]\r\n assert_allclose(special.dawsn(vals), expected, rtol=1e-15)\r\n\r\n def test_wofz_nan_inf(self):\r\n vals = [np.nan, -np.inf, np.inf]\r\n expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]\r\n assert_allclose(special.wofz(vals), expected, rtol=1e-15)\r\n\r\n\r\nclass TestEuler(TestCase):\r\n def test_euler(self):\r\n eu0 = special.euler(0)\r\n eu1 = special.euler(1)\r\n eu2 = special.euler(2) # just checking segfaults\r\n assert_almost_equal(eu0[0],1,8)\r\n assert_almost_equal(eu2[2],-1,8)\r\n eu24 = special.euler(24)\r\n mathworld = [1,1,5,61,1385,50521,2702765,199360981,\r\n 19391512145,2404879675441,\r\n 370371188237525,69348874393137901,\r\n 15514534163557086905]\r\n correct = zeros((25,),'d')\r\n for k in range(0,13):\r\n if (k % 2):\r\n correct[2*k] = -float(mathworld[k])\r\n else:\r\n correct[2*k] = float(mathworld[k])\r\n olderr = np.seterr(all='ignore')\r\n try:\r\n err = nan_to_num((eu24-correct)/correct)\r\n errmax = max(err)\r\n finally:\r\n np.seterr(**olderr)\r\n 
assert_almost_equal(errmax, 0.0, 14)\r\n\r\n\r\nclass TestExp(TestCase):\r\n def test_exp2(self):\r\n ex = special.exp2(2)\r\n exrl = 2**2\r\n assert_equal(ex,exrl)\r\n\r\n def test_exp2more(self):\r\n exm = special.exp2(2.5)\r\n exmrl = 2**(2.5)\r\n assert_almost_equal(exm,exmrl,8)\r\n\r\n def test_exp10(self):\r\n ex = special.exp10(2)\r\n exrl = 10**2\r\n assert_approx_equal(ex,exrl)\r\n\r\n def test_exp10more(self):\r\n exm = special.exp10(2.5)\r\n exmrl = 10**(2.5)\r\n assert_almost_equal(exm,exmrl,8)\r\n\r\n def test_expm1(self):\r\n ex = (special.expm1(2),special.expm1(3),special.expm1(4))\r\n exrl = (exp(2)-1,exp(3)-1,exp(4)-1)\r\n assert_array_almost_equal(ex,exrl,8)\r\n\r\n def test_expm1more(self):\r\n ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))\r\n exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)\r\n assert_array_almost_equal(ex1,exrl1,8)\r\n\r\n\r\nclass TestFactorialFunctions(TestCase):\r\n def test_factorial(self):\r\n # Some known values, float math\r\n assert_array_almost_equal(special.factorial(0), 1)\r\n assert_array_almost_equal(special.factorial(1), 1)\r\n assert_array_almost_equal(special.factorial(2), 2)\r\n assert_array_almost_equal([6., 24., 120.],\r\n special.factorial([3, 4, 5], exact=False))\r\n assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),\r\n [[120, 6], [24, 6]])\r\n\r\n # Some known values, integer math\r\n assert_equal(special.factorial(0, exact=True), 1)\r\n assert_equal(special.factorial(1, exact=True), 1)\r\n assert_equal(special.factorial(2, exact=True), 2)\r\n assert_equal(special.factorial(5, exact=True), 120)\r\n assert_equal(special.factorial(15, exact=True), 1307674368000)\r\n\r\n # ndarray shape is maintained\r\n assert_equal(special.factorial([7, 4, 15, 10], exact=True),\r\n [5040, 24, 1307674368000, 3628800])\r\n\r\n assert_equal(special.factorial([[5, 3], [4, 3]], True),\r\n [[120, 6], [24, 6]])\r\n\r\n # object arrays\r\n assert_equal(special.factorial(np.arange(-3, 22), True),\r\n special.factorial(np.arange(-3, 22), False))\r\n\r\n # int64 array\r\n assert_equal(special.factorial(np.arange(-3, 15), True),\r\n special.factorial(np.arange(-3, 15), False))\r\n\r\n # int32 array\r\n assert_equal(special.factorial(np.arange(-3, 5), True),\r\n special.factorial(np.arange(-3, 5), False))\r\n\r\n # Consistent output for n < 0\r\n for exact in (True, False):\r\n assert_array_equal(0, special.factorial(-3, exact))\r\n assert_array_equal([1, 2, 0, 0],\r\n special.factorial([1, 2, -5, -4], exact))\r\n\r\n for n in range(0, 22):\r\n # Compare all with math.factorial\r\n correct = math.factorial(n)\r\n assert_array_equal(correct, special.factorial(n, True))\r\n assert_array_equal(correct, special.factorial([n], True)[0])\r\n\r\n assert_allclose(float(correct), special.factorial(n, False))\r\n assert_allclose(float(correct), special.factorial([n], False)[0])\r\n\r\n # Compare exact=True vs False, scalar vs array\r\n assert_array_equal(special.factorial(n, True),\r\n special.factorial(n, False))\r\n\r\n assert_array_equal(special.factorial([n], True),\r\n special.factorial([n], False))\r\n\r\n def test_factorial2(self):\r\n assert_array_almost_equal([105., 384., 945.],\r\n special.factorial2([7, 8, 9], exact=False))\r\n assert_equal(special.factorial2(7, exact=True), 105)\r\n\r\n def test_factorialk(self):\r\n assert_equal(special.factorialk(5, 1, exact=True), 120)\r\n assert_equal(special.factorialk(5, 3, exact=True), 10)\r\n\r\n\r\nclass TestFresnel(TestCase):\r\n def test_fresnel(self):\r\n frs = array(special.fresnel(.5))\r\n 
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)\r\n\r\n def test_fresnel_inf1(self):\r\n frs = special.fresnel(np.inf)\r\n assert_equal(frs, (0.5, 0.5))\r\n\r\n def test_fresnel_inf2(self):\r\n frs = special.fresnel(-np.inf)\r\n assert_equal(frs, (-0.5, -0.5))\r\n\r\n # values from pg 329 Table 7.11 of A & S\r\n # slightly corrected in 4th decimal place\r\n def test_fresnel_zeros(self):\r\n szo, czo = special.fresnel_zeros(5)\r\n assert_array_almost_equal(szo,\r\n array([2.0093+0.2885j,\r\n 2.8335+0.2443j,\r\n 3.4675+0.2185j,\r\n 4.0026+0.2009j,\r\n 4.4742+0.1877j]),3)\r\n assert_array_almost_equal(czo,\r\n array([1.7437+0.3057j,\r\n 2.6515+0.2529j,\r\n 3.3204+0.2240j,\r\n 3.8757+0.2047j,\r\n 4.3611+0.1907j]),3)\r\n vals1 = special.fresnel(szo)[0]\r\n vals2 = special.fresnel(czo)[1]\r\n assert_array_almost_equal(vals1,0,14)\r\n assert_array_almost_equal(vals2,0,14)\r\n\r\n def test_fresnelc_zeros(self):\r\n szo, czo = special.fresnel_zeros(6)\r\n frc = special.fresnelc_zeros(6)\r\n assert_array_almost_equal(frc,czo,12)\r\n\r\n def test_fresnels_zeros(self):\r\n szo, czo = special.fresnel_zeros(5)\r\n frs = special.fresnels_zeros(5)\r\n assert_array_almost_equal(frs,szo,12)\r\n\r\n\r\nclass TestGamma(TestCase):\r\n def test_gamma(self):\r\n gam = special.gamma(5)\r\n assert_equal(gam,24.0)\r\n\r\n def test_gammaln(self):\r\n gamln = special.gammaln(3)\r\n lngam = log(special.gamma(3))\r\n assert_almost_equal(gamln,lngam,8)\r\n\r\n def test_gammainc(self):\r\n gama = special.gammainc(.5,.5)\r\n assert_almost_equal(gama,.7,1)\r\n\r\n def test_gammaincnan(self):\r\n gama = special.gammainc(-1,1)\r\n assert_(isnan(gama))\r\n\r\n def test_gammainczero(self):\r\n # bad arg but zero integration limit\r\n gama = special.gammainc(-1,0)\r\n assert_equal(gama,0.0)\r\n\r\n def test_gammaincinf(self):\r\n gama = special.gammainc(0.5, np.inf)\r\n assert_equal(gama,1.0)\r\n\r\n def test_gammaincc(self):\r\n gicc = special.gammaincc(.5,.5)\r\n greal = 1 - special.gammainc(.5,.5)\r\n assert_almost_equal(gicc,greal,8)\r\n\r\n def test_gammainccnan(self):\r\n gama = special.gammaincc(-1,1)\r\n assert_(isnan(gama))\r\n\r\n def test_gammainccinf(self):\r\n gama = special.gammaincc(0.5,np.inf)\r\n assert_equal(gama,0.0)\r\n\r\n def test_gammainccinv(self):\r\n gccinv = special.gammainccinv(.5,.5)\r\n gcinv = special.gammaincinv(.5,.5)\r\n assert_almost_equal(gccinv,gcinv,8)\r\n\r\n @with_special_errors\r\n def test_gammaincinv(self):\r\n y = special.gammaincinv(.4,.4)\r\n x = special.gammainc(.4,y)\r\n assert_almost_equal(x,0.4,1)\r\n y = special.gammainc(10, 0.05)\r\n x = special.gammaincinv(10, 2.5715803516000736e-20)\r\n assert_almost_equal(0.05, x, decimal=10)\r\n assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)\r\n x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)\r\n assert_almost_equal(11.0, x, decimal=10)\r\n\r\n @with_special_errors\r\n def test_975(self):\r\n # Regression test for ticket #975 -- switch point in algorithm\r\n # check that things work OK at the point, immediately next floats\r\n # around it, and a bit further away\r\n pts = [0.25,\r\n np.nextafter(0.25, 0), 0.25 - 1e-12,\r\n np.nextafter(0.25, 1), 0.25 + 1e-12]\r\n for xp in pts:\r\n y = special.gammaincinv(.4, xp)\r\n x = special.gammainc(0.4, y)\r\n assert_tol_equal(x, xp, rtol=1e-12)\r\n\r\n def test_rgamma(self):\r\n rgam = special.rgamma(8)\r\n rlgam = 1/special.gamma(8)\r\n assert_almost_equal(rgam,rlgam,8)\r\n\r\n def test_infinity(self):\r\n 
assert_(np.isinf(special.gamma(-1)))\r\n assert_equal(special.rgamma(-1), 0)\r\n\r\n\r\nclass TestHankel(TestCase):\r\n\r\n def test_negv1(self):\r\n assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)\r\n\r\n def test_hankel1(self):\r\n hank1 = special.hankel1(1,.1)\r\n hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)\r\n assert_almost_equal(hank1,hankrl,8)\r\n\r\n def test_negv1e(self):\r\n assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)\r\n\r\n def test_hankel1e(self):\r\n hank1e = special.hankel1e(1,.1)\r\n hankrle = special.hankel1(1,.1)*exp(-.1j)\r\n assert_almost_equal(hank1e,hankrle,8)\r\n\r\n def test_negv2(self):\r\n assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)\r\n\r\n def test_hankel2(self):\r\n hank2 = special.hankel2(1,.1)\r\n hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)\r\n assert_almost_equal(hank2,hankrl2,8)\r\n\r\n def test_neg2e(self):\r\n assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)\r\n\r\n def test_hankl2e(self):\r\n hank2e = special.hankel2e(1,.1)\r\n hankrl2e = special.hankel2e(1,.1)\r\n assert_almost_equal(hank2e,hankrl2e,8)\r\n\r\n\r\nclass TestHyper(TestCase):\r\n def test_h1vp(self):\r\n h1 = special.h1vp(1,.1)\r\n h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)\r\n assert_almost_equal(h1,h1real,8)\r\n\r\n def test_h2vp(self):\r\n h2 = special.h2vp(1,.1)\r\n h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)\r\n assert_almost_equal(h2,h2real,8)\r\n\r\n def test_hyp0f1(self):\r\n # scalar input\r\n assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)\r\n assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)\r\n\r\n # float input, expected values match mpmath\r\n x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])\r\n expected = np.array([0.58493659229143, 0.70566805723127, 1.0,\r\n 1.37789689539747, 1.60373685288480])\r\n assert_allclose(x, expected, rtol=1e-12)\r\n\r\n # complex input\r\n x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)\r\n assert_allclose(x, expected.astype(complex), rtol=1e-12)\r\n\r\n # test broadcasting\r\n x1 = [0.5, 1.5, 2.5]\r\n x2 = [0, 1, 0.5]\r\n x = special.hyp0f1(x1, x2)\r\n expected = [1.0, 1.8134302039235093, 1.21482702689997]\r\n assert_allclose(x, expected, rtol=1e-12)\r\n x = special.hyp0f1(np.row_stack([x1] * 2), x2)\r\n assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)\r\n assert_raises(ValueError, special.hyp0f1,\r\n np.row_stack([x1] * 3), [0, 1])\r\n\r\n def test_hyp0f1_gh5764(self):\r\n # Just checks the point that failed; there's a more systematic\r\n # test in test_mpmath\r\n res = special.hyp0f1(0.8, 0.5 + 0.5*1J)\r\n # The expected value was generated using mpmath\r\n assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)\r\n\r\n def test_hyp1f1(self):\r\n hyp1 = special.hyp1f1(.1,.1,.3)\r\n assert_almost_equal(hyp1, 1.3498588075760032,7)\r\n\r\n # test contributed by Moritz Deger (2008-05-29)\r\n # http://projects.scipy.org/scipy/scipy/ticket/659\r\n\r\n # reference data obtained from mathematica [ a, b, x, m(a,b,x)]:\r\n # produced with test_hyp1f1.nb\r\n ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],\r\n [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],\r\n [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],\r\n [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],\r\n [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],\r\n [4.26344966e+00, -2.32701773e+01, 
1.91635759e+01, 6.13816915e+21],\r\n [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],\r\n [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],\r\n [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],\r\n [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],\r\n [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],\r\n [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],\r\n [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],\r\n [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],\r\n [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],\r\n [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],\r\n [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],\r\n [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],\r\n [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],\r\n [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],\r\n [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],\r\n [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],\r\n [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],\r\n [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],\r\n [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],\r\n [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],\r\n [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],\r\n [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],\r\n [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],\r\n [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],\r\n [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],\r\n [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],\r\n [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],\r\n [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],\r\n [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],\r\n [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],\r\n [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],\r\n [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],\r\n [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],\r\n [1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],\r\n [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],\r\n [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],\r\n [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],\r\n [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],\r\n [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],\r\n [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],\r\n [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],\r\n [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],\r\n [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],\r\n [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],\r\n [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],\r\n [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],\r\n [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],\r\n [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],\r\n [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],\r\n 
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],\r\n [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],\r\n [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],\r\n [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],\r\n [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],\r\n [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],\r\n [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],\r\n [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],\r\n [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],\r\n [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],\r\n [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],\r\n [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],\r\n [1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],\r\n [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],\r\n [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],\r\n [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],\r\n [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],\r\n [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],\r\n [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],\r\n [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],\r\n [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],\r\n [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],\r\n [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],\r\n [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],\r\n [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],\r\n [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],\r\n [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],\r\n [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],\r\n [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],\r\n [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],\r\n [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],\r\n [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],\r\n [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],\r\n [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],\r\n [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],\r\n [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],\r\n [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],\r\n [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],\r\n [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],\r\n [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],\r\n [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],\r\n [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],\r\n [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],\r\n [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],\r\n [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])\r\n\r\n for a,b,c,expected in ref_data:\r\n result = special.hyp1f1(a,b,c)\r\n assert_(abs(expected - result)/expected < 1e-4)\r\n\r\n def test_hyp1f1_gh2957(self):\r\n hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)\r\n hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)\r\n assert_almost_equal(hyp1, hyp2, 12)\r\n\r\n def 
test_hyp1f1_gh2282(self):\r\n hyp = special.hyp1f1(0.5, 1.5, -1000)\r\n assert_almost_equal(hyp, 0.028024956081989643, 12)\r\n\r\n def test_hyp1f2(self):\r\n pass\r\n\r\n def test_hyp2f0(self):\r\n pass\r\n\r\n def test_hyp2f1(self):\r\n # a collection of special cases taken from AMS 55\r\n values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],\r\n [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],\r\n [1, 1, 2, 0.2, -1/0.2*log(1-0.2)],\r\n [3, 3.5, 1.5, 0.2**2,\r\n 0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],\r\n [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],\r\n [3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],\r\n [3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *\r\n special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],\r\n [5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *\r\n special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],\r\n [4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *\r\n special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],\r\n # and some others\r\n # ticket #424\r\n [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],\r\n # negative integer a or b, with c-a-b integer and x > 0.9\r\n [-2,3,1,0.95,0.715],\r\n [2,-3,1,0.95,-0.007],\r\n [-6,3,1,0.95,0.0000810625],\r\n [2,-5,1,0.95,-0.000029375],\r\n # huge negative integers\r\n (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),\r\n (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),\r\n ]\r\n for i, (a, b, c, x, v) in enumerate(values):\r\n cv = special.hyp2f1(a, b, c, x)\r\n assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)\r\n\r\n def test_hyp3f0(self):\r\n pass\r\n\r\n def test_hyperu(self):\r\n val1 = special.hyperu(1,0.1,100)\r\n assert_almost_equal(val1,0.0098153,7)\r\n a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]\r\n a,b = asarray(a), asarray(b)\r\n z = 0.5\r\n hypu = special.hyperu(a,b,z)\r\n hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /\r\n (special.gamma(1+a-b)*special.gamma(b)) -\r\n z**(1-b)*special.hyp1f1(1+a-b,2-b,z)\r\n / (special.gamma(a)*special.gamma(2-b)))\r\n assert_array_almost_equal(hypu,hprl,12)\r\n\r\n def test_hyperu_gh2287(self):\r\n assert_almost_equal(special.hyperu(1, 1.5, 20.2),\r\n 0.048360918656699191, 12)\r\n\r\n\r\nclass TestBessel(TestCase):\r\n def test_itj0y0(self):\r\n it0 = array(special.itj0y0(.2))\r\n assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)\r\n\r\n def test_it2j0y0(self):\r\n it2 = array(special.it2j0y0(.2))\r\n assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)\r\n\r\n def test_negv_iv(self):\r\n assert_equal(special.iv(3,2), special.iv(-3,2))\r\n\r\n def test_j0(self):\r\n oz = special.j0(.1)\r\n ozr = special.jn(0,.1)\r\n assert_almost_equal(oz,ozr,8)\r\n\r\n def test_j1(self):\r\n o1 = special.j1(.1)\r\n o1r = special.jn(1,.1)\r\n assert_almost_equal(o1,o1r,8)\r\n\r\n def test_jn(self):\r\n jnnr = special.jn(1,.2)\r\n assert_almost_equal(jnnr,0.099500832639235995,8)\r\n\r\n def test_negv_jv(self):\r\n assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)\r\n\r\n def test_jv(self):\r\n values = [[0, 0.1, 0.99750156206604002],\r\n [2./3, 1e-8, 0.3239028506761532e-5],\r\n [2./3, 1e-10, 0.1503423854873779e-6],\r\n [3.1, 1e-10, 0.1711956265409013e-32],\r\n [2./3, 4.0, -0.2325440850267039],\r\n ]\r\n for i, (v, x, y) in enumerate(values):\r\n yc = special.jv(v, x)\r\n assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)\r\n\r\n def test_negv_jve(self):\r\n assert_almost_equal(special.jve(-3,2), 
-special.jve(3,2), 14)\r\n\r\n def test_jve(self):\r\n jvexp = special.jve(1,.2)\r\n assert_almost_equal(jvexp,0.099500832639235995,8)\r\n jvexp1 = special.jve(1,.2+1j)\r\n z = .2+1j\r\n jvexpr = special.jv(1,z)*exp(-abs(z.imag))\r\n assert_almost_equal(jvexp1,jvexpr,8)\r\n\r\n def test_jn_zeros(self):\r\n jn0 = special.jn_zeros(0,5)\r\n jn1 = special.jn_zeros(1,5)\r\n assert_array_almost_equal(jn0,array([2.4048255577,\r\n 5.5200781103,\r\n 8.6537279129,\r\n 11.7915344391,\r\n 14.9309177086]),4)\r\n assert_array_almost_equal(jn1,array([3.83171,\r\n 7.01559,\r\n 10.17347,\r\n 13.32369,\r\n 16.47063]),4)\r\n\r\n jn102 = special.jn_zeros(102,5)\r\n assert_tol_equal(jn102, array([110.89174935992040343,\r\n 117.83464175788308398,\r\n 123.70194191713507279,\r\n 129.02417238949092824,\r\n 134.00114761868422559]), rtol=1e-13)\r\n\r\n jn301 = special.jn_zeros(301,5)\r\n assert_tol_equal(jn301, array([313.59097866698830153,\r\n 323.21549776096288280,\r\n 331.22338738656748796,\r\n 338.39676338872084500,\r\n 345.03284233056064157]), rtol=1e-13)\r\n\r\n def test_jn_zeros_slow(self):\r\n jn0 = special.jn_zeros(0, 300)\r\n assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)\r\n assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)\r\n assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)\r\n\r\n jn10 = special.jn_zeros(10, 300)\r\n assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)\r\n assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)\r\n assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)\r\n\r\n jn3010 = special.jn_zeros(3010,5)\r\n assert_tol_equal(jn3010, array([3036.86590780927,\r\n 3057.06598526482,\r\n 3073.66360690272,\r\n 3088.37736494778,\r\n 3101.86438139042]), rtol=1e-8)\r\n\r\n def test_jnjnp_zeros(self):\r\n jn = special.jn\r\n\r\n def jnp(n, x):\r\n return (jn(n-1,x) - jn(n+1,x))/2\r\n for nt in range(1, 30):\r\n z, n, m, t = special.jnjnp_zeros(nt)\r\n for zz, nn, tt in zip(z, n, t):\r\n if tt == 0:\r\n assert_allclose(jn(nn, zz), 0, atol=1e-6)\r\n elif tt == 1:\r\n assert_allclose(jnp(nn, zz), 0, atol=1e-6)\r\n else:\r\n raise AssertionError(\"Invalid t return for nt=%d\" % nt)\r\n\r\n def test_jnp_zeros(self):\r\n jnp = special.jnp_zeros(1,5)\r\n assert_array_almost_equal(jnp, array([1.84118,\r\n 5.33144,\r\n 8.53632,\r\n 11.70600,\r\n 14.86359]),4)\r\n jnp = special.jnp_zeros(443,5)\r\n assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)\r\n\r\n def test_jnyn_zeros(self):\r\n jnz = special.jnyn_zeros(1,5)\r\n assert_array_almost_equal(jnz,(array([3.83171,\r\n 7.01559,\r\n 10.17347,\r\n 13.32369,\r\n 16.47063]),\r\n array([1.84118,\r\n 5.33144,\r\n 8.53632,\r\n 11.70600,\r\n 14.86359]),\r\n array([2.19714,\r\n 5.42968,\r\n 8.59601,\r\n 11.74915,\r\n 14.89744]),\r\n array([3.68302,\r\n 6.94150,\r\n 10.12340,\r\n 13.28576,\r\n 16.44006])),5)\r\n\r\n def test_jvp(self):\r\n jvprim = special.jvp(2,2)\r\n jv0 = (special.jv(1,2)-special.jv(3,2))/2\r\n assert_almost_equal(jvprim,jv0,10)\r\n\r\n def test_k0(self):\r\n ozk = special.k0(.1)\r\n ozkr = special.kv(0,.1)\r\n assert_almost_equal(ozk,ozkr,8)\r\n\r\n def test_k0e(self):\r\n ozke = special.k0e(.1)\r\n ozker = special.kve(0,.1)\r\n assert_almost_equal(ozke,ozker,8)\r\n\r\n def test_k1(self):\r\n o1k = special.k1(.1)\r\n o1kr = special.kv(1,.1)\r\n assert_almost_equal(o1k,o1kr,8)\r\n\r\n def test_k1e(self):\r\n o1ke = special.k1e(.1)\r\n o1ker = special.kve(1,.1)\r\n assert_almost_equal(o1ke,o1ker,8)\r\n\r\n def test_jacobi(self):\r\n a = 5*np.random.random() - 
1\r\n b = 5*np.random.random() - 1\r\n P0 = special.jacobi(0,a,b)\r\n P1 = special.jacobi(1,a,b)\r\n P2 = special.jacobi(2,a,b)\r\n P3 = special.jacobi(3,a,b)\r\n\r\n assert_array_almost_equal(P0.c,[1],13)\r\n assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)\r\n cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]\r\n p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]\r\n assert_array_almost_equal(P2.c,array(p2c)/8.0,13)\r\n cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),\r\n 12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]\r\n p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]\r\n assert_array_almost_equal(P3.c,array(p3c)/48.0,13)\r\n\r\n def test_kn(self):\r\n kn1 = special.kn(0,.2)\r\n assert_almost_equal(kn1,1.7527038555281462,8)\r\n\r\n def test_negv_kv(self):\r\n assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))\r\n\r\n def test_kv0(self):\r\n kv0 = special.kv(0,.2)\r\n assert_almost_equal(kv0, 1.7527038555281462, 10)\r\n\r\n def test_kv1(self):\r\n kv1 = special.kv(1,0.2)\r\n assert_almost_equal(kv1, 4.775972543220472, 10)\r\n\r\n def test_kv2(self):\r\n kv2 = special.kv(2,0.2)\r\n assert_almost_equal(kv2, 49.51242928773287, 10)\r\n\r\n def test_kn_largeorder(self):\r\n assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)\r\n\r\n def test_kv_largearg(self):\r\n assert_equal(special.kv(0, 1e19), 0)\r\n\r\n def test_negv_kve(self):\r\n assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))\r\n\r\n def test_kve(self):\r\n kve1 = special.kve(0,.2)\r\n kv1 = special.kv(0,.2)*exp(.2)\r\n assert_almost_equal(kve1,kv1,8)\r\n z = .2+1j\r\n kve2 = special.kve(0,z)\r\n kv2 = special.kv(0,z)*exp(z)\r\n assert_almost_equal(kve2,kv2,8)\r\n\r\n def test_kvp_v0n1(self):\r\n z = 2.2\r\n assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)\r\n\r\n def test_kvp_n1(self):\r\n v = 3.\r\n z = 2.2\r\n xc = -special.kv(v+1,z) + v/z*special.kv(v,z)\r\n x = special.kvp(v,z, n=1)\r\n assert_almost_equal(xc, x, 10) # this function (kvp) is broken\r\n\r\n def test_kvp_n2(self):\r\n v = 3.\r\n z = 2.2\r\n xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z\r\n x = special.kvp(v, z, n=2)\r\n assert_almost_equal(xc, x, 10)\r\n\r\n def test_y0(self):\r\n oz = special.y0(.1)\r\n ozr = special.yn(0,.1)\r\n assert_almost_equal(oz,ozr,8)\r\n\r\n def test_y1(self):\r\n o1 = special.y1(.1)\r\n o1r = special.yn(1,.1)\r\n assert_almost_equal(o1,o1r,8)\r\n\r\n def test_y0_zeros(self):\r\n yo,ypo = special.y0_zeros(2)\r\n zo,zpo = special.y0_zeros(2,complex=1)\r\n all = r_[yo,zo]\r\n allval = r_[ypo,zpo]\r\n assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)\r\n assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)\r\n\r\n def test_y1_zeros(self):\r\n y1 = special.y1_zeros(1)\r\n assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)\r\n\r\n def test_y1p_zeros(self):\r\n y1p = special.y1p_zeros(1,complex=1)\r\n assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)\r\n\r\n def test_yn_zeros(self):\r\n an = special.yn_zeros(4,2)\r\n assert_array_almost_equal(an,array([5.64515, 9.36162]),5)\r\n an = special.yn_zeros(443,5)\r\n assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,\r\n 472.80651546418663566, 481.27353184725625838,\r\n 488.98055964441374646], rtol=1e-15)\r\n\r\n def test_ynp_zeros(self):\r\n ao = special.ynp_zeros(0,2)\r\n assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)\r\n ao = special.ynp_zeros(43,5)\r\n assert_tol_equal(special.yvp(43, ao), 0, 
atol=1e-15)\r\n ao = special.ynp_zeros(443,5)\r\n assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)\r\n\r\n def test_ynp_zeros_large_order(self):\r\n ao = special.ynp_zeros(443,5)\r\n assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)\r\n\r\n def test_yn(self):\r\n yn2n = special.yn(1,.2)\r\n assert_almost_equal(yn2n,-3.3238249881118471,8)\r\n\r\n def test_negv_yv(self):\r\n assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)\r\n\r\n def test_yv(self):\r\n yv2 = special.yv(1,.2)\r\n assert_almost_equal(yv2,-3.3238249881118471,8)\r\n\r\n def test_negv_yve(self):\r\n assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)\r\n\r\n def test_yve(self):\r\n yve2 = special.yve(1,.2)\r\n assert_almost_equal(yve2,-3.3238249881118471,8)\r\n yve2r = special.yv(1,.2+1j)*exp(-1)\r\n yve22 = special.yve(1,.2+1j)\r\n assert_almost_equal(yve22,yve2r,8)\r\n\r\n def test_yvp(self):\r\n yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0\r\n yvp1 = special.yvp(2,.2)\r\n assert_array_almost_equal(yvp1,yvpr,10)\r\n\r\n def _cephes_vs_amos_points(self):\r\n \"\"\"Yield points at which to compare Cephes implementation to AMOS\"\"\"\r\n # check several points, including large-amplitude ones\r\n for v in [-120, -100.3, -20., -10., -1., -.5,\r\n 0., 1., 12.49, 120., 301]:\r\n for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,\r\n 700.6, 1300, 10003]:\r\n yield v, z\r\n\r\n # check half-integers; these are problematic points at least\r\n # for cephes/iv\r\n for v in 0.5 + arange(-60, 60):\r\n yield v, 3.5\r\n\r\n def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):\r\n for v, z in self._cephes_vs_amos_points():\r\n if skip is not None and skip(v, z):\r\n continue\r\n c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)\r\n if np.isinf(c1):\r\n assert_(np.abs(c2) >= 1e300, (v, z))\r\n elif np.isnan(c1):\r\n assert_(c2.imag != 0, (v, z))\r\n else:\r\n assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)\r\n if v == int(v):\r\n assert_tol_equal(c3, c2, err_msg=(v, z),\r\n rtol=rtol, atol=atol)\r\n\r\n def test_jv_cephes_vs_amos(self):\r\n self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)\r\n\r\n def test_yv_cephes_vs_amos(self):\r\n self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)\r\n\r\n def test_yv_cephes_vs_amos_only_small_orders(self):\r\n skipper = lambda v, z: (abs(v) > 50)\r\n self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)\r\n\r\n def test_iv_cephes_vs_amos(self):\r\n olderr = np.seterr(all='ignore')\r\n try:\r\n self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)\r\n finally:\r\n np.seterr(**olderr)\r\n\r\n @dec.slow\r\n def test_iv_cephes_vs_amos_mass_test(self):\r\n N = 1000000\r\n np.random.seed(1)\r\n v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)\r\n x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)\r\n\r\n imsk = (np.random.randint(8, size=N) == 0)\r\n v[imsk] = v[imsk].astype(int)\r\n\r\n old_err = np.seterr(all='ignore')\r\n try:\r\n c1 = special.iv(v, x)\r\n c2 = special.iv(v, x+0j)\r\n\r\n # deal with differences in the inf and zero cutoffs\r\n c1[abs(c1) > 1e300] = np.inf\r\n c2[abs(c2) > 1e300] = np.inf\r\n c1[abs(c1) < 1e-300] = 0\r\n c2[abs(c2) < 1e-300] = 0\r\n\r\n dc = abs(c1/c2 - 1)\r\n dc[np.isnan(dc)] = 0\r\n finally:\r\n np.seterr(**old_err)\r\n\r\n k = np.argmax(dc)\r\n\r\n # Most error apparently comes from AMOS and not our implementation;\r\n # there are some problems near integer orders 
there\r\n assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))\r\n\r\n def test_kv_cephes_vs_amos(self):\r\n self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)\r\n self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)\r\n\r\n def test_ticket_623(self):\r\n assert_tol_equal(special.jv(3, 4), 0.43017147387562193)\r\n assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)\r\n assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)\r\n\r\n def test_ticket_853(self):\r\n \"\"\"Negative-order Bessels\"\"\"\r\n # cephes\r\n assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)\r\n assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)\r\n assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)\r\n assert_tol_equal(special.yv(-2, 1), -1.650682606816255)\r\n assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)\r\n assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)\r\n assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)\r\n assert_tol_equal(special.kv(-2, 1), 1.624838898635178)\r\n assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)\r\n assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)\r\n assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)\r\n assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)\r\n # amos\r\n assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)\r\n assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)\r\n assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)\r\n assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)\r\n\r\n assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)\r\n assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)\r\n assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)\r\n assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)\r\n\r\n assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)\r\n assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)\r\n assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)\r\n assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)\r\n\r\n assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)\r\n assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)\r\n assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)\r\n assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)\r\n\r\n assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))\r\n assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))\r\n assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))\r\n assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))\r\n\r\n assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))\r\n assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))\r\n\r\n def test_ticket_854(self):\r\n \"\"\"Real-valued Bessel domains\"\"\"\r\n assert_(isnan(special.jv(0.5, -1)))\r\n assert_(isnan(special.iv(0.5, -1)))\r\n assert_(isnan(special.yv(0.5, -1)))\r\n assert_(isnan(special.yv(1, -1)))\r\n assert_(isnan(special.kv(0.5, -1)))\r\n assert_(isnan(special.kv(1, -1)))\r\n assert_(isnan(special.jve(0.5, -1)))\r\n assert_(isnan(special.ive(0.5, -1)))\r\n assert_(isnan(special.yve(0.5, -1)))\r\n assert_(isnan(special.yve(1, -1)))\r\n assert_(isnan(special.kve(0.5, -1)))\r\n 
assert_(isnan(special.kve(1, -1)))\r\n assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))\r\n assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))\r\n\r\n def test_ticket_503(self):\r\n \"\"\"Real-valued Bessel I overflow\"\"\"\r\n assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)\r\n assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)\r\n\r\n def test_iv_hyperg_poles(self):\r\n assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)\r\n\r\n def iv_series(self, v, z, n=200):\r\n k = arange(0, n).astype(float_)\r\n r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)\r\n r[isnan(r)] = inf\r\n r = exp(r)\r\n err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10\r\n return r.sum(), err\r\n\r\n def test_i0_series(self):\r\n for z in [1., 10., 200.5]:\r\n value, err = self.iv_series(0, z)\r\n assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)\r\n\r\n def test_i1_series(self):\r\n for z in [1., 10., 200.5]:\r\n value, err = self.iv_series(1, z)\r\n assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)\r\n\r\n def test_iv_series(self):\r\n for v in [-20., -10., -1., 0., 1., 12.49, 120.]:\r\n for z in [1., 10., 200.5, -1+2j]:\r\n value, err = self.iv_series(v, z)\r\n assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))\r\n\r\n def test_i0(self):\r\n values = [[0.0, 1.0],\r\n [1e-10, 1.0],\r\n [0.1, 0.9071009258],\r\n [0.5, 0.6450352706],\r\n [1.0, 0.4657596077],\r\n [2.5, 0.2700464416],\r\n [5.0, 0.1835408126],\r\n [20.0, 0.0897803119],\r\n ]\r\n for i, (x, v) in enumerate(values):\r\n cv = special.i0(x) * exp(-x)\r\n assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)\r\n\r\n def test_i0e(self):\r\n oize = special.i0e(.1)\r\n oizer = special.ive(0,.1)\r\n assert_almost_equal(oize,oizer,8)\r\n\r\n def test_i1(self):\r\n values = [[0.0, 0.0],\r\n [1e-10, 0.4999999999500000e-10],\r\n [0.1, 0.0452984468],\r\n [0.5, 0.1564208032],\r\n [1.0, 0.2079104154],\r\n [5.0, 0.1639722669],\r\n [20.0, 0.0875062222],\r\n ]\r\n for i, (x, v) in enumerate(values):\r\n cv = special.i1(x) * exp(-x)\r\n assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)\r\n\r\n def test_i1e(self):\r\n oi1e = special.i1e(.1)\r\n oi1er = special.ive(1,.1)\r\n assert_almost_equal(oi1e,oi1er,8)\r\n\r\n def test_iti0k0(self):\r\n iti0 = array(special.iti0k0(5))\r\n assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)\r\n\r\n def test_it2i0k0(self):\r\n it2k = special.it2i0k0(.1)\r\n assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)\r\n\r\n def test_iv(self):\r\n iv1 = special.iv(0,.1)*exp(-.1)\r\n assert_almost_equal(iv1,0.90710092578230106,10)\r\n\r\n def test_negv_ive(self):\r\n assert_equal(special.ive(3,2), special.ive(-3,2))\r\n\r\n def test_ive(self):\r\n ive1 = special.ive(0,.1)\r\n iv1 = special.iv(0,.1)*exp(-.1)\r\n assert_almost_equal(ive1,iv1,10)\r\n\r\n def test_ivp0(self):\r\n assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)\r\n\r\n def test_ivp(self):\r\n y = (special.iv(0,2) + special.iv(2,2))/2\r\n x = special.ivp(1,2)\r\n assert_almost_equal(x,y,10)\r\n\r\n\r\nclass TestLaguerre(TestCase):\r\n def test_laguerre(self):\r\n lag0 = special.laguerre(0)\r\n lag1 = special.laguerre(1)\r\n lag2 = special.laguerre(2)\r\n lag3 = special.laguerre(3)\r\n lag4 = special.laguerre(4)\r\n lag5 = special.laguerre(5)\r\n assert_array_almost_equal(lag0.c,[1],13)\r\n assert_array_almost_equal(lag1.c,[-1,1],13)\r\n 
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)\r\n assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)\r\n assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)\r\n assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)\r\n\r\n def test_genlaguerre(self):\r\n k = 5*np.random.random() - 0.9\r\n lag0 = special.genlaguerre(0,k)\r\n lag1 = special.genlaguerre(1,k)\r\n lag2 = special.genlaguerre(2,k)\r\n lag3 = special.genlaguerre(3,k)\r\n assert_equal(lag0.c,[1])\r\n assert_equal(lag1.c,[-1,k+1])\r\n assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)\r\n assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)\r\n\r\n\r\n# Base polynomials come from Abrahmowitz and Stegan\r\nclass TestLegendre(TestCase):\r\n def test_legendre(self):\r\n leg0 = special.legendre(0)\r\n leg1 = special.legendre(1)\r\n leg2 = special.legendre(2)\r\n leg3 = special.legendre(3)\r\n leg4 = special.legendre(4)\r\n leg5 = special.legendre(5)\r\n assert_equal(leg0.c, [1])\r\n assert_equal(leg1.c, [1,0])\r\n assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)\r\n assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)\r\n assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)\r\n assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)\r\n\r\n\r\nclass TestLambda(TestCase):\r\n def test_lmbda(self):\r\n lam = special.lmbda(1,.1)\r\n lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),\r\n array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))\r\n assert_array_almost_equal(lam,lamr,8)\r\n\r\n\r\nclass TestLog1p(TestCase):\r\n def test_log1p(self):\r\n l1p = (special.log1p(10), special.log1p(11), special.log1p(12))\r\n l1prl = (log(11), log(12), log(13))\r\n assert_array_almost_equal(l1p,l1prl,8)\r\n\r\n def test_log1pmore(self):\r\n l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))\r\n l1pmrl = (log(2),log(2.1),log(2.2))\r\n assert_array_almost_equal(l1pm,l1pmrl,8)\r\n\r\n\r\nclass TestLegendreFunctions(TestCase):\r\n def test_clpmn(self):\r\n z = 0.5+0.3j\r\n clp = special.clpmn(2, 2, z, 3)\r\n assert_array_almost_equal(clp,\r\n (array([[1.0000, z, 0.5*(3*z*z-1)],\r\n [0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],\r\n [0.0000, 0.0000, 3*(z*z-1)]]),\r\n array([[0.0000, 1.0000, 3*z],\r\n [0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],\r\n [0.0000, 0.0000, 6*z]])),\r\n 7)\r\n\r\n def test_clpmn_close_to_real_2(self):\r\n eps = 1e-10\r\n m = 1\r\n n = 3\r\n x = 0.5\r\n clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]\r\n clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]\r\n assert_array_almost_equal(array([clp_plus, clp_minus]),\r\n array([special.lpmv(m, n, x),\r\n special.lpmv(m, n, x)]),\r\n 7)\r\n\r\n def test_clpmn_close_to_real_3(self):\r\n eps = 1e-10\r\n m = 1\r\n n = 3\r\n x = 0.5\r\n clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]\r\n clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]\r\n assert_array_almost_equal(array([clp_plus, clp_minus]),\r\n array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),\r\n special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),\r\n 7)\r\n\r\n def test_clpmn_across_unit_circle(self):\r\n eps = 1e-7\r\n m = 1\r\n n = 1\r\n x = 1j\r\n for type in [2, 3]:\r\n assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],\r\n special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)\r\n\r\n def test_inf(self):\r\n for z in (1, -1):\r\n for n in range(4):\r\n for m in range(1, n):\r\n lp = special.clpmn(m, n, z)\r\n 
assert_(np.isinf(lp[1][1,1:]).all())\r\n lp = special.lpmn(m, n, z)\r\n assert_(np.isinf(lp[1][1,1:]).all())\r\n\r\n def test_deriv_clpmn(self):\r\n # data inside and outside of the unit circle\r\n zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,\r\n 1+1j, -1+1j, -1-1j, 1-1j]\r\n m = 2\r\n n = 3\r\n for type in [2, 3]:\r\n for z in zvals:\r\n for h in [1e-3, 1e-3j]:\r\n approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]\r\n - special.clpmn(m, n, z-0.5*h, type)[0])/h\r\n assert_allclose(special.clpmn(m, n, z, type)[1],\r\n approx_derivative,\r\n rtol=1e-4)\r\n\r\n def test_lpmn(self):\r\n lp = special.lpmn(0,2,.5)\r\n assert_array_almost_equal(lp,(array([[1.00000,\r\n 0.50000,\r\n -0.12500]]),\r\n array([[0.00000,\r\n 1.00000,\r\n 1.50000]])),4)\r\n\r\n def test_lpn(self):\r\n lpnf = special.lpn(2,.5)\r\n assert_array_almost_equal(lpnf,(array([1.00000,\r\n 0.50000,\r\n -0.12500]),\r\n array([0.00000,\r\n 1.00000,\r\n 1.50000])),4)\r\n\r\n def test_lpmv(self):\r\n lp = special.lpmv(0,2,.5)\r\n assert_almost_equal(lp,-0.125,7)\r\n lp = special.lpmv(0,40,.001)\r\n assert_almost_equal(lp,0.1252678976534484,7)\r\n\r\n # XXX: this is outside the domain of the current implementation,\r\n # so ensure it returns a NaN rather than a wrong answer.\r\n olderr = np.seterr(all='ignore')\r\n try:\r\n lp = special.lpmv(-1,-1,.001)\r\n finally:\r\n np.seterr(**olderr)\r\n assert_(lp != 0 or np.isnan(lp))\r\n\r\n def test_lqmn(self):\r\n lqmnf = special.lqmn(0,2,.5)\r\n lqf = special.lqn(2,.5)\r\n assert_array_almost_equal(lqmnf[0][0],lqf[0],4)\r\n assert_array_almost_equal(lqmnf[1][0],lqf[1],4)\r\n\r\n def test_lqmn_gt1(self):\r\n \"\"\"algorithm for real arguments changes at 1.0001\r\n test against analytical result for m=2, n=1\r\n \"\"\"\r\n x0 = 1.0001\r\n delta = 0.00002\r\n for x in (x0-delta, x0+delta):\r\n lq = special.lqmn(2, 1, x)[0][-1, -1]\r\n expected = 2/(x*x-1)\r\n assert_almost_equal(lq, expected)\r\n\r\n def test_lqmn_shape(self):\r\n a, b = special.lqmn(4, 4, 1.1)\r\n assert_equal(a.shape, (5, 5))\r\n assert_equal(b.shape, (5, 5))\r\n\r\n a, b = special.lqmn(4, 0, 1.1)\r\n assert_equal(a.shape, (5, 1))\r\n assert_equal(b.shape, (5, 1))\r\n\r\n def test_lqn(self):\r\n lqf = special.lqn(2,.5)\r\n assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),\r\n array([1.3333, 1.216, -0.8427])),4)\r\n\r\n\r\nclass TestMathieu(TestCase):\r\n\r\n def test_mathieu_a(self):\r\n pass\r\n\r\n def test_mathieu_even_coef(self):\r\n mc = special.mathieu_even_coef(2,5)\r\n # Q not defined broken and cannot figure out proper reporting order\r\n\r\n def test_mathieu_odd_coef(self):\r\n # same problem as above\r\n pass\r\n\r\n\r\nclass TestFresnelIntegral(TestCase):\r\n\r\n def test_modfresnelp(self):\r\n pass\r\n\r\n def test_modfresnelm(self):\r\n pass\r\n\r\n\r\nclass TestOblCvSeq(TestCase):\r\n def test_obl_cv_seq(self):\r\n obl = special.obl_cv_seq(0,3,1)\r\n assert_array_almost_equal(obl,array([-0.348602,\r\n 1.393206,\r\n 5.486800,\r\n 11.492120]),5)\r\n\r\n\r\nclass TestParabolicCylinder(TestCase):\r\n def test_pbdn_seq(self):\r\n pb = special.pbdn_seq(1,.1)\r\n assert_array_almost_equal(pb,(array([0.9975,\r\n 0.0998]),\r\n array([-0.0499,\r\n 0.9925])),4)\r\n\r\n def test_pbdv(self):\r\n pbv = special.pbdv(1,.2)\r\n derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]\r\n\r\n def test_pbdv_seq(self):\r\n pbn = special.pbdn_seq(1,.1)\r\n pbv = special.pbdv_seq(1,.1)\r\n assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)\r\n\r\n def test_pbdv_points(self):\r\n # 
simple case\r\n eta = np.linspace(-10, 10, 5)\r\n z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)\r\n assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)\r\n\r\n # some points\r\n assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)\r\n assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)\r\n\r\n def test_pbdv_gradient(self):\r\n x = np.linspace(-4, 4, 8)[:,None]\r\n eta = np.linspace(-10, 10, 5)[None,:]\r\n\r\n p = special.pbdv(eta, x)\r\n eps = 1e-7 + 1e-7*abs(x)\r\n dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.\r\n assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)\r\n\r\n def test_pbvv_gradient(self):\r\n x = np.linspace(-4, 4, 8)[:,None]\r\n eta = np.linspace(-10, 10, 5)[None,:]\r\n\r\n p = special.pbvv(eta, x)\r\n eps = 1e-7 + 1e-7*abs(x)\r\n dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.\r\n assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)\r\n\r\n\r\nclass TestPolygamma(TestCase):\r\n # from Table 6.2 (pg. 271) of A&S\r\n def test_polygamma(self):\r\n poly2 = special.polygamma(2,1)\r\n poly3 = special.polygamma(3,1)\r\n assert_almost_equal(poly2,-2.4041138063,10)\r\n assert_almost_equal(poly3,6.4939394023,10)\r\n\r\n # Test polygamma(0, x) == psi(x)\r\n x = [2, 3, 1.1e14]\r\n assert_almost_equal(special.polygamma(0, x), special.psi(x))\r\n\r\n # Test broadcasting\r\n n = [0, 1, 2]\r\n x = [0.5, 1.5, 2.5]\r\n expected = [-1.9635100260214238, 0.93480220054467933,\r\n -0.23620405164172739]\r\n assert_almost_equal(special.polygamma(n, x), expected)\r\n expected = np.row_stack([expected]*2)\r\n assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),\r\n expected)\r\n assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),\r\n expected)\r\n\r\n\r\nclass TestProCvSeq(TestCase):\r\n def test_pro_cv_seq(self):\r\n prol = special.pro_cv_seq(0,3,1)\r\n assert_array_almost_equal(prol,array([0.319000,\r\n 2.593084,\r\n 6.533471,\r\n 12.514462]),5)\r\n\r\n\r\nclass TestPsi(TestCase):\r\n def test_psi(self):\r\n ps = special.psi(1)\r\n assert_almost_equal(ps,-0.57721566490153287,8)\r\n\r\n\r\nclass TestRadian(TestCase):\r\n def test_radian(self):\r\n rad = special.radian(90,0,0)\r\n assert_almost_equal(rad,pi/2.0,5)\r\n\r\n def test_radianmore(self):\r\n rad1 = special.radian(90,1,60)\r\n assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)\r\n\r\n\r\nclass TestRiccati(TestCase):\r\n def test_riccati_jn(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)\r\n ricjn = special.riccati_jn(1,.2)\r\n assert_array_almost_equal(ricjn,jnrl,8)\r\n\r\n def test_riccati_yn(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)\r\n ricyn = special.riccati_yn(1,.2)\r\n assert_array_almost_equal(ricyn,ynrl,8)\r\n\r\n\r\nclass TestRound(TestCase):\r\n def test_round(self):\r\n rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))\r\n\r\n # Note: According to the documentation, scipy.special.round is\r\n # supposed to round to the nearest even number if the fractional\r\n # part is exactly 0.5. On some platforms, this does not appear\r\n # to work and thus this test may fail. 
However, this unit test is\r\n # correctly written.\r\n rndrl = (10,10,10,11)\r\n assert_array_equal(rnd,rndrl)\r\n\r\n\r\ndef test_sph_harm():\r\n # Tests derived from tables in\r\n # http://en.wikipedia.org/wiki/Table_of_spherical_harmonics\r\n sh = special.sph_harm\r\n pi = np.pi\r\n exp = np.exp\r\n sqrt = np.sqrt\r\n sin = np.sin\r\n cos = np.cos\r\n yield (assert_array_almost_equal, sh(0,0,0,0),\r\n 0.5/sqrt(pi))\r\n yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),\r\n 0.25*sqrt(15./(2.*pi)) *\r\n (sin(pi/4))**2.)\r\n yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),\r\n 0.25*sqrt(15./(2.*pi)))\r\n yield (assert_array_almost_equal, sh(2,2,pi,pi/2),\r\n 0.25*sqrt(15/(2.*pi)) *\r\n exp(0+2.*pi*1j)*sin(pi/2.)**2.)\r\n yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),\r\n (3./8.)*sqrt(5./(2.*pi)) *\r\n exp(0+2.*pi/4.*1j) *\r\n sin(pi/3.)**2. *\r\n (7.*cos(pi/3.)**2.-1))\r\n yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),\r\n (3./16.)*sqrt(35./(2.*pi)) *\r\n exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)\r\n\r\n\r\ndef test_sph_harm_ufunc_loop_selection():\r\n # see https://github.com/scipy/scipy/issues/4895\r\n dt = np.dtype(np.complex128)\r\n assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)\r\n assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)\r\n assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)\r\n assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)\r\n assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)\r\n assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)\r\n\r\n\r\nclass TestSpherical(TestCase):\r\n def test_sph_harm(self):\r\n # see test_sph_harm function\r\n pass\r\n\r\n def test_sph_in(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n i1n = special.sph_in(1,.2)\r\n inp0 = (i1n[0][1])\r\n inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])\r\n assert_array_almost_equal(i1n[0],array([1.0066800127054699381,\r\n 0.066933714568029540839]),12)\r\n assert_array_almost_equal(i1n[1],[inp0,inp1],12)\r\n\r\n def test_sph_inkn(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]\r\n inkn = r_[special.sph_inkn(1,.2)]\r\n assert_array_almost_equal(inkn,spikn,10)\r\n\r\n def test_sph_in_kn_order0(self):\r\n x = 1.\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n sph_i0 = special.sph_in(0, x)\r\n sph_i0_expected = np.array([np.sinh(x)/x,\r\n np.cosh(x)/x-np.sinh(x)/x**2])\r\n assert_array_almost_equal(r_[sph_i0], sph_i0_expected)\r\n sph_k0 = special.sph_kn(0, x)\r\n sph_k0_expected = np.array([0.5*pi*exp(-x)/x,\r\n -0.5*pi*exp(-x)*(1/x+1/x**2)])\r\n assert_array_almost_equal(r_[sph_k0], sph_k0_expected)\r\n sph_i0k0 = special.sph_inkn(0, x)\r\n assert_array_almost_equal(r_[sph_i0+sph_k0],\r\n r_[sph_i0k0],\r\n 10)\r\n\r\n def test_sph_jn(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n s1 = special.sph_jn(2,.2)\r\n s10 = -s1[0][1]\r\n s11 = s1[0][0]-2.0/0.2*s1[0][1]\r\n s12 = s1[0][1]-3.0/0.2*s1[0][2]\r\n assert_array_almost_equal(s1[0],[0.99334665397530607731,\r\n 0.066400380670322230863,\r\n 0.0026590560795273856680],12)\r\n assert_array_almost_equal(s1[1],[s10,s11,s12],12)\r\n\r\n def test_sph_jnyn(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition\r\n jnyn1 = 
r_[special.sph_jnyn(1,.2)]\r\n assert_array_almost_equal(jnyn1,jnyn,9)\r\n\r\n def test_sph_kn(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n kn = special.sph_kn(2,.2)\r\n kn0 = -kn[0][1]\r\n kn1 = -kn[0][0]-2.0/0.2*kn[0][1]\r\n kn2 = -kn[0][1]-3.0/0.2*kn[0][2]\r\n assert_array_almost_equal(kn[0],[6.4302962978445670140,\r\n 38.581777787067402086,\r\n 585.15696310385559829],12)\r\n assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)\r\n\r\n def test_sph_yn(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", DeprecationWarning)\r\n sy1 = special.sph_yn(2,.2)[0][2]\r\n sy2 = special.sph_yn(0,.2)[0][0]\r\n sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 # correct derivative value\r\n assert_almost_equal(sy1,-377.52483,5) # previous values in the system\r\n assert_almost_equal(sy2,-4.9003329,5)\r\n sy3 = special.sph_yn(1,.2)[1][1]\r\n assert_almost_equal(sy3,sphpy,4) # compare correct derivative val. (correct =-system val).\r\n\r\n\r\nclass TestStruve(object):\r\n def _series(self, v, z, n=100):\r\n \"\"\"Compute Struve function & error estimate from its power series.\"\"\"\r\n k = arange(0, n)\r\n r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)\r\n err = abs(r).max() * finfo(float_).eps * n\r\n return r.sum(), err\r\n\r\n def test_vs_series(self):\r\n \"\"\"Check Struve function versus its power series\"\"\"\r\n for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:\r\n for z in [1, 10, 19, 21, 30]:\r\n value, err = self._series(v, z)\r\n assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)\r\n\r\n def test_some_values(self):\r\n assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)\r\n assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)\r\n assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)\r\n assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)\r\n assert_equal(special.struve(-12, -41), -special.struve(-12, 41))\r\n assert_equal(special.struve(+12, -41), -special.struve(+12, 41))\r\n assert_equal(special.struve(-11, -41), +special.struve(-11, 41))\r\n assert_equal(special.struve(+11, -41), +special.struve(+11, 41))\r\n\r\n assert_(isnan(special.struve(-7.1, -1)))\r\n assert_(isnan(special.struve(-10.1, -1)))\r\n\r\n def test_regression_679(self):\r\n \"\"\"Regression test for #679\"\"\"\r\n assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))\r\n assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))\r\n assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))\r\n\r\n\r\ndef test_chi2_smalldf():\r\n assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)\r\n\r\n\r\ndef test_ch2_inf():\r\n assert_equal(special.chdtr(0.7,np.inf), 1.0)\r\n\r\n\r\ndef test_chi2c_smalldf():\r\n assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)\r\n\r\n\r\ndef test_chi2_inv_smalldf():\r\n assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)\r\n\r\n\r\ndef test_agm_simple():\r\n assert_allclose(special.agm(24, 6), 13.4581714817)\r\n assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)\r\n\r\n\r\ndef test_legacy():\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", RuntimeWarning)\r\n\r\n # Legacy behavior: truncating arguments to integers\r\n assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))\r\n 
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))\r\n assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))\r\n assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))\r\n assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))\r\n assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))\r\n assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))\r\n assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))\r\n assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))\r\n assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))\r\n assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))\r\n assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))\r\n assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))\r\n assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))\r\n assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))\r\n\r\n\r\n@with_special_errors\r\ndef test_error_raising():\r\n assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)\r\n\r\n\r\ndef test_xlogy():\r\n def xfunc(x, y):\r\n if x == 0 and not np.isnan(y):\r\n return x\r\n else:\r\n return x*np.log(y)\r\n\r\n z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)\r\n z2 = np.r_[z1, [(0, 1j), (1, 1j)]]\r\n\r\n w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])\r\n assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)\r\n w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])\r\n assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)\r\n\r\n\r\ndef test_xlog1py():\r\n def xfunc(x, y):\r\n if x == 0 and not np.isnan(y):\r\n return x\r\n else:\r\n return x * np.log1p(y)\r\n\r\n z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),\r\n (1, 1e-30)], dtype=float)\r\n w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])\r\n assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)\r\n\r\n\r\ndef test_entr():\r\n def xfunc(x):\r\n if x < 0:\r\n return -np.inf\r\n else:\r\n return -special.xlogy(x, x)\r\n values = (0, 0.5, 1.0, np.inf)\r\n signs = [-1, 1]\r\n arr = []\r\n for sgn, v in itertools.product(signs, values):\r\n arr.append(sgn * v)\r\n z = np.array(arr, dtype=float)\r\n w = np.vectorize(xfunc, otypes=[np.float64])(z)\r\n assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)\r\n\r\n\r\ndef test_kl_div():\r\n def xfunc(x, y):\r\n if x < 0 or y < 0 or (y == 0 and x != 0):\r\n # extension of natural domain to preserve convexity\r\n return np.inf\r\n elif np.isposinf(x) or np.isposinf(y):\r\n # limits within the natural domain\r\n return np.inf\r\n elif x == 0:\r\n return y\r\n else:\r\n return special.xlogy(x, x/y) - x + y\r\n values = (0, 0.5, 1.0)\r\n signs = [-1, 1]\r\n arr = []\r\n for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):\r\n arr.append((sgna*va, sgnb*vb))\r\n z = np.array(arr, dtype=float)\r\n w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])\r\n assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)\r\n\r\n\r\ndef test_rel_entr():\r\n def xfunc(x, y):\r\n if x > 0 and y > 0:\r\n return special.xlogy(x, x/y)\r\n elif x == 0 and y >= 0:\r\n return 0\r\n else:\r\n return np.inf\r\n values = (0, 0.5, 1.0)\r\n signs = [-1, 1]\r\n arr = []\r\n for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):\r\n arr.append((sgna*va, sgnb*vb))\r\n z = np.array(arr, dtype=float)\r\n w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])\r\n assert_func_equal(special.rel_entr, w, z, 
rtol=1e-13, atol=1e-13)\r\n\r\n\r\ndef test_huber():\r\n assert_equal(special.huber(-1, 1.5), np.inf)\r\n assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))\r\n assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))\r\n\r\n def xfunc(delta, r):\r\n if delta < 0:\r\n return np.inf\r\n elif np.abs(r) < delta:\r\n return 0.5 * np.square(r)\r\n else:\r\n return delta * (np.abs(r) - 0.5 * delta)\r\n\r\n z = np.random.randn(10, 2)\r\n w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])\r\n assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)\r\n\r\n\r\ndef test_pseudo_huber():\r\n def xfunc(delta, r):\r\n if delta < 0:\r\n return np.inf\r\n elif (not delta) or (not r):\r\n return 0\r\n else:\r\n return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)\r\n\r\n z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])\r\n w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])\r\n assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n", "\"\"\"\r\nObjects for dealing with Chebyshev series.\r\n\r\nThis module provides a number of objects (mostly functions) useful for\r\ndealing with Chebyshev series, including a `Chebyshev` class that\r\nencapsulates the usual arithmetic operations. (General information\r\non how this module represents and works with such polynomials is in the\r\ndocstring for its \"parent\" sub-package, `numpy.polynomial`).\r\n\r\nConstants\r\n---------\r\n- `chebdomain` -- Chebyshev series default domain, [-1,1].\r\n- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates\r\n identically to 0.\r\n- `chebone` -- (Coefficients of the) Chebyshev series that evaluates\r\n identically to 1.\r\n- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,\r\n ``f(x) = x``.\r\n\r\nArithmetic\r\n----------\r\n- `chebadd` -- add two Chebyshev series.\r\n- `chebsub` -- subtract one Chebyshev series from another.\r\n- `chebmul` -- multiply two Chebyshev series.\r\n- `chebdiv` -- divide one Chebyshev series by another.\r\n- `chebpow` -- raise a Chebyshev series to an positive integer power\r\n- `chebval` -- evaluate a Chebyshev series at given points.\r\n- `chebval2d` -- evaluate a 2D Chebyshev series at given points.\r\n- `chebval3d` -- evaluate a 3D Chebyshev series at given points.\r\n- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.\r\n- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.\r\n\r\nCalculus\r\n--------\r\n- `chebder` -- differentiate a Chebyshev series.\r\n- `chebint` -- integrate a Chebyshev series.\r\n\r\nMisc Functions\r\n--------------\r\n- `chebfromroots` -- create a Chebyshev series with specified roots.\r\n- `chebroots` -- find the roots of a Chebyshev series.\r\n- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.\r\n- `chebvander2d` -- Vandermonde-like matrix for 2D power series.\r\n- `chebvander3d` -- Vandermonde-like matrix for 3D power series.\r\n- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.\r\n- `chebweight` -- Chebyshev weight function.\r\n- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.\r\n- `chebfit` -- least-squares fit returning a Chebyshev series.\r\n- `chebpts1` -- Chebyshev points of the first kind.\r\n- `chebpts2` -- Chebyshev points of the second kind.\r\n- `chebtrim` -- trim leading coefficients from a Chebyshev series.\r\n- `chebline` -- Chebyshev series representing given straight line.\r\n- `cheb2poly` -- 
convert a Chebyshev series to a polynomial.\r\n- `poly2cheb` -- convert a polynomial to a Chebyshev series.\r\n\r\nClasses\r\n-------\r\n- `Chebyshev` -- A Chebyshev series class.\r\n\r\nSee also\r\n--------\r\n`numpy.polynomial`\r\n\r\nNotes\r\n-----\r\nThe implementations of multiplication, division, integration, and\r\ndifferentiation use the algebraic identities [1]_:\r\n\r\n.. math ::\r\n T_n(x) = \\\\frac{z^n + z^{-n}}{2} \\\\\\\\\r\n z\\\\frac{dx}{dz} = \\\\frac{z - z^{-1}}{2}.\r\n\r\nwhere\r\n\r\n.. math :: x = \\\\frac{z + z^{-1}}{2}.\r\n\r\nThese identities allow a Chebyshev series to be expressed as a finite,\r\nsymmetric Laurent series. In this module, this sort of Laurent series\r\nis referred to as a \"z-series.\"\r\n\r\nReferences\r\n----------\r\n.. [1] A. T. Benjamin, et al., \"Combinatorial Trigonometry with Chebyshev\r\n Polynomials,\" *Journal of Statistical Planning and Inference 14*, 2008\r\n (preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)\r\n\r\n\"\"\"\r\nfrom __future__ import division, absolute_import, print_function\r\n\r\nimport warnings\r\nimport numpy as np\r\nimport numpy.linalg as la\r\n\r\nfrom . import polyutils as pu\r\nfrom ._polybase import ABCPolyBase\r\n\r\n__all__ = [\r\n 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',\r\n 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',\r\n 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',\r\n 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',\r\n 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',\r\n 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',\r\n 'chebgauss', 'chebweight']\r\n\r\nchebtrim = pu.trimcoef\r\n\r\n#\r\n# A collection of functions for manipulating z-series. These are private\r\n# functions and do minimal error checking.\r\n#\r\n\r\ndef _cseries_to_zseries(c):\r\n \"\"\"Covert Chebyshev series to z-series.\r\n\r\n Covert a Chebyshev series to the equivalent z-series. The result is\r\n never an empty array. The dtype of the return is the same as that of\r\n the input. No checks are run on the arguments as this routine is for\r\n internal use.\r\n\r\n Parameters\r\n ----------\r\n c : 1-D ndarray\r\n Chebyshev coefficients, ordered from low to high\r\n\r\n Returns\r\n -------\r\n zs : 1-D ndarray\r\n Odd length symmetric z-series, ordered from low to high.\r\n\r\n \"\"\"\r\n n = c.size\r\n zs = np.zeros(2*n-1, dtype=c.dtype)\r\n zs[n-1:] = c/2\r\n return zs + zs[::-1]\r\n\r\n\r\ndef _zseries_to_cseries(zs):\r\n \"\"\"Covert z-series to a Chebyshev series.\r\n\r\n Covert a z series to the equivalent Chebyshev series. The result is\r\n never an empty array. The dtype of the return is the same as that of\r\n the input. No checks are run on the arguments as this routine is for\r\n internal use.\r\n\r\n Parameters\r\n ----------\r\n zs : 1-D ndarray\r\n Odd length symmetric z-series, ordered from low to high.\r\n\r\n Returns\r\n -------\r\n c : 1-D ndarray\r\n Chebyshev coefficients, ordered from low to high.\r\n\r\n \"\"\"\r\n n = (zs.size + 1)//2\r\n c = zs[n-1:].copy()\r\n c[1:n] *= 2\r\n return c\r\n\r\n\r\ndef _zseries_mul(z1, z2):\r\n \"\"\"Multiply two z-series.\r\n\r\n Multiply two z-series to produce a z-series.\r\n\r\n Parameters\r\n ----------\r\n z1, z2 : 1-D ndarray\r\n The arrays must be 1-D but this is not checked.\r\n\r\n Returns\r\n -------\r\n product : 1-D ndarray\r\n The product z-series.\r\n\r\n Notes\r\n -----\r\n This is simply convolution. 
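# --- Editor's illustration (hedged sketch, not part of the original numpy source) ---
# The z-series idea described above can be checked numerically with the public
# API: multiplying two Chebyshev series reduces to convolving their symmetric
# z-series, which is what chebmul() does internally. For example
# T_1 * T_1 = (T_0 + T_2)/2, i.e. coefficients [0.5, 0, 0.5].
import numpy as np
from numpy.polynomial import chebyshev as C

z = np.array([0.5, 0.0, 0.5])   # z-series of T_1: the coefficient 1 halved and mirrored
zz = np.convolve(z, z)          # -> [0.25, 0, 0.5, 0, 0.25], the z-series of (T_0 + T_2)/2
assert np.allclose(C.chebmul([0, 1], [0, 1]), [0.5, 0.0, 0.5])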
If symmetric/anti-symmetric z-series are\r\n denoted by S/A then the following rules apply:\r\n\r\n S*S, A*A -> S\r\n S*A, A*S -> A\r\n\r\n \"\"\"\r\n return np.convolve(z1, z2)\r\n\r\n\r\ndef _zseries_div(z1, z2):\r\n \"\"\"Divide the first z-series by the second.\r\n\r\n Divide `z1` by `z2` and return the quotient and remainder as z-series.\r\n Warning: this implementation only applies when both z1 and z2 have the\r\n same symmetry, which is sufficient for present purposes.\r\n\r\n Parameters\r\n ----------\r\n z1, z2 : 1-D ndarray\r\n The arrays must be 1-D and have the same symmetry, but this is not\r\n checked.\r\n\r\n Returns\r\n -------\r\n\r\n (quotient, remainder) : 1-D ndarrays\r\n Quotient and remainder as z-series.\r\n\r\n Notes\r\n -----\r\n This is not the same as polynomial division on account of the desired form\r\n of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A\r\n then the following rules apply:\r\n\r\n S/S -> S,S\r\n A/A -> S,A\r\n\r\n The restriction to types of the same symmetry could be fixed but seems like\r\n unneeded generality. There is no natural form for the remainder in the case\r\n where there is no symmetry.\r\n\r\n \"\"\"\r\n z1 = z1.copy()\r\n z2 = z2.copy()\r\n len1 = len(z1)\r\n len2 = len(z2)\r\n if len2 == 1:\r\n z1 /= z2\r\n return z1, z1[:1]*0\r\n elif len1 < len2:\r\n return z1[:1]*0, z1\r\n else:\r\n dlen = len1 - len2\r\n scl = z2[0]\r\n z2 /= scl\r\n quo = np.empty(dlen + 1, dtype=z1.dtype)\r\n i = 0\r\n j = dlen\r\n while i < j:\r\n r = z1[i]\r\n quo[i] = z1[i]\r\n quo[dlen - i] = r\r\n tmp = r*z2\r\n z1[i:i+len2] -= tmp\r\n z1[j:j+len2] -= tmp\r\n i += 1\r\n j -= 1\r\n r = z1[i]\r\n quo[i] = r\r\n tmp = r*z2\r\n z1[i:i+len2] -= tmp\r\n quo /= scl\r\n rem = z1[i+1:i-1+len2].copy()\r\n return quo, rem\r\n\r\n\r\ndef _zseries_der(zs):\r\n \"\"\"Differentiate a z-series.\r\n\r\n The derivative is with respect to x, not z. This is achieved using the\r\n chain rule and the value of dx/dz given in the module notes.\r\n\r\n Parameters\r\n ----------\r\n zs : z-series\r\n The z-series to differentiate.\r\n\r\n Returns\r\n -------\r\n derivative : z-series\r\n The derivative\r\n\r\n Notes\r\n -----\r\n The zseries for x (ns) has been multiplied by two in order to avoid\r\n using floats that are incompatible with Decimal and likely other\r\n specialized scalar types. This scaling has been compensated by\r\n multiplying the value of zs by two also so that the two cancels in the\r\n division.\r\n\r\n \"\"\"\r\n n = len(zs)//2\r\n ns = np.array([-1, 0, 1], dtype=zs.dtype)\r\n zs *= np.arange(-n, n+1)*2\r\n d, r = _zseries_div(zs, ns)\r\n return d\r\n\r\n\r\ndef _zseries_int(zs):\r\n \"\"\"Integrate a z-series.\r\n\r\n The integral is with respect to x, not z. This is achieved by a change\r\n of variable using dx/dz given in the module notes.\r\n\r\n Parameters\r\n ----------\r\n zs : z-series\r\n The z-series to integrate\r\n\r\n Returns\r\n -------\r\n integral : z-series\r\n The indefinite integral\r\n\r\n Notes\r\n -----\r\n The zseries for x (ns) has been multiplied by two in order to avoid\r\n using floats that are incompatible with Decimal and likely other\r\n specialized scalar types. 
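# --- Editor's illustration (hedged sketch, not part of the original numpy source) ---
# The z-series calculus above mirrors the public chebder()/chebint() pair;
# whichever route is used internally, integrating a series and then
# differentiating the result should reproduce the original coefficients,
# since the integration constant vanishes under differentiation.
import numpy as np
from numpy.polynomial import chebyshev as C

c = np.array([1.0, 2.0, 3.0])
assert np.allclose(C.chebder(C.chebint(c)), c)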
This scaling has been compensated by\r\n dividing the resulting zs by two.\r\n\r\n \"\"\"\r\n n = 1 + len(zs)//2\r\n ns = np.array([-1, 0, 1], dtype=zs.dtype)\r\n zs = _zseries_mul(zs, ns)\r\n div = np.arange(-n, n+1)*2\r\n zs[:n] /= div[:n]\r\n zs[n+1:] /= div[n+1:]\r\n zs[n] = 0\r\n return zs\r\n\r\n#\r\n# Chebyshev series functions\r\n#\r\n\r\n\r\ndef poly2cheb(pol):\r\n \"\"\"\r\n Convert a polynomial to a Chebyshev series.\r\n\r\n Convert an array representing the coefficients of a polynomial (relative\r\n to the \"standard\" basis) ordered from lowest degree to highest, to an\r\n array of the coefficients of the equivalent Chebyshev series, ordered\r\n from lowest to highest degree.\r\n\r\n Parameters\r\n ----------\r\n pol : array_like\r\n 1-D array containing the polynomial coefficients\r\n\r\n Returns\r\n -------\r\n c : ndarray\r\n 1-D array containing the coefficients of the equivalent Chebyshev\r\n series.\r\n\r\n See Also\r\n --------\r\n cheb2poly\r\n\r\n Notes\r\n -----\r\n The easy way to do conversions between polynomial basis sets\r\n is to use the convert method of a class instance.\r\n\r\n Examples\r\n --------\r\n >>> from numpy import polynomial as P\r\n >>> p = P.Polynomial(range(4))\r\n >>> p\r\n Polynomial([ 0., 1., 2., 3.], [-1., 1.])\r\n >>> c = p.convert(kind=P.Chebyshev)\r\n >>> c\r\n Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])\r\n >>> P.poly2cheb(range(4))\r\n array([ 1. , 3.25, 1. , 0.75])\r\n\r\n \"\"\"\r\n [pol] = pu.as_series([pol])\r\n deg = len(pol) - 1\r\n res = 0\r\n for i in range(deg, -1, -1):\r\n res = chebadd(chebmulx(res), pol[i])\r\n return res\r\n\r\n\r\ndef cheb2poly(c):\r\n \"\"\"\r\n Convert a Chebyshev series to a polynomial.\r\n\r\n Convert an array representing the coefficients of a Chebyshev series,\r\n ordered from lowest degree to highest, to an array of the coefficients\r\n of the equivalent polynomial (relative to the \"standard\" basis) ordered\r\n from lowest to highest degree.\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n 1-D array containing the Chebyshev series coefficients, ordered\r\n from lowest order term to highest.\r\n\r\n Returns\r\n -------\r\n pol : ndarray\r\n 1-D array containing the coefficients of the equivalent polynomial\r\n (relative to the \"standard\" basis) ordered from lowest order term\r\n to highest.\r\n\r\n See Also\r\n --------\r\n poly2cheb\r\n\r\n Notes\r\n -----\r\n The easy way to do conversions between polynomial basis sets\r\n is to use the convert method of a class instance.\r\n\r\n Examples\r\n --------\r\n >>> from numpy import polynomial as P\r\n >>> c = P.Chebyshev(range(4))\r\n >>> c\r\n Chebyshev([ 0., 1., 2., 3.], [-1., 1.])\r\n >>> p = c.convert(kind=P.Polynomial)\r\n >>> p\r\n Polynomial([ -2., -8., 4., 12.], [-1., 1.])\r\n >>> P.cheb2poly(range(4))\r\n array([ -2., -8., 4., 12.])\r\n\r\n \"\"\"\r\n from .polynomial import polyadd, polysub, polymulx\r\n\r\n [c] = pu.as_series([c])\r\n n = len(c)\r\n if n < 3:\r\n return c\r\n else:\r\n c0 = c[-2]\r\n c1 = c[-1]\r\n # i is the current degree of c1\r\n for i in range(n - 1, 1, -1):\r\n tmp = c0\r\n c0 = polysub(c[i - 2], c1)\r\n c1 = polyadd(tmp, polymulx(c1)*2)\r\n return polyadd(c0, polymulx(c1))\r\n\r\n\r\n#\r\n# These are constant arrays are of integer type so as to be compatible\r\n# with the widest range of other types, such as Decimal.\r\n#\r\n\r\n# Chebyshev default domain.\r\nchebdomain = np.array([-1, 1])\r\n\r\n# Chebyshev coefficients representing zero.\r\nchebzero = np.array([0])\r\n\r\n# Chebyshev coefficients 
representing one.\r\nchebone = np.array([1])\r\n\r\n# Chebyshev coefficients representing the identity x.\r\nchebx = np.array([0, 1])\r\n\r\n\r\ndef chebline(off, scl):\r\n \"\"\"\r\n Chebyshev series whose graph is a straight line.\r\n\r\n\r\n\r\n Parameters\r\n ----------\r\n off, scl : scalars\r\n The specified line is given by ``off + scl*x``.\r\n\r\n Returns\r\n -------\r\n y : ndarray\r\n This module's representation of the Chebyshev series for\r\n ``off + scl*x``.\r\n\r\n See Also\r\n --------\r\n polyline\r\n\r\n Examples\r\n --------\r\n >>> import numpy.polynomial.chebyshev as C\r\n >>> C.chebline(3,2)\r\n array([3, 2])\r\n >>> C.chebval(-3, C.chebline(3,2)) # should be -3\r\n -3.0\r\n\r\n \"\"\"\r\n if scl != 0:\r\n return np.array([off, scl])\r\n else:\r\n return np.array([off])\r\n\r\n\r\ndef chebfromroots(roots):\r\n \"\"\"\r\n Generate a Chebyshev series with given roots.\r\n\r\n The function returns the coefficients of the polynomial\r\n\r\n .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),\r\n\r\n in Chebyshev form, where the `r_n` are the roots specified in `roots`.\r\n If a zero has multiplicity n, then it must appear in `roots` n times.\r\n For instance, if 2 is a root of multiplicity three and 3 is a root of\r\n multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The\r\n roots can appear in any order.\r\n\r\n If the returned coefficients are `c`, then\r\n\r\n .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)\r\n\r\n The coefficient of the last term is not generally 1 for monic\r\n polynomials in Chebyshev form.\r\n\r\n Parameters\r\n ----------\r\n roots : array_like\r\n Sequence containing the roots.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n 1-D array of coefficients. If all roots are real then `out` is a\r\n real array, if some of the roots are complex, then `out` is complex\r\n even if all the coefficients in the result are real (see Examples\r\n below).\r\n\r\n See Also\r\n --------\r\n polyfromroots, legfromroots, lagfromroots, hermfromroots,\r\n hermefromroots.\r\n\r\n Examples\r\n --------\r\n >>> import numpy.polynomial.chebyshev as C\r\n >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis\r\n array([ 0. , -0.25, 0. , 0.25])\r\n >>> j = complex(0,1)\r\n >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis\r\n array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])\r\n\r\n \"\"\"\r\n if len(roots) == 0:\r\n return np.ones(1)\r\n else:\r\n [roots] = pu.as_series([roots], trim=False)\r\n roots.sort()\r\n p = [chebline(-r, 1) for r in roots]\r\n n = len(p)\r\n while n > 1:\r\n m, r = divmod(n, 2)\r\n tmp = [chebmul(p[i], p[i+m]) for i in range(m)]\r\n if r:\r\n tmp[0] = chebmul(tmp[0], p[-1])\r\n p = tmp\r\n n = m\r\n return p[0]\r\n\r\n\r\ndef chebadd(c1, c2):\r\n \"\"\"\r\n Add one Chebyshev series to another.\r\n\r\n Returns the sum of two Chebyshev series `c1` + `c2`. 
The arguments\r\n are sequences of coefficients ordered from lowest order term to\r\n highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.\r\n\r\n Parameters\r\n ----------\r\n c1, c2 : array_like\r\n 1-D arrays of Chebyshev series coefficients ordered from low to\r\n high.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n Array representing the Chebyshev series of their sum.\r\n\r\n See Also\r\n --------\r\n chebsub, chebmul, chebdiv, chebpow\r\n\r\n Notes\r\n -----\r\n Unlike multiplication, division, etc., the sum of two Chebyshev series\r\n is a Chebyshev series (without having to \"reproject\" the result onto\r\n the basis set) so addition, just like that of \"standard\" polynomials,\r\n is simply \"component-wise.\"\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import chebyshev as C\r\n >>> c1 = (1,2,3)\r\n >>> c2 = (3,2,1)\r\n >>> C.chebadd(c1,c2)\r\n array([ 4., 4., 4.])\r\n\r\n \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = pu.as_series([c1, c2])\r\n if len(c1) > len(c2):\r\n c1[:c2.size] += c2\r\n ret = c1\r\n else:\r\n c2[:c1.size] += c1\r\n ret = c2\r\n return pu.trimseq(ret)\r\n\r\n\r\ndef chebsub(c1, c2):\r\n \"\"\"\r\n Subtract one Chebyshev series from another.\r\n\r\n Returns the difference of two Chebyshev series `c1` - `c2`. The\r\n sequences of coefficients are from lowest order term to highest, i.e.,\r\n [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.\r\n\r\n Parameters\r\n ----------\r\n c1, c2 : array_like\r\n 1-D arrays of Chebyshev series coefficients ordered from low to\r\n high.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n Of Chebyshev series coefficients representing their difference.\r\n\r\n See Also\r\n --------\r\n chebadd, chebmul, chebdiv, chebpow\r\n\r\n Notes\r\n -----\r\n Unlike multiplication, division, etc., the difference of two Chebyshev\r\n series is a Chebyshev series (without having to \"reproject\" the result\r\n onto the basis set) so subtraction, just like that of \"standard\"\r\n polynomials, is simply \"component-wise.\"\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import chebyshev as C\r\n >>> c1 = (1,2,3)\r\n >>> c2 = (3,2,1)\r\n >>> C.chebsub(c1,c2)\r\n array([-2., 0., 2.])\r\n >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)\r\n array([ 2., 0., -2.])\r\n\r\n \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = pu.as_series([c1, c2])\r\n if len(c1) > len(c2):\r\n c1[:c2.size] -= c2\r\n ret = c1\r\n else:\r\n c2 = -c2\r\n c2[:c1.size] += c1\r\n ret = c2\r\n return pu.trimseq(ret)\r\n\r\n\r\ndef chebmulx(c):\r\n \"\"\"Multiply a Chebyshev series by x.\r\n\r\n Multiply the polynomial `c` by x, where x is the independent\r\n variable.\r\n\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n 1-D array of Chebyshev series coefficients ordered from low to\r\n high.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n Array representing the result of the multiplication.\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.5.0\r\n\r\n \"\"\"\r\n # c is a trimmed copy\r\n [c] = pu.as_series([c])\r\n # The zero series needs special treatment\r\n if len(c) == 1 and c[0] == 0:\r\n return c\r\n\r\n prd = np.empty(len(c) + 1, dtype=c.dtype)\r\n prd[0] = c[0]*0\r\n prd[1] = c[0]\r\n if len(c) > 1:\r\n tmp = c[1:]/2\r\n prd[2:] = tmp\r\n prd[0:-2] += tmp\r\n return prd\r\n\r\n\r\ndef chebmul(c1, c2):\r\n \"\"\"\r\n Multiply one Chebyshev series by another.\r\n\r\n Returns the product of two Chebyshev series `c1` * `c2`. 
The arguments\r\n are sequences of coefficients, from lowest order \"term\" to highest,\r\n e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.\r\n\r\n Parameters\r\n ----------\r\n c1, c2 : array_like\r\n 1-D arrays of Chebyshev series coefficients ordered from low to\r\n high.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n Of Chebyshev series coefficients representing their product.\r\n\r\n See Also\r\n --------\r\n chebadd, chebsub, chebdiv, chebpow\r\n\r\n Notes\r\n -----\r\n In general, the (polynomial) product of two C-series results in terms\r\n that are not in the Chebyshev polynomial basis set. Thus, to express\r\n the product as a C-series, it is typically necessary to \"reproject\"\r\n the product onto said basis set, which typically produces\r\n \"unintuitive live\" (but correct) results; see Examples section below.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import chebyshev as C\r\n >>> c1 = (1,2,3)\r\n >>> c2 = (3,2,1)\r\n >>> C.chebmul(c1,c2) # multiplication requires \"reprojection\"\r\n array([ 6.5, 12. , 12. , 4. , 1.5])\r\n\r\n \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = pu.as_series([c1, c2])\r\n z1 = _cseries_to_zseries(c1)\r\n z2 = _cseries_to_zseries(c2)\r\n prd = _zseries_mul(z1, z2)\r\n ret = _zseries_to_cseries(prd)\r\n return pu.trimseq(ret)\r\n\r\n\r\ndef chebdiv(c1, c2):\r\n \"\"\"\r\n Divide one Chebyshev series by another.\r\n\r\n Returns the quotient-with-remainder of two Chebyshev series\r\n `c1` / `c2`. The arguments are sequences of coefficients from lowest\r\n order \"term\" to highest, e.g., [1,2,3] represents the series\r\n ``T_0 + 2*T_1 + 3*T_2``.\r\n\r\n Parameters\r\n ----------\r\n c1, c2 : array_like\r\n 1-D arrays of Chebyshev series coefficients ordered from low to\r\n high.\r\n\r\n Returns\r\n -------\r\n [quo, rem] : ndarrays\r\n Of Chebyshev series coefficients representing the quotient and\r\n remainder.\r\n\r\n See Also\r\n --------\r\n chebadd, chebsub, chebmul, chebpow\r\n\r\n Notes\r\n -----\r\n In general, the (polynomial) division of one C-series by another\r\n results in quotient and remainder terms that are not in the Chebyshev\r\n polynomial basis set. Thus, to express these results as C-series, it\r\n is typically necessary to \"reproject\" the results onto said basis\r\n set, which typically produces \"unintuitive\" (but correct) results;\r\n see Examples section below.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import chebyshev as C\r\n >>> c1 = (1,2,3)\r\n >>> c2 = (3,2,1)\r\n >>> C.chebdiv(c1,c2) # quotient \"intuitive,\" remainder not\r\n (array([ 3.]), array([-8., -4.]))\r\n >>> c2 = (0,1,2,3)\r\n >>> C.chebdiv(c2,c1) # neither \"intuitive\"\r\n (array([ 0., 2.]), array([-2., -4.]))\r\n\r\n \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = pu.as_series([c1, c2])\r\n if c2[-1] == 0:\r\n raise ZeroDivisionError()\r\n\r\n lc1 = len(c1)\r\n lc2 = len(c2)\r\n if lc1 < lc2:\r\n return c1[:1]*0, c1\r\n elif lc2 == 1:\r\n return c1/c2[-1], c1[:1]*0\r\n else:\r\n z1 = _cseries_to_zseries(c1)\r\n z2 = _cseries_to_zseries(c2)\r\n quo, rem = _zseries_div(z1, z2)\r\n quo = pu.trimseq(_zseries_to_cseries(quo))\r\n rem = pu.trimseq(_zseries_to_cseries(rem))\r\n return quo, rem\r\n\r\n\r\ndef chebpow(c, pow, maxpower=16):\r\n \"\"\"Raise a Chebyshev series to a power.\r\n\r\n Returns the Chebyshev series `c` raised to the power `pow`. 
The\r\n argument `c` is a sequence of coefficients ordered from low to high.\r\n i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n 1-D array of Chebyshev series coefficients ordered from low to\r\n high.\r\n pow : integer\r\n Power to which the series will be raised\r\n maxpower : integer, optional\r\n Maximum power allowed. This is mainly to limit growth of the series\r\n to unmanageable size. Default is 16\r\n\r\n Returns\r\n -------\r\n coef : ndarray\r\n Chebyshev series of power.\r\n\r\n See Also\r\n --------\r\n chebadd, chebsub, chebmul, chebdiv\r\n\r\n Examples\r\n --------\r\n\r\n \"\"\"\r\n # c is a trimmed copy\r\n [c] = pu.as_series([c])\r\n power = int(pow)\r\n if power != pow or power < 0:\r\n raise ValueError(\"Power must be a non-negative integer.\")\r\n elif maxpower is not None and power > maxpower:\r\n raise ValueError(\"Power is too large\")\r\n elif power == 0:\r\n return np.array([1], dtype=c.dtype)\r\n elif power == 1:\r\n return c\r\n else:\r\n # This can be made more efficient by using powers of two\r\n # in the usual way.\r\n zs = _cseries_to_zseries(c)\r\n prd = zs\r\n for i in range(2, power + 1):\r\n prd = np.convolve(prd, zs)\r\n return _zseries_to_cseries(prd)\r\n\r\n\r\ndef chebder(c, m=1, scl=1, axis=0):\r\n \"\"\"\r\n Differentiate a Chebyshev series.\r\n\r\n Returns the Chebyshev series coefficients `c` differentiated `m` times\r\n along `axis`. At each iteration the result is multiplied by `scl` (the\r\n scaling factor is for use in a linear change of variable). The argument\r\n `c` is an array of coefficients from low to high degree along each\r\n axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``\r\n while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +\r\n 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is\r\n ``y``.\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n Array of Chebyshev series coefficients. If c is multidimensional\r\n the different axis correspond to different variables with the\r\n degree in each axis given by the corresponding index.\r\n m : int, optional\r\n Number of derivatives taken, must be non-negative. (Default: 1)\r\n scl : scalar, optional\r\n Each differentiation is multiplied by `scl`. The end result is\r\n multiplication by ``scl**m``. This is for use in a linear change of\r\n variable. (Default: 1)\r\n axis : int, optional\r\n Axis over which the derivative is taken. (Default: 0).\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n Returns\r\n -------\r\n der : ndarray\r\n Chebyshev series of the derivative.\r\n\r\n See Also\r\n --------\r\n chebint\r\n\r\n Notes\r\n -----\r\n In general, the result of differentiating a C-series needs to be\r\n \"reprojected\" onto the C-series basis set. 
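# --- Editor's illustration (hedged sketch, not part of the original numpy source) ---
# chebpow, defined just above, ships an empty Examples section in this copy.
# A minimal check of its documented behaviour: squaring a series agrees with
# multiplying the series by itself.
import numpy as np
from numpy.polynomial import chebyshev as C

c = [1.0, 2.0, 3.0]
assert np.allclose(C.chebpow(c, 2), C.chebmul(c, c))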
Thus, typically, the\r\n result of this function is \"unintuitive,\" albeit correct; see Examples\r\n section below.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import chebyshev as C\r\n >>> c = (1,2,3,4)\r\n >>> C.chebder(c)\r\n array([ 14., 12., 24.])\r\n >>> C.chebder(c,3)\r\n array([ 96.])\r\n >>> C.chebder(c,scl=-1)\r\n array([-14., -12., -24.])\r\n >>> C.chebder(c,2,-1)\r\n array([ 12., 96.])\r\n\r\n \"\"\"\r\n c = np.array(c, ndmin=1, copy=1)\r\n if c.dtype.char in '?bBhHiIlLqQpP':\r\n c = c.astype(np.double)\r\n cnt, iaxis = [int(t) for t in [m, axis]]\r\n\r\n if cnt != m:\r\n raise ValueError(\"The order of derivation must be integer\")\r\n if cnt < 0:\r\n raise ValueError(\"The order of derivation must be non-negative\")\r\n if iaxis != axis:\r\n raise ValueError(\"The axis must be integer\")\r\n if not -c.ndim <= iaxis < c.ndim:\r\n raise ValueError(\"The axis is out of range\")\r\n if iaxis < 0:\r\n iaxis += c.ndim\r\n\r\n if cnt == 0:\r\n return c\r\n\r\n c = np.rollaxis(c, iaxis)\r\n n = len(c)\r\n if cnt >= n:\r\n c = c[:1]*0\r\n else:\r\n for i in range(cnt):\r\n n = n - 1\r\n c *= scl\r\n der = np.empty((n,) + c.shape[1:], dtype=c.dtype)\r\n for j in range(n, 2, -1):\r\n der[j - 1] = (2*j)*c[j]\r\n c[j - 2] += (j*c[j])/(j - 2)\r\n if n > 1:\r\n der[1] = 4*c[2]\r\n der[0] = c[1]\r\n c = der\r\n c = np.rollaxis(c, 0, iaxis + 1)\r\n return c\r\n\r\n\r\ndef chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):\r\n \"\"\"\r\n Integrate a Chebyshev series.\r\n\r\n Returns the Chebyshev series coefficients `c` integrated `m` times from\r\n `lbnd` along `axis`. At each iteration the resulting series is\r\n **multiplied** by `scl` and an integration constant, `k`, is added.\r\n The scaling factor is for use in a linear change of variable. (\"Buyer\r\n beware\": note that, depending on what one is doing, one may want `scl`\r\n to be the reciprocal of what one might expect; for more information,\r\n see the Notes section below.) The argument `c` is an array of\r\n coefficients from low to high degree along each axis, e.g., [1,2,3]\r\n represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]\r\n represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +\r\n 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n Array of Chebyshev series coefficients. If c is multidimensional\r\n the different axis correspond to different variables with the\r\n degree in each axis given by the corresponding index.\r\n m : int, optional\r\n Order of integration, must be positive. (Default: 1)\r\n k : {[], list, scalar}, optional\r\n Integration constant(s). The value of the first integral at zero\r\n is the first value in the list, the value of the second integral\r\n at zero is the second value, etc. If ``k == []`` (the default),\r\n all constants are set to zero. If ``m == 1``, a single scalar can\r\n be given instead of a list.\r\n lbnd : scalar, optional\r\n The lower bound of the integral. (Default: 0)\r\n scl : scalar, optional\r\n Following each integration the result is *multiplied* by `scl`\r\n before the integration constant is added. (Default: 1)\r\n axis : int, optional\r\n Axis over which the integral is taken. (Default: 0).\r\n\r\n .. 
versionadded:: 1.7.0\r\n\r\n Returns\r\n -------\r\n S : ndarray\r\n C-series coefficients of the integral.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or\r\n ``np.isscalar(scl) == False``.\r\n\r\n See Also\r\n --------\r\n chebder\r\n\r\n Notes\r\n -----\r\n Note that the result of each integration is *multiplied* by `scl`.\r\n Why is this important to note? Say one is making a linear change of\r\n variable :math:`u = ax + b` in an integral relative to `x`. Then\r\n .. math::`dx = du/a`, so one will need to set `scl` equal to\r\n :math:`1/a`- perhaps not what one would have first thought.\r\n\r\n Also note that, in general, the result of integrating a C-series needs\r\n to be \"reprojected\" onto the C-series basis set. Thus, typically,\r\n the result of this function is \"unintuitive,\" albeit correct; see\r\n Examples section below.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import chebyshev as C\r\n >>> c = (1,2,3)\r\n >>> C.chebint(c)\r\n array([ 0.5, -0.5, 0.5, 0.5])\r\n >>> C.chebint(c,3)\r\n array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,\r\n 0.00625 ])\r\n >>> C.chebint(c, k=3)\r\n array([ 3.5, -0.5, 0.5, 0.5])\r\n >>> C.chebint(c,lbnd=-2)\r\n array([ 8.5, -0.5, 0.5, 0.5])\r\n >>> C.chebint(c,scl=-2)\r\n array([-1., 1., -1., -1.])\r\n\r\n \"\"\"\r\n c = np.array(c, ndmin=1, copy=1)\r\n if c.dtype.char in '?bBhHiIlLqQpP':\r\n c = c.astype(np.double)\r\n if not np.iterable(k):\r\n k = [k]\r\n cnt, iaxis = [int(t) for t in [m, axis]]\r\n\r\n if cnt != m:\r\n raise ValueError(\"The order of integration must be integer\")\r\n if cnt < 0:\r\n raise ValueError(\"The order of integration must be non-negative\")\r\n if len(k) > cnt:\r\n raise ValueError(\"Too many integration constants\")\r\n if iaxis != axis:\r\n raise ValueError(\"The axis must be integer\")\r\n if not -c.ndim <= iaxis < c.ndim:\r\n raise ValueError(\"The axis is out of range\")\r\n if iaxis < 0:\r\n iaxis += c.ndim\r\n\r\n if cnt == 0:\r\n return c\r\n\r\n c = np.rollaxis(c, iaxis)\r\n k = list(k) + [0]*(cnt - len(k))\r\n for i in range(cnt):\r\n n = len(c)\r\n c *= scl\r\n if n == 1 and np.all(c[0] == 0):\r\n c[0] += k[i]\r\n else:\r\n tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)\r\n tmp[0] = c[0]*0\r\n tmp[1] = c[0]\r\n if n > 1:\r\n tmp[2] = c[1]/4\r\n for j in range(2, n):\r\n t = c[j]/(2*j + 1)\r\n tmp[j + 1] = c[j]/(2*(j + 1))\r\n tmp[j - 1] -= c[j]/(2*(j - 1))\r\n tmp[0] += k[i] - chebval(lbnd, tmp)\r\n c = tmp\r\n c = np.rollaxis(c, 0, iaxis + 1)\r\n return c\r\n\r\n\r\ndef chebval(x, c, tensor=True):\r\n \"\"\"\r\n Evaluate a Chebyshev series at points x.\r\n\r\n If `c` is of length `n + 1`, this function returns the value:\r\n\r\n .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)\r\n\r\n The parameter `x` is converted to an array only if it is a tuple or a\r\n list, otherwise it is treated as a scalar. In either case, either `x`\r\n or its elements must support multiplication and addition both with\r\n themselves and with the elements of `c`.\r\n\r\n If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If\r\n `c` is multidimensional, then the shape of the result depends on the\r\n value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +\r\n x.shape. If `tensor` is false the shape will be c.shape[1:]. 
Note that\r\n scalars have shape (,).\r\n\r\n Trailing zeros in the coefficients will be used in the evaluation, so\r\n they should be avoided if efficiency is a concern.\r\n\r\n Parameters\r\n ----------\r\n x : array_like, compatible object\r\n If `x` is a list or tuple, it is converted to an ndarray, otherwise\r\n it is left unchanged and treated as a scalar. In either case, `x`\r\n or its elements must support addition and multiplication with\r\n with themselves and with the elements of `c`.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficients for terms of\r\n degree n are contained in c[n]. If `c` is multidimensional the\r\n remaining indices enumerate multiple polynomials. In the two\r\n dimensional case the coefficients may be thought of as stored in\r\n the columns of `c`.\r\n tensor : boolean, optional\r\n If True, the shape of the coefficient array is extended with ones\r\n on the right, one for each dimension of `x`. Scalars have dimension 0\r\n for this action. The result is that every column of coefficients in\r\n `c` is evaluated for every element of `x`. If False, `x` is broadcast\r\n over the columns of `c` for the evaluation. This keyword is useful\r\n when `c` is multidimensional. The default value is True.\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n Returns\r\n -------\r\n values : ndarray, algebra_like\r\n The shape of the return value is described above.\r\n\r\n See Also\r\n --------\r\n chebval2d, chebgrid2d, chebval3d, chebgrid3d\r\n\r\n Notes\r\n -----\r\n The evaluation uses Clenshaw recursion, aka synthetic division.\r\n\r\n Examples\r\n --------\r\n\r\n \"\"\"\r\n c = np.array(c, ndmin=1, copy=1)\r\n if c.dtype.char in '?bBhHiIlLqQpP':\r\n c = c.astype(np.double)\r\n if isinstance(x, (tuple, list)):\r\n x = np.asarray(x)\r\n if isinstance(x, np.ndarray) and tensor:\r\n c = c.reshape(c.shape + (1,)*x.ndim)\r\n\r\n if len(c) == 1:\r\n c0 = c[0]\r\n c1 = 0\r\n elif len(c) == 2:\r\n c0 = c[0]\r\n c1 = c[1]\r\n else:\r\n x2 = 2*x\r\n c0 = c[-2]\r\n c1 = c[-1]\r\n for i in range(3, len(c) + 1):\r\n tmp = c0\r\n c0 = c[-i] - c1\r\n c1 = tmp + c1*x2\r\n return c0 + c1*x\r\n\r\n\r\ndef chebval2d(x, y, c):\r\n \"\"\"\r\n Evaluate a 2-D Chebyshev series at points (x, y).\r\n\r\n This function returns the values:\r\n\r\n .. math:: p(x,y) = \\\\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)\r\n\r\n The parameters `x` and `y` are converted to arrays only if they are\r\n tuples or a lists, otherwise they are treated as a scalars and they\r\n must have the same shape after conversion. In either case, either `x`\r\n and `y` or their elements must support multiplication and addition both\r\n with themselves and with the elements of `c`.\r\n\r\n If `c` is a 1-D array a one is implicitly appended to its shape to make\r\n it 2-D. The shape of the result will be c.shape[2:] + x.shape.\r\n\r\n Parameters\r\n ----------\r\n x, y : array_like, compatible objects\r\n The two dimensional series is evaluated at the points `(x, y)`,\r\n where `x` and `y` must have the same shape. If `x` or `y` is a list\r\n or tuple, it is first converted to an ndarray, otherwise it is left\r\n unchanged and if it isn't an ndarray it is treated as a scalar.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficient of the term\r\n of multi-degree i,j is contained in ``c[i,j]``. 
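# --- Editor's illustration (hedged sketch, not part of the original numpy source) ---
# chebval above documents Clenshaw recursion but its Examples section is empty
# in this copy. A small worked check: p(x) = T_0 + 2*T_1 + 3*T_2 evaluated at
# x = 0.5 gives 1 + 2*0.5 + 3*(2*0.25 - 1) = 0.5.
import numpy as np
from numpy.polynomial import chebyshev as C

assert np.allclose(C.chebval(0.5, [1, 2, 3]), 0.5)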
If `c` has\r\n dimension greater than 2 the remaining indices enumerate multiple\r\n sets of coefficients.\r\n\r\n Returns\r\n -------\r\n values : ndarray, compatible object\r\n The values of the two dimensional Chebyshev series at points formed\r\n from pairs of corresponding values from `x` and `y`.\r\n\r\n See Also\r\n --------\r\n chebval, chebgrid2d, chebval3d, chebgrid3d\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded::1.7.0\r\n\r\n \"\"\"\r\n try:\r\n x, y = np.array((x, y), copy=0)\r\n except:\r\n raise ValueError('x, y are incompatible')\r\n\r\n c = chebval(x, c)\r\n c = chebval(y, c, tensor=False)\r\n return c\r\n\r\n\r\ndef chebgrid2d(x, y, c):\r\n \"\"\"\r\n Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.\r\n\r\n This function returns the values:\r\n\r\n .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),\r\n\r\n where the points `(a, b)` consist of all pairs formed by taking\r\n `a` from `x` and `b` from `y`. The resulting points form a grid with\r\n `x` in the first dimension and `y` in the second.\r\n\r\n The parameters `x` and `y` are converted to arrays only if they are\r\n tuples or a lists, otherwise they are treated as a scalars. In either\r\n case, either `x` and `y` or their elements must support multiplication\r\n and addition both with themselves and with the elements of `c`.\r\n\r\n If `c` has fewer than two dimensions, ones are implicitly appended to\r\n its shape to make it 2-D. The shape of the result will be c.shape[2:] +\r\n x.shape + y.shape.\r\n\r\n Parameters\r\n ----------\r\n x, y : array_like, compatible objects\r\n The two dimensional series is evaluated at the points in the\r\n Cartesian product of `x` and `y`. If `x` or `y` is a list or\r\n tuple, it is first converted to an ndarray, otherwise it is left\r\n unchanged and, if it isn't an ndarray, it is treated as a scalar.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficient of the term of\r\n multi-degree i,j is contained in `c[i,j]`. If `c` has dimension\r\n greater than two the remaining indices enumerate multiple sets of\r\n coefficients.\r\n\r\n Returns\r\n -------\r\n values : ndarray, compatible object\r\n The values of the two dimensional Chebyshev series at points in the\r\n Cartesian product of `x` and `y`.\r\n\r\n See Also\r\n --------\r\n chebval, chebval2d, chebval3d, chebgrid3d\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded::1.7.0\r\n\r\n \"\"\"\r\n c = chebval(x, c)\r\n c = chebval(y, c)\r\n return c\r\n\r\n\r\ndef chebval3d(x, y, z, c):\r\n \"\"\"\r\n Evaluate a 3-D Chebyshev series at points (x, y, z).\r\n\r\n This function returns the values:\r\n\r\n .. math:: p(x,y,z) = \\\\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)\r\n\r\n The parameters `x`, `y`, and `z` are converted to arrays only if\r\n they are tuples or a lists, otherwise they are treated as a scalars and\r\n they must have the same shape after conversion. In either case, either\r\n `x`, `y`, and `z` or their elements must support multiplication and\r\n addition both with themselves and with the elements of `c`.\r\n\r\n If `c` has fewer than 3 dimensions, ones are implicitly appended to its\r\n shape to make it 3-D. The shape of the result will be c.shape[3:] +\r\n x.shape.\r\n\r\n Parameters\r\n ----------\r\n x, y, z : array_like, compatible object\r\n The three dimensional series is evaluated at the points\r\n `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. 
If\r\n any of `x`, `y`, or `z` is a list or tuple, it is first converted\r\n to an ndarray, otherwise it is left unchanged and if it isn't an\r\n ndarray it is treated as a scalar.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficient of the term of\r\n multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension\r\n greater than 3 the remaining indices enumerate multiple sets of\r\n coefficients.\r\n\r\n Returns\r\n -------\r\n values : ndarray, compatible object\r\n The values of the multidimensional polynomial on points formed with\r\n triples of corresponding values from `x`, `y`, and `z`.\r\n\r\n See Also\r\n --------\r\n chebval, chebval2d, chebgrid2d, chebgrid3d\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded::1.7.0\r\n\r\n \"\"\"\r\n try:\r\n x, y, z = np.array((x, y, z), copy=0)\r\n except:\r\n raise ValueError('x, y, z are incompatible')\r\n\r\n c = chebval(x, c)\r\n c = chebval(y, c, tensor=False)\r\n c = chebval(z, c, tensor=False)\r\n return c\r\n\r\n\r\ndef chebgrid3d(x, y, z, c):\r\n \"\"\"\r\n Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.\r\n\r\n This function returns the values:\r\n\r\n .. math:: p(a,b,c) = \\\\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)\r\n\r\n where the points `(a, b, c)` consist of all triples formed by taking\r\n `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form\r\n a grid with `x` in the first dimension, `y` in the second, and `z` in\r\n the third.\r\n\r\n The parameters `x`, `y`, and `z` are converted to arrays only if they\r\n are tuples or a lists, otherwise they are treated as a scalars. In\r\n either case, either `x`, `y`, and `z` or their elements must support\r\n multiplication and addition both with themselves and with the elements\r\n of `c`.\r\n\r\n If `c` has fewer than three dimensions, ones are implicitly appended to\r\n its shape to make it 3-D. The shape of the result will be c.shape[3:] +\r\n x.shape + y.shape + z.shape.\r\n\r\n Parameters\r\n ----------\r\n x, y, z : array_like, compatible objects\r\n The three dimensional series is evaluated at the points in the\r\n Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a\r\n list or tuple, it is first converted to an ndarray, otherwise it is\r\n left unchanged and, if it isn't an ndarray, it is treated as a\r\n scalar.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficients for terms of\r\n degree i,j are contained in ``c[i,j]``. If `c` has dimension\r\n greater than two the remaining indices enumerate multiple sets of\r\n coefficients.\r\n\r\n Returns\r\n -------\r\n values : ndarray, compatible object\r\n The values of the two dimensional polynomial at points in the Cartesian\r\n product of `x` and `y`.\r\n\r\n See Also\r\n --------\r\n chebval, chebval2d, chebgrid2d, chebval3d\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded::1.7.0\r\n\r\n \"\"\"\r\n c = chebval(x, c)\r\n c = chebval(y, c)\r\n c = chebval(z, c)\r\n return c\r\n\r\n\r\ndef chebvander(x, deg):\r\n \"\"\"Pseudo-Vandermonde matrix of given degree.\r\n\r\n Returns the pseudo-Vandermonde matrix of degree `deg` and sample points\r\n `x`. The pseudo-Vandermonde matrix is defined by\r\n\r\n .. math:: V[..., i] = T_i(x),\r\n\r\n where `0 <= i <= deg`. 
The leading indices of `V` index the elements of\r\n `x` and the last index is the degree of the Chebyshev polynomial.\r\n\r\n If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the\r\n matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and\r\n ``chebval(x, c)`` are the same up to roundoff. This equivalence is\r\n useful both for least squares fitting and for the evaluation of a large\r\n number of Chebyshev series of the same degree and sample points.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Array of points. The dtype is converted to float64 or complex128\r\n depending on whether any of the elements are complex. If `x` is\r\n scalar it is converted to a 1-D array.\r\n deg : int\r\n Degree of the resulting matrix.\r\n\r\n Returns\r\n -------\r\n vander : ndarray\r\n The pseudo Vandermonde matrix. The shape of the returned matrix is\r\n ``x.shape + (deg + 1,)``, where The last index is the degree of the\r\n corresponding Chebyshev polynomial. The dtype will be the same as\r\n the converted `x`.\r\n\r\n \"\"\"\r\n ideg = int(deg)\r\n if ideg != deg:\r\n raise ValueError(\"deg must be integer\")\r\n if ideg < 0:\r\n raise ValueError(\"deg must be non-negative\")\r\n\r\n x = np.array(x, copy=0, ndmin=1) + 0.0\r\n dims = (ideg + 1,) + x.shape\r\n dtyp = x.dtype\r\n v = np.empty(dims, dtype=dtyp)\r\n # Use forward recursion to generate the entries.\r\n v[0] = x*0 + 1\r\n if ideg > 0:\r\n x2 = 2*x\r\n v[1] = x\r\n for i in range(2, ideg + 1):\r\n v[i] = v[i-1]*x2 - v[i-2]\r\n return np.rollaxis(v, 0, v.ndim)\r\n\r\n\r\ndef chebvander2d(x, y, deg):\r\n \"\"\"Pseudo-Vandermonde matrix of given degrees.\r\n\r\n Returns the pseudo-Vandermonde matrix of degrees `deg` and sample\r\n points `(x, y)`. The pseudo-Vandermonde matrix is defined by\r\n\r\n .. math:: V[..., deg[1]*i + j] = T_i(x) * T_j(y),\r\n\r\n where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of\r\n `V` index the points `(x, y)` and the last index encodes the degrees of\r\n the Chebyshev polynomials.\r\n\r\n If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`\r\n correspond to the elements of a 2-D coefficient array `c` of shape\r\n (xdeg + 1, ydeg + 1) in the order\r\n\r\n .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...\r\n\r\n and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same\r\n up to roundoff. This equivalence is useful both for least squares\r\n fitting and for the evaluation of a large number of 2-D Chebyshev\r\n series of the same degrees and sample points.\r\n\r\n Parameters\r\n ----------\r\n x, y : array_like\r\n Arrays of point coordinates, all of the same shape. The dtypes\r\n will be converted to either float64 or complex128 depending on\r\n whether any of the elements are complex. Scalars are converted to\r\n 1-D arrays.\r\n deg : list of ints\r\n List of maximum degrees of the form [x_deg, y_deg].\r\n\r\n Returns\r\n -------\r\n vander2d : ndarray\r\n The shape of the returned matrix is ``x.shape + (order,)``, where\r\n :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same\r\n as the converted `x` and `y`.\r\n\r\n See Also\r\n --------\r\n chebvander, chebvander3d. chebval2d, chebval3d\r\n\r\n Notes\r\n -----\r\n\r\n .. 
versionadded::1.7.0\r\n\r\n \"\"\"\r\n ideg = [int(d) for d in deg]\r\n is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]\r\n if is_valid != [1, 1]:\r\n raise ValueError(\"degrees must be non-negative integers\")\r\n degx, degy = ideg\r\n x, y = np.array((x, y), copy=0) + 0.0\r\n\r\n vx = chebvander(x, degx)\r\n vy = chebvander(y, degy)\r\n v = vx[..., None]*vy[..., None,:]\r\n return v.reshape(v.shape[:-2] + (-1,))\r\n\r\n\r\ndef chebvander3d(x, y, z, deg):\r\n \"\"\"Pseudo-Vandermonde matrix of given degrees.\r\n\r\n Returns the pseudo-Vandermonde matrix of degrees `deg` and sample\r\n points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,\r\n then The pseudo-Vandermonde matrix is defined by\r\n\r\n .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),\r\n\r\n where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading\r\n indices of `V` index the points `(x, y, z)` and the last index encodes\r\n the degrees of the Chebyshev polynomials.\r\n\r\n If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns\r\n of `V` correspond to the elements of a 3-D coefficient array `c` of\r\n shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order\r\n\r\n .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...\r\n\r\n and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the\r\n same up to roundoff. This equivalence is useful both for least squares\r\n fitting and for the evaluation of a large number of 3-D Chebyshev\r\n series of the same degrees and sample points.\r\n\r\n Parameters\r\n ----------\r\n x, y, z : array_like\r\n Arrays of point coordinates, all of the same shape. The dtypes will\r\n be converted to either float64 or complex128 depending on whether\r\n any of the elements are complex. Scalars are converted to 1-D\r\n arrays.\r\n deg : list of ints\r\n List of maximum degrees of the form [x_deg, y_deg, z_deg].\r\n\r\n Returns\r\n -------\r\n vander3d : ndarray\r\n The shape of the returned matrix is ``x.shape + (order,)``, where\r\n :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will\r\n be the same as the converted `x`, `y`, and `z`.\r\n\r\n See Also\r\n --------\r\n chebvander, chebvander3d. chebval2d, chebval3d\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded::1.7.0\r\n\r\n \"\"\"\r\n ideg = [int(d) for d in deg]\r\n is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]\r\n if is_valid != [1, 1, 1]:\r\n raise ValueError(\"degrees must be non-negative integers\")\r\n degx, degy, degz = ideg\r\n x, y, z = np.array((x, y, z), copy=0) + 0.0\r\n\r\n vx = chebvander(x, degx)\r\n vy = chebvander(y, degy)\r\n vz = chebvander(z, degz)\r\n v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]\r\n return v.reshape(v.shape[:-3] + (-1,))\r\n\r\n\r\ndef chebfit(x, y, deg, rcond=None, full=False, w=None):\r\n \"\"\"\r\n Least squares fit of Chebyshev series to data.\r\n\r\n Return the coefficients of a Legendre series of degree `deg` that is the\r\n least squares fit to the data values `y` given at points `x`. If `y` is\r\n 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple\r\n fits are done, one for each column of `y`, and the resulting\r\n coefficients are stored in the corresponding columns of a 2-D return.\r\n The fitted polynomial(s) are in the form\r\n\r\n .. math:: p(x) = c_0 + c_1 * T_1(x) + ... 
+ c_n * T_n(x),\r\n\r\n where `n` is `deg`.\r\n\r\n Parameters\r\n ----------\r\n x : array_like, shape (M,)\r\n x-coordinates of the M sample points ``(x[i], y[i])``.\r\n y : array_like, shape (M,) or (M, K)\r\n y-coordinates of the sample points. Several data sets of sample\r\n points sharing the same x-coordinates can be fitted at once by\r\n passing in a 2D-array that contains one dataset per column.\r\n deg : int or 1-D array_like\r\n Degree(s) of the fitting polynomials. If `deg` is a single integer\r\n all terms up to and including the `deg`'th term are included in the\r\n fit. For Numpy versions >= 1.11 a list of integers specifying the\r\n degrees of the terms to include may be used instead.\r\n rcond : float, optional\r\n Relative condition number of the fit. Singular values smaller than\r\n this relative to the largest singular value will be ignored. The\r\n default value is len(x)*eps, where eps is the relative precision of\r\n the float type, about 2e-16 in most cases.\r\n full : bool, optional\r\n Switch determining nature of return value. When it is False (the\r\n default) just the coefficients are returned, when True diagnostic\r\n information from the singular value decomposition is also returned.\r\n w : array_like, shape (`M`,), optional\r\n Weights. If not None, the contribution of each point\r\n ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the\r\n weights are chosen so that the errors of the products ``w[i]*y[i]``\r\n all have the same variance. The default value is None.\r\n\r\n .. versionadded:: 1.5.0\r\n\r\n Returns\r\n -------\r\n coef : ndarray, shape (M,) or (M, K)\r\n Chebyshev coefficients ordered from low to high. If `y` was 2-D,\r\n the coefficients for the data in column k of `y` are in column\r\n `k`.\r\n\r\n [residuals, rank, singular_values, rcond] : list\r\n These values are only returned if `full` = True\r\n\r\n resid -- sum of squared residuals of the least squares fit\r\n rank -- the numerical rank of the scaled Vandermonde matrix\r\n sv -- singular values of the scaled Vandermonde matrix\r\n rcond -- value of `rcond`.\r\n\r\n For more details, see `linalg.lstsq`.\r\n\r\n Warns\r\n -----\r\n RankWarning\r\n The rank of the coefficient matrix in the least-squares fit is\r\n deficient. The warning is only raised if `full` = False. The\r\n warnings can be turned off by\r\n\r\n >>> import warnings\r\n >>> warnings.simplefilter('ignore', RankWarning)\r\n\r\n See Also\r\n --------\r\n polyfit, legfit, lagfit, hermfit, hermefit\r\n chebval : Evaluates a Chebyshev series.\r\n chebvander : Vandermonde matrix of Chebyshev series.\r\n chebweight : Chebyshev weight function.\r\n linalg.lstsq : Computes a least-squares fit from the matrix.\r\n scipy.interpolate.UnivariateSpline : Computes spline fits.\r\n\r\n Notes\r\n -----\r\n The solution is the coefficients of the Chebyshev series `p` that\r\n minimizes the sum of the weighted squared errors\r\n\r\n .. math:: E = \\\\sum_j w_j^2 * |y_j - p(x_j)|^2,\r\n\r\n where :math:`w_j` are the weights. This problem is solved by setting up\r\n as the (typically) overdetermined matrix equation\r\n\r\n .. math:: V(x) * c = w * y,\r\n\r\n where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the\r\n coefficients to be solved for, `w` are the weights, and `y` are the\r\n observed values. This equation is then solved using the singular value\r\n decomposition of `V`.\r\n\r\n If some of the singular values of `V` are so small that they are\r\n neglected, then a `RankWarning` will be issued. 
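# --- Editor's illustration (hedged sketch, not part of the original numpy source) ---
# A minimal usage example for the least-squares fit described above: fitting
# samples of an exactly representable degree-2 Chebyshev series recovers its
# coefficients (up to roundoff).
import numpy as np
from numpy.polynomial import chebyshev as C

x = np.linspace(-1, 1, 50)
y = C.chebval(x, [1.0, 2.0, 3.0])
assert np.allclose(C.chebfit(x, y, 2), [1.0, 2.0, 3.0])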
This means that the\r\n coefficient values may be poorly determined. Using a lower order fit\r\n will usually get rid of the warning. The `rcond` parameter can also be\r\n set to a value smaller than its default, but the resulting fit may be\r\n spurious and have large contributions from roundoff error.\r\n\r\n Fits using Chebyshev series are usually better conditioned than fits\r\n using power series, but much can depend on the distribution of the\r\n sample points and the smoothness of the data. If the quality of the fit\r\n is inadequate splines may be a good alternative.\r\n\r\n References\r\n ----------\r\n .. [1] Wikipedia, \"Curve fitting\",\r\n http://en.wikipedia.org/wiki/Curve_fitting\r\n\r\n Examples\r\n --------\r\n\r\n \"\"\"\r\n x = np.asarray(x) + 0.0\r\n y = np.asarray(y) + 0.0\r\n deg = np.asarray(deg)\r\n\r\n # check arguments.\r\n if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:\r\n raise TypeError(\"deg must be an int or non-empty 1-D array of int\")\r\n if deg.min() < 0:\r\n raise ValueError(\"expected deg >= 0\")\r\n if x.ndim != 1:\r\n raise TypeError(\"expected 1D vector for x\")\r\n if x.size == 0:\r\n raise TypeError(\"expected non-empty vector for x\")\r\n if y.ndim < 1 or y.ndim > 2:\r\n raise TypeError(\"expected 1D or 2D array for y\")\r\n if len(x) != len(y):\r\n raise TypeError(\"expected x and y to have same length\")\r\n\r\n if deg.ndim == 0:\r\n lmax = deg\r\n order = lmax + 1\r\n van = chebvander(x, lmax)\r\n else:\r\n deg = np.sort(deg)\r\n lmax = deg[-1]\r\n order = len(deg)\r\n van = chebvander(x, lmax)[:, deg]\r\n\r\n # set up the least squares matrices in transposed form\r\n lhs = van.T\r\n rhs = y.T\r\n if w is not None:\r\n w = np.asarray(w) + 0.0\r\n if w.ndim != 1:\r\n raise TypeError(\"expected 1D vector for w\")\r\n if len(x) != len(w):\r\n raise TypeError(\"expected x and w to have same length\")\r\n # apply weights. Don't use inplace operations as they\r\n # can cause problems with NA.\r\n lhs = lhs * w\r\n rhs = rhs * w\r\n\r\n # set rcond\r\n if rcond is None:\r\n rcond = len(x)*np.finfo(x.dtype).eps\r\n\r\n # Determine the norms of the design matrix columns.\r\n if issubclass(lhs.dtype.type, np.complexfloating):\r\n scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))\r\n else:\r\n scl = np.sqrt(np.square(lhs).sum(1))\r\n scl[scl == 0] = 1\r\n\r\n # Solve the least squares problem.\r\n c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)\r\n c = (c.T/scl).T\r\n\r\n # Expand c to include non-fitted coefficients which are set to zero\r\n if deg.ndim > 0:\r\n if c.ndim == 2:\r\n cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)\r\n else:\r\n cc = np.zeros(lmax + 1, dtype=c.dtype)\r\n cc[deg] = c\r\n c = cc\r\n\r\n # warn on rank reduction\r\n if rank != order and not full:\r\n msg = \"The fit may be poorly conditioned\"\r\n warnings.warn(msg, pu.RankWarning)\r\n\r\n if full:\r\n return c, [resids, rank, s, rcond]\r\n else:\r\n return c\r\n\r\n\r\ndef chebcompanion(c):\r\n \"\"\"Return the scaled companion matrix of c.\r\n\r\n The basis polynomials are scaled so that the companion matrix is\r\n symmetric when `c` is a Chebyshev basis polynomial. 
This provides\r\n better eigenvalue estimates than the unscaled case and for basis\r\n polynomials the eigenvalues are guaranteed to be real if\r\n `numpy.linalg.eigvalsh` is used to obtain them.\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n 1-D array of Chebyshev series coefficients ordered from low to high\r\n degree.\r\n\r\n Returns\r\n -------\r\n mat : ndarray\r\n Scaled companion matrix of dimensions (deg, deg).\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded::1.7.0\r\n\r\n \"\"\"\r\n # c is a trimmed copy\r\n [c] = pu.as_series([c])\r\n if len(c) < 2:\r\n raise ValueError('Series must have maximum degree of at least 1.')\r\n if len(c) == 2:\r\n return np.array([[-c[0]/c[1]]])\r\n\r\n n = len(c) - 1\r\n mat = np.zeros((n, n), dtype=c.dtype)\r\n scl = np.array([1.] + [np.sqrt(.5)]*(n-1))\r\n top = mat.reshape(-1)[1::n+1]\r\n bot = mat.reshape(-1)[n::n+1]\r\n top[0] = np.sqrt(.5)\r\n top[1:] = 1/2\r\n bot[...] = top\r\n mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5\r\n return mat\r\n\r\n\r\ndef chebroots(c):\r\n \"\"\"\r\n Compute the roots of a Chebyshev series.\r\n\r\n Return the roots (a.k.a. \"zeros\") of the polynomial\r\n\r\n .. math:: p(x) = \\\\sum_i c[i] * T_i(x).\r\n\r\n Parameters\r\n ----------\r\n c : 1-D array_like\r\n 1-D array of coefficients.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n Array of the roots of the series. If all the roots are real,\r\n then `out` is also real, otherwise it is complex.\r\n\r\n See Also\r\n --------\r\n polyroots, legroots, lagroots, hermroots, hermeroots\r\n\r\n Notes\r\n -----\r\n The root estimates are obtained as the eigenvalues of the companion\r\n matrix, Roots far from the origin of the complex plane may have large\r\n errors due to the numerical instability of the series for such\r\n values. Roots with multiplicity greater than 1 will also show larger\r\n errors as the value of the series near such points is relatively\r\n insensitive to errors in the roots. Isolated roots near the origin can\r\n be improved by a few iterations of Newton's method.\r\n\r\n The Chebyshev series basis polynomials aren't powers of `x` so the\r\n results of this function may seem unintuitive.\r\n\r\n Examples\r\n --------\r\n >>> import numpy.polynomial.chebyshev as cheb\r\n >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots\r\n array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])\r\n\r\n \"\"\"\r\n # c is a trimmed copy\r\n [c] = pu.as_series([c])\r\n if len(c) < 2:\r\n return np.array([], dtype=c.dtype)\r\n if len(c) == 2:\r\n return np.array([-c[0]/c[1]])\r\n\r\n m = chebcompanion(c)\r\n r = la.eigvals(m)\r\n r.sort()\r\n return r\r\n\r\n\r\ndef chebgauss(deg):\r\n \"\"\"\r\n Gauss-Chebyshev quadrature.\r\n\r\n Computes the sample points and weights for Gauss-Chebyshev quadrature.\r\n These sample points and weights will correctly integrate polynomials of\r\n degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with\r\n the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.\r\n\r\n Parameters\r\n ----------\r\n deg : int\r\n Number of sample points and weights. It must be >= 1.\r\n\r\n Returns\r\n -------\r\n x : ndarray\r\n 1-D ndarray containing the sample points.\r\n y : ndarray\r\n 1-D ndarray containing the weights.\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n The results have only been tested up to degree 100, higher degrees may\r\n be problematic. For Gauss-Chebyshev there are closed form solutions for\r\n the sample points and weights. If n = `deg`, then\r\n\r\n .. 
math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))\r\n\r\n .. math:: w_i = \\pi / n\r\n\r\n \"\"\"\r\n ideg = int(deg)\r\n if ideg != deg or ideg < 1:\r\n raise ValueError(\"deg must be a non-negative integer\")\r\n\r\n x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))\r\n w = np.ones(ideg)*(np.pi/ideg)\r\n\r\n return x, w\r\n\r\n\r\ndef chebweight(x):\r\n \"\"\"\r\n The weight function of the Chebyshev polynomials.\r\n\r\n The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of\r\n integration is :math:`[-1, 1]`. The Chebyshev polynomials are\r\n orthogonal, but not normalized, with respect to this weight function.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Values at which the weight function will be computed.\r\n\r\n Returns\r\n -------\r\n w : ndarray\r\n The weight function at `x`.\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n \"\"\"\r\n w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x))\r\n return w\r\n\r\n\r\ndef chebpts1(npts):\r\n \"\"\"\r\n Chebyshev points of the first kind.\r\n\r\n The Chebyshev points of the first kind are the points ``cos(x)``,\r\n where ``x = [pi*(k + .5)/npts for k in range(npts)]``.\r\n\r\n Parameters\r\n ----------\r\n npts : int\r\n Number of sample points desired.\r\n\r\n Returns\r\n -------\r\n pts : ndarray\r\n The Chebyshev points of the first kind.\r\n\r\n See Also\r\n --------\r\n chebpts2\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.5.0\r\n\r\n \"\"\"\r\n _npts = int(npts)\r\n if _npts != npts:\r\n raise ValueError(\"npts must be integer\")\r\n if _npts < 1:\r\n raise ValueError(\"npts must be >= 1\")\r\n\r\n x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts)\r\n return np.cos(x)\r\n\r\n\r\ndef chebpts2(npts):\r\n \"\"\"\r\n Chebyshev points of the second kind.\r\n\r\n The Chebyshev points of the second kind are the points ``cos(x)``,\r\n where ``x = [pi*k/(npts - 1) for k in range(npts)]``.\r\n\r\n Parameters\r\n ----------\r\n npts : int\r\n Number of sample points desired.\r\n\r\n Returns\r\n -------\r\n pts : ndarray\r\n The Chebyshev points of the second kind.\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.5.0\r\n\r\n \"\"\"\r\n _npts = int(npts)\r\n if _npts != npts:\r\n raise ValueError(\"npts must be integer\")\r\n if _npts < 2:\r\n raise ValueError(\"npts must be >= 2\")\r\n\r\n x = np.linspace(-np.pi, 0, _npts)\r\n return np.cos(x)\r\n\r\n\r\n#\r\n# Chebyshev series class\r\n#\r\n\r\nclass Chebyshev(ABCPolyBase):\r\n \"\"\"A Chebyshev series class.\r\n\r\n The Chebyshev class provides the standard Python numerical methods\r\n '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the\r\n methods listed below.\r\n\r\n Parameters\r\n ----------\r\n coef : array_like\r\n Chebyshev coefficients in order of increasing degree, i.e.,\r\n ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.\r\n domain : (2,) array_like, optional\r\n Domain to use. The interval ``[domain[0], domain[1]]`` is mapped\r\n to the interval ``[window[0], window[1]]`` by shifting and scaling.\r\n The default value is [-1, 1].\r\n window : (2,) array_like, optional\r\n Window, see `domain` for its use. The default value is [-1, 1].\r\n\r\n .. 
versionadded:: 1.6.0\r\n\r\n \"\"\"\r\n # Virtual Functions\r\n _add = staticmethod(chebadd)\r\n _sub = staticmethod(chebsub)\r\n _mul = staticmethod(chebmul)\r\n _div = staticmethod(chebdiv)\r\n _pow = staticmethod(chebpow)\r\n _val = staticmethod(chebval)\r\n _int = staticmethod(chebint)\r\n _der = staticmethod(chebder)\r\n _fit = staticmethod(chebfit)\r\n _line = staticmethod(chebline)\r\n _roots = staticmethod(chebroots)\r\n _fromroots = staticmethod(chebfromroots)\r\n\r\n # Virtual properties\r\n nickname = 'cheb'\r\n domain = np.array(chebdomain)\r\n window = np.array(chebdomain)\r\n", "#!/usr/bin/env python\r\n# Created by Pearu Peterson, September 2002\r\n\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\n__usage__ = \"\"\"\r\nBuild fftpack:\r\n python setup_fftpack.py build\r\nRun tests if scipy is installed:\r\n python -c 'import scipy;scipy.fftpack.test(<level>)'\r\nRun tests if fftpack is not installed:\r\n python tests/test_pseudo_diffs.py [<level>]\r\n\"\"\"\r\n\r\nfrom numpy.testing import (TestCase, assert_equal, assert_almost_equal,\r\n assert_array_almost_equal, run_module_suite)\r\nfrom scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert,\r\n ihilbert, shift, fftfreq, cs_diff, sc_diff,\r\n ss_diff, cc_diff)\r\n\r\nimport numpy as np\r\nfrom numpy import arange, sin, cos, pi, exp, tanh, sum, sign\r\nfrom numpy.random import random\r\n\r\n\r\ndef direct_diff(x,k=1,period=None):\r\n fx = fft(x)\r\n n = len(fx)\r\n if period is None:\r\n period = 2*pi\r\n w = fftfreq(n)*2j*pi/period*n\r\n if k < 0:\r\n w = 1 / w**k\r\n w[0] = 0.0\r\n else:\r\n w = w**k\r\n if n > 2000:\r\n w[250:n-250] = 0.0\r\n return ifft(w*fx).real\r\n\r\n\r\ndef direct_tilbert(x,h=1,period=None):\r\n fx = fft(x)\r\n n = len(fx)\r\n if period is None:\r\n period = 2*pi\r\n w = fftfreq(n)*h*2*pi/period*n\r\n w[0] = 1\r\n w = 1j/tanh(w)\r\n w[0] = 0j\r\n return ifft(w*fx)\r\n\r\n\r\ndef direct_itilbert(x,h=1,period=None):\r\n fx = fft(x)\r\n n = len(fx)\r\n if period is None:\r\n period = 2*pi\r\n w = fftfreq(n)*h*2*pi/period*n\r\n w = -1j*tanh(w)\r\n return ifft(w*fx)\r\n\r\n\r\ndef direct_hilbert(x):\r\n fx = fft(x)\r\n n = len(fx)\r\n w = fftfreq(n)*n\r\n w = 1j*sign(w)\r\n return ifft(w*fx)\r\n\r\n\r\ndef direct_ihilbert(x):\r\n return -direct_hilbert(x)\r\n\r\n\r\ndef direct_shift(x,a,period=None):\r\n n = len(x)\r\n if period is None:\r\n k = fftfreq(n)*1j*n\r\n else:\r\n k = fftfreq(n)*2j*pi/period*n\r\n return ifft(fft(x)*exp(k*a)).real\r\n\r\n\r\nclass TestDiff(TestCase):\r\n\r\n def test_definition(self):\r\n for n in [16,17,64,127,32]:\r\n x = arange(n)*2*pi/n\r\n assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x)))\r\n assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2))\r\n assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3))\r\n assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4))\r\n assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5))\r\n assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3))\r\n assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4))\r\n assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x)))\r\n assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2))\r\n assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3))\r\n assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4))\r\n assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x)))\r\n assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8)))\r\n 
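# --- Editorial aside, not part of the original numpy/scipy sources above ---
# A minimal, self-contained sketch of the spectral-differentiation idea that
# the direct_diff() helper defined near the top of this test file implements
# and that the surrounding assertions exercise: for a signal sampled on a
# uniform 2*pi-periodic grid, taking the derivative is multiplication by the
# factor (i*k) in the Fourier domain. All names below are illustrative only.
import numpy as np
from scipy.fftpack import fft, ifft, fftfreq

n = 64
x = np.arange(n) * 2 * np.pi / n                 # one period of a uniform grid
w = fftfreq(n) * n * 2j * np.pi / (2 * np.pi)    # i*k, the first-derivative multiplier
deriv = ifft(w * fft(np.sin(x))).real            # spectral d/dx of sin(x)
assert np.allclose(deriv, np.cos(x))             # matches the analytic derivative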
assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8)))\r\n for k in range(5):\r\n assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k))\r\n assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k))\r\n\r\n def test_period(self):\r\n for n in [17,64]:\r\n x = arange(n)/float(n)\r\n assert_array_almost_equal(diff(sin(2*pi*x),period=1),\r\n 2*pi*cos(2*pi*x))\r\n assert_array_almost_equal(diff(sin(2*pi*x),3,period=1),\r\n -(2*pi)**3*cos(2*pi*x))\r\n\r\n def test_sin(self):\r\n for n in [32,64,77]:\r\n x = arange(n)*2*pi/n\r\n assert_array_almost_equal(diff(sin(x)),cos(x))\r\n assert_array_almost_equal(diff(cos(x)),-sin(x))\r\n assert_array_almost_equal(diff(sin(x),2),-sin(x))\r\n assert_array_almost_equal(diff(sin(x),4),sin(x))\r\n assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x))\r\n assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x)))\r\n\r\n def test_expr(self):\r\n for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]:\r\n x = arange(n)*2*pi/n\r\n f = sin(x)*cos(4*x)+exp(sin(3*x))\r\n df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))\r\n ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\\\r\n - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))\r\n d1 = diff(f)\r\n assert_array_almost_equal(d1,df)\r\n assert_array_almost_equal(diff(df),ddf)\r\n assert_array_almost_equal(diff(f,2),ddf)\r\n assert_array_almost_equal(diff(ddf,-1),df)\r\n\r\n def test_expr_large(self):\r\n for n in [2048,4096]:\r\n x = arange(n)*2*pi/n\r\n f = sin(x)*cos(4*x)+exp(sin(3*x))\r\n df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))\r\n ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\\\r\n - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))\r\n assert_array_almost_equal(diff(f),df)\r\n assert_array_almost_equal(diff(df),ddf)\r\n assert_array_almost_equal(diff(ddf,-1),df)\r\n assert_array_almost_equal(diff(f,2),ddf)\r\n\r\n def test_int(self):\r\n n = 64\r\n x = arange(n)*2*pi/n\r\n assert_array_almost_equal(diff(sin(x),-1),-cos(x))\r\n assert_array_almost_equal(diff(sin(x),-2),-sin(x))\r\n assert_array_almost_equal(diff(sin(x),-4),sin(x))\r\n assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x))\r\n\r\n def test_random_even(self):\r\n for k in [0,2,4,6]:\r\n for n in [60,32,64,56,55]:\r\n f = random((n,))\r\n af = sum(f,axis=0)/n\r\n f = f-af\r\n # zeroing Nyquist mode:\r\n f = diff(diff(f,1),-1)\r\n assert_almost_equal(sum(f,axis=0),0.0)\r\n assert_array_almost_equal(diff(diff(f,k),-k),f)\r\n assert_array_almost_equal(diff(diff(f,-k),k),f)\r\n\r\n def test_random_odd(self):\r\n for k in [0,1,2,3,4,5,6]:\r\n for n in [33,65,55]:\r\n f = random((n,))\r\n af = sum(f,axis=0)/n\r\n f = f-af\r\n assert_almost_equal(sum(f,axis=0),0.0)\r\n assert_array_almost_equal(diff(diff(f,k),-k),f)\r\n assert_array_almost_equal(diff(diff(f,-k),k),f)\r\n\r\n def test_zero_nyquist(self):\r\n for k in [0,1,2,3,4,5,6]:\r\n for n in [32,33,64,56,55]:\r\n f = random((n,))\r\n af = sum(f,axis=0)/n\r\n f = f-af\r\n # zeroing Nyquist mode:\r\n f = diff(diff(f,1),-1)\r\n assert_almost_equal(sum(f,axis=0),0.0)\r\n assert_array_almost_equal(diff(diff(f,k),-k),f)\r\n assert_array_almost_equal(diff(diff(f,-k),k),f)\r\n\r\n\r\nclass TestTilbert(TestCase):\r\n\r\n def test_definition(self):\r\n for h in [0.1,0.5,1,5.5,10]:\r\n for n in [16,17,64,127]:\r\n x = arange(n)*2*pi/n\r\n y = tilbert(sin(x),h)\r\n y1 = direct_tilbert(sin(x),h)\r\n assert_array_almost_equal(y,y1)\r\n assert_array_almost_equal(tilbert(sin(x),h),\r\n direct_tilbert(sin(x),h))\r\n 
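# --- Editorial aside, not part of the original numpy/scipy sources above ---
# A hedged sketch of the Fourier multiplier behind the Tilbert checks in this
# test class: the direct_tilbert() reference above applies y_k = i*coth(k*h)*x_k
# to the FFT of a 2*pi-periodic signal, and the assertions compare that against
# scipy.fftpack.tilbert. The variable names below are illustrative only.
import numpy as np
from scipy.fftpack import fft, ifft, fftfreq, tilbert

n, h = 64, 0.5
x = np.arange(n) * 2 * np.pi / n
k_h = fftfreq(n) * n * h                  # k*h on a 2*pi-periodic grid
k_h[0] = 1.0                              # placeholder to avoid 0/0 at k = 0
mult = 1j / np.tanh(k_h)
mult[0] = 0.0                             # the mean (k = 0) component is mapped to zero
by_hand = ifft(mult * fft(np.sin(x))).real
assert np.allclose(by_hand, tilbert(np.sin(x), h))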
assert_array_almost_equal(tilbert(sin(2*x),h),\r\n direct_tilbert(sin(2*x),h))\r\n\r\n def test_random_even(self):\r\n for h in [0.1,0.5,1,5.5,10]:\r\n for n in [32,64,56]:\r\n f = random((n,))\r\n af = sum(f,axis=0)/n\r\n f = f-af\r\n assert_almost_equal(sum(f,axis=0),0.0)\r\n assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f)\r\n\r\n def test_random_odd(self):\r\n for h in [0.1,0.5,1,5.5,10]:\r\n for n in [33,65,55]:\r\n f = random((n,))\r\n af = sum(f,axis=0)/n\r\n f = f-af\r\n assert_almost_equal(sum(f,axis=0),0.0)\r\n assert_array_almost_equal(itilbert(tilbert(f,h),h),f)\r\n assert_array_almost_equal(tilbert(itilbert(f,h),h),f)\r\n\r\n\r\nclass TestITilbert(TestCase):\r\n\r\n def test_definition(self):\r\n for h in [0.1,0.5,1,5.5,10]:\r\n for n in [16,17,64,127]:\r\n x = arange(n)*2*pi/n\r\n y = itilbert(sin(x),h)\r\n y1 = direct_itilbert(sin(x),h)\r\n assert_array_almost_equal(y,y1)\r\n assert_array_almost_equal(itilbert(sin(x),h),\r\n direct_itilbert(sin(x),h))\r\n assert_array_almost_equal(itilbert(sin(2*x),h),\r\n direct_itilbert(sin(2*x),h))\r\n\r\n\r\nclass TestHilbert(TestCase):\r\n\r\n def test_definition(self):\r\n for n in [16,17,64,127]:\r\n x = arange(n)*2*pi/n\r\n y = hilbert(sin(x))\r\n y1 = direct_hilbert(sin(x))\r\n assert_array_almost_equal(y,y1)\r\n assert_array_almost_equal(hilbert(sin(2*x)),\r\n direct_hilbert(sin(2*x)))\r\n\r\n def test_tilbert_relation(self):\r\n for n in [16,17,64,127]:\r\n x = arange(n)*2*pi/n\r\n f = sin(x)+cos(2*x)*sin(x)\r\n y = hilbert(f)\r\n y1 = direct_hilbert(f)\r\n assert_array_almost_equal(y,y1)\r\n y2 = tilbert(f,h=10)\r\n assert_array_almost_equal(y,y2)\r\n\r\n def test_random_odd(self):\r\n for n in [33,65,55]:\r\n f = random((n,))\r\n af = sum(f,axis=0)/n\r\n f = f-af\r\n assert_almost_equal(sum(f,axis=0),0.0)\r\n assert_array_almost_equal(ihilbert(hilbert(f)),f)\r\n assert_array_almost_equal(hilbert(ihilbert(f)),f)\r\n\r\n def test_random_even(self):\r\n for n in [32,64,56]:\r\n f = random((n,))\r\n af = sum(f,axis=0)/n\r\n f = f-af\r\n # zeroing Nyquist mode:\r\n f = diff(diff(f,1),-1)\r\n assert_almost_equal(sum(f,axis=0),0.0)\r\n assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f)\r\n assert_array_almost_equal(hilbert(ihilbert(f)),f)\r\n\r\n\r\nclass TestIHilbert(TestCase):\r\n\r\n def test_definition(self):\r\n for n in [16,17,64,127]:\r\n x = arange(n)*2*pi/n\r\n y = ihilbert(sin(x))\r\n y1 = direct_ihilbert(sin(x))\r\n assert_array_almost_equal(y,y1)\r\n assert_array_almost_equal(ihilbert(sin(2*x)),\r\n direct_ihilbert(sin(2*x)))\r\n\r\n def test_itilbert_relation(self):\r\n for n in [16,17,64,127]:\r\n x = arange(n)*2*pi/n\r\n f = sin(x)+cos(2*x)*sin(x)\r\n y = ihilbert(f)\r\n y1 = direct_ihilbert(f)\r\n assert_array_almost_equal(y,y1)\r\n y2 = itilbert(f,h=10)\r\n assert_array_almost_equal(y,y2)\r\n\r\n\r\nclass TestShift(TestCase):\r\n\r\n def test_definition(self):\r\n for n in [18,17,64,127,32,2048,256]:\r\n x = arange(n)*2*pi/n\r\n for a in [0.1,3]:\r\n assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a))\r\n assert_array_almost_equal(shift(sin(x),a),sin(x+a))\r\n assert_array_almost_equal(shift(cos(x),a),cos(x+a))\r\n assert_array_almost_equal(shift(cos(2*x)+sin(x),a),\r\n cos(2*(x+a))+sin(x+a))\r\n assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a)))\r\n assert_array_almost_equal(shift(sin(x),2*pi),sin(x))\r\n assert_array_almost_equal(shift(sin(x),pi),-sin(x))\r\n assert_array_almost_equal(shift(sin(x),pi/2),cos(x))\r\n\r\n\r\nclass TestOverwrite(object):\r\n 
\"\"\"Check input overwrite behavior \"\"\"\r\n\r\n real_dtypes = [np.float32, np.float64]\r\n dtypes = real_dtypes + [np.complex64, np.complex128]\r\n\r\n def _check(self, x, routine, *args, **kwargs):\r\n x2 = x.copy()\r\n routine(x2, *args, **kwargs)\r\n sig = routine.__name__\r\n if args:\r\n sig += repr(args)\r\n if kwargs:\r\n sig += repr(kwargs)\r\n assert_equal(x2, x, err_msg=\"spurious overwrite in %s\" % sig)\r\n\r\n def _check_1d(self, routine, dtype, shape, *args, **kwargs):\r\n np.random.seed(1234)\r\n if np.issubdtype(dtype, np.complexfloating):\r\n data = np.random.randn(*shape) + 1j*np.random.randn(*shape)\r\n else:\r\n data = np.random.randn(*shape)\r\n data = data.astype(dtype)\r\n self._check(data, routine, *args, **kwargs)\r\n\r\n def test_diff(self):\r\n for dtype in self.dtypes:\r\n self._check_1d(diff, dtype, (16,))\r\n\r\n def test_tilbert(self):\r\n for dtype in self.dtypes:\r\n self._check_1d(tilbert, dtype, (16,), 1.6)\r\n\r\n def test_itilbert(self):\r\n for dtype in self.dtypes:\r\n self._check_1d(itilbert, dtype, (16,), 1.6)\r\n\r\n def test_hilbert(self):\r\n for dtype in self.dtypes:\r\n self._check_1d(hilbert, dtype, (16,))\r\n\r\n def test_cs_diff(self):\r\n for dtype in self.dtypes:\r\n self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0)\r\n\r\n def test_sc_diff(self):\r\n for dtype in self.dtypes:\r\n self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0)\r\n\r\n def test_ss_diff(self):\r\n for dtype in self.dtypes:\r\n self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0)\r\n\r\n def test_cc_diff(self):\r\n for dtype in self.dtypes:\r\n self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0)\r\n\r\n def test_shift(self):\r\n for dtype in self.dtypes:\r\n self._check_1d(shift, dtype, (16,), 1.0)\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n", "from __future__ import division, print_function, absolute_import\r\n\r\nimport warnings\r\nfrom . import _minpack\r\n\r\nimport numpy as np\r\nfrom numpy import (atleast_1d, dot, take, triu, shape, eye,\r\n transpose, zeros, product, greater, array,\r\n all, where, isscalar, asarray, inf, abs,\r\n finfo, inexact, issubdtype, dtype)\r\nfrom scipy.linalg import svd\r\nfrom scipy._lib._util import _asarray_validated, _lazywhere\r\nfrom .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning\r\nfrom ._lsq import least_squares\r\nfrom ._lsq.common import make_strictly_feasible\r\nfrom ._lsq.least_squares import prepare_bounds\r\n\r\n\r\nerror = _minpack.error\r\n\r\n__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']\r\n\r\n\r\ndef _check_func(checker, argname, thefunc, x0, args, numinputs,\r\n output_shape=None):\r\n res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))\r\n if (output_shape is not None) and (shape(res) != output_shape):\r\n if (output_shape[0] != 1):\r\n if len(output_shape) > 1:\r\n if output_shape[1] == 1:\r\n return shape(res)\r\n msg = \"%s: there is a mismatch between the input and output \" \\\r\n \"shape of the '%s' argument\" % (checker, argname)\r\n func_name = getattr(thefunc, '__name__', None)\r\n if func_name:\r\n msg += \" '%s'.\" % func_name\r\n else:\r\n msg += \".\"\r\n msg += 'Shape should be %s but it is %s.' 
% (output_shape, shape(res))\r\n raise TypeError(msg)\r\n if issubdtype(res.dtype, inexact):\r\n dt = res.dtype\r\n else:\r\n dt = dtype(float)\r\n return shape(res), dt\r\n\r\n\r\ndef fsolve(func, x0, args=(), fprime=None, full_output=0,\r\n col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,\r\n epsfcn=None, factor=100, diag=None):\r\n \"\"\"\r\n Find the roots of a function.\r\n\r\n Return the roots of the (non-linear) equations defined by\r\n ``func(x) = 0`` given a starting estimate.\r\n\r\n Parameters\r\n ----------\r\n func : callable ``f(x, *args)``\r\n A function that takes at least one (possibly vector) argument.\r\n x0 : ndarray\r\n The starting estimate for the roots of ``func(x) = 0``.\r\n args : tuple, optional\r\n Any extra arguments to `func`.\r\n fprime : callable(x), optional\r\n A function to compute the Jacobian of `func` with derivatives\r\n across the rows. By default, the Jacobian will be estimated.\r\n full_output : bool, optional\r\n If True, return optional outputs.\r\n col_deriv : bool, optional\r\n Specify whether the Jacobian function computes derivatives down\r\n the columns (faster, because there is no transpose operation).\r\n xtol : float, optional\r\n The calculation will terminate if the relative error between two\r\n consecutive iterates is at most `xtol`.\r\n maxfev : int, optional\r\n The maximum number of calls to the function. If zero, then\r\n ``100*(N+1)`` is the maximum where N is the number of elements\r\n in `x0`.\r\n band : tuple, optional\r\n If set to a two-sequence containing the number of sub- and\r\n super-diagonals within the band of the Jacobi matrix, the\r\n Jacobi matrix is considered banded (only for ``fprime=None``).\r\n epsfcn : float, optional\r\n A suitable step length for the forward-difference\r\n approximation of the Jacobian (for ``fprime=None``). If\r\n `epsfcn` is less than the machine precision, it is assumed\r\n that the relative errors in the functions are of the order of\r\n the machine precision.\r\n factor : float, optional\r\n A parameter determining the initial step bound\r\n (``factor * || diag * x||``). Should be in the interval\r\n ``(0.1, 100)``.\r\n diag : sequence, optional\r\n N positive entries that serve as a scale factors for the\r\n variables.\r\n\r\n Returns\r\n -------\r\n x : ndarray\r\n The solution (or the result of the last iteration for\r\n an unsuccessful call).\r\n infodict : dict\r\n A dictionary of optional outputs with the keys:\r\n\r\n ``nfev``\r\n number of function calls\r\n ``njev``\r\n number of Jacobian calls\r\n ``fvec``\r\n function evaluated at the output\r\n ``fjac``\r\n the orthogonal matrix, q, produced by the QR\r\n factorization of the final approximate Jacobian\r\n matrix, stored column wise\r\n ``r``\r\n upper triangular matrix produced by QR factorization\r\n of the same matrix\r\n ``qtf``\r\n the vector ``(transpose(q) * fvec)``\r\n\r\n ier : int\r\n An integer flag. Set to 1 if a solution was found, otherwise refer\r\n to `mesg` for more information.\r\n mesg : str\r\n If no solution is found, `mesg` details the cause of failure.\r\n\r\n See Also\r\n --------\r\n root : Interface to root finding algorithms for multivariate\r\n functions. 
See the 'hybr' `method` in particular.\r\n\r\n Notes\r\n -----\r\n ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.\r\n\r\n \"\"\"\r\n options = {'col_deriv': col_deriv,\r\n 'xtol': xtol,\r\n 'maxfev': maxfev,\r\n 'band': band,\r\n 'eps': epsfcn,\r\n 'factor': factor,\r\n 'diag': diag}\r\n\r\n res = _root_hybr(func, x0, args, jac=fprime, **options)\r\n if full_output:\r\n x = res['x']\r\n info = dict((k, res.get(k))\r\n for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)\r\n info['fvec'] = res['fun']\r\n return x, info, res['status'], res['message']\r\n else:\r\n status = res['status']\r\n msg = res['message']\r\n if status == 0:\r\n raise TypeError(msg)\r\n elif status == 1:\r\n pass\r\n elif status in [2, 3, 4, 5]:\r\n warnings.warn(msg, RuntimeWarning)\r\n else:\r\n raise TypeError(msg)\r\n return res['x']\r\n\r\n\r\ndef _root_hybr(func, x0, args=(), jac=None,\r\n col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,\r\n factor=100, diag=None, **unknown_options):\r\n \"\"\"\r\n Find the roots of a multivariate function using MINPACK's hybrd and\r\n hybrj routines (modified Powell method).\r\n\r\n Options\r\n -------\r\n col_deriv : bool\r\n Specify whether the Jacobian function computes derivatives down\r\n the columns (faster, because there is no transpose operation).\r\n xtol : float\r\n The calculation will terminate if the relative error between two\r\n consecutive iterates is at most `xtol`.\r\n maxfev : int\r\n The maximum number of calls to the function. If zero, then\r\n ``100*(N+1)`` is the maximum where N is the number of elements\r\n in `x0`.\r\n band : tuple\r\n If set to a two-sequence containing the number of sub- and\r\n super-diagonals within the band of the Jacobi matrix, the\r\n Jacobi matrix is considered banded (only for ``fprime=None``).\r\n eps : float\r\n A suitable step length for the forward-difference\r\n approximation of the Jacobian (for ``fprime=None``). If\r\n `eps` is less than the machine precision, it is assumed\r\n that the relative errors in the functions are of the order of\r\n the machine precision.\r\n factor : float\r\n A parameter determining the initial step bound\r\n (``factor * || diag * x||``). 
Should be in the interval\r\n ``(0.1, 100)``.\r\n diag : sequence\r\n N positive entries that serve as a scale factors for the\r\n variables.\r\n\r\n \"\"\"\r\n _check_unknown_options(unknown_options)\r\n epsfcn = eps\r\n\r\n x0 = asarray(x0).flatten()\r\n n = len(x0)\r\n if not isinstance(args, tuple):\r\n args = (args,)\r\n shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))\r\n if epsfcn is None:\r\n epsfcn = finfo(dtype).eps\r\n Dfun = jac\r\n if Dfun is None:\r\n if band is None:\r\n ml, mu = -10, -10\r\n else:\r\n ml, mu = band[:2]\r\n if maxfev == 0:\r\n maxfev = 200 * (n + 1)\r\n retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,\r\n ml, mu, epsfcn, factor, diag)\r\n else:\r\n _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))\r\n if (maxfev == 0):\r\n maxfev = 100 * (n + 1)\r\n retval = _minpack._hybrj(func, Dfun, x0, args, 1,\r\n col_deriv, xtol, maxfev, factor, diag)\r\n\r\n x, status = retval[0], retval[-1]\r\n\r\n errors = {0: \"Improper input parameters were entered.\",\r\n 1: \"The solution converged.\",\r\n 2: \"The number of calls to function has \"\r\n \"reached maxfev = %d.\" % maxfev,\r\n 3: \"xtol=%f is too small, no further improvement \"\r\n \"in the approximate\\n solution \"\r\n \"is possible.\" % xtol,\r\n 4: \"The iteration is not making good progress, as measured \"\r\n \"by the \\n improvement from the last five \"\r\n \"Jacobian evaluations.\",\r\n 5: \"The iteration is not making good progress, \"\r\n \"as measured by the \\n improvement from the last \"\r\n \"ten iterations.\",\r\n 'unknown': \"An error occurred.\"}\r\n\r\n info = retval[1]\r\n info['fun'] = info.pop('fvec')\r\n sol = OptimizeResult(x=x, success=(status == 1), status=status)\r\n sol.update(info)\r\n try:\r\n sol['message'] = errors[status]\r\n except KeyError:\r\n info['message'] = errors['unknown']\r\n\r\n return sol\r\n\r\n\r\ndef leastsq(func, x0, args=(), Dfun=None, full_output=0,\r\n col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,\r\n gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):\r\n \"\"\"\r\n Minimize the sum of squares of a set of equations.\r\n\r\n ::\r\n\r\n x = arg min(sum(func(y)**2,axis=0))\r\n y\r\n\r\n Parameters\r\n ----------\r\n func : callable\r\n should take at least one (possibly length N vector) argument and\r\n returns M floating point numbers. It must not return NaNs or\r\n fitting might fail.\r\n x0 : ndarray\r\n The starting estimate for the minimization.\r\n args : tuple, optional\r\n Any extra arguments to func are placed in this tuple.\r\n Dfun : callable, optional\r\n A function or method to compute the Jacobian of func with derivatives\r\n across the rows. If this is None, the Jacobian will be estimated.\r\n full_output : bool, optional\r\n non-zero to return all optional outputs.\r\n col_deriv : bool, optional\r\n non-zero to specify that the Jacobian function computes derivatives\r\n down the columns (faster, because there is no transpose operation).\r\n ftol : float, optional\r\n Relative error desired in the sum of squares.\r\n xtol : float, optional\r\n Relative error desired in the approximate solution.\r\n gtol : float, optional\r\n Orthogonality desired between the function vector and the columns of\r\n the Jacobian.\r\n maxfev : int, optional\r\n The maximum number of calls to the function. 
If `Dfun` is provided\r\n then the default `maxfev` is 100*(N+1) where N is the number of elements\r\n in x0, otherwise the default `maxfev` is 200*(N+1).\r\n epsfcn : float, optional\r\n A variable used in determining a suitable step length for the forward-\r\n difference approximation of the Jacobian (for Dfun=None).\r\n Normally the actual step length will be sqrt(epsfcn)*x\r\n If epsfcn is less than the machine precision, it is assumed that the\r\n relative errors are of the order of the machine precision.\r\n factor : float, optional\r\n A parameter determining the initial step bound\r\n (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.\r\n diag : sequence, optional\r\n N positive entries that serve as a scale factors for the variables.\r\n\r\n Returns\r\n -------\r\n x : ndarray\r\n The solution (or the result of the last iteration for an unsuccessful\r\n call).\r\n cov_x : ndarray\r\n Uses the fjac and ipvt optional outputs to construct an\r\n estimate of the jacobian around the solution. None if a\r\n singular matrix encountered (indicates very flat curvature in\r\n some direction). This matrix must be multiplied by the\r\n residual variance to get the covariance of the\r\n parameter estimates -- see curve_fit.\r\n infodict : dict\r\n a dictionary of optional outputs with the key s:\r\n\r\n ``nfev``\r\n The number of function calls\r\n ``fvec``\r\n The function evaluated at the output\r\n ``fjac``\r\n A permutation of the R matrix of a QR\r\n factorization of the final approximate\r\n Jacobian matrix, stored column wise.\r\n Together with ipvt, the covariance of the\r\n estimate can be approximated.\r\n ``ipvt``\r\n An integer array of length N which defines\r\n a permutation matrix, p, such that\r\n fjac*p = q*r, where r is upper triangular\r\n with diagonal elements of nonincreasing\r\n magnitude. Column j of p is column ipvt(j)\r\n of the identity matrix.\r\n ``qtf``\r\n The vector (transpose(q) * fvec).\r\n\r\n mesg : str\r\n A string message giving information about the cause of failure.\r\n ier : int\r\n An integer flag. If it is equal to 1, 2, 3 or 4, the solution was\r\n found. Otherwise, the solution was not found. 
In either case, the\r\n optional output variable 'mesg' gives more information.\r\n\r\n Notes\r\n -----\r\n \"leastsq\" is a wrapper around MINPACK's lmdif and lmder algorithms.\r\n\r\n cov_x is a Jacobian approximation to the Hessian of the least squares\r\n objective function.\r\n This approximation assumes that the objective function is based on the\r\n difference between some observed target data (ydata) and a (non-linear)\r\n function of the parameters `f(xdata, params)` ::\r\n\r\n func(params) = ydata - f(xdata, params)\r\n\r\n so that the objective function is ::\r\n\r\n min sum((ydata - f(xdata, params))**2, axis=0)\r\n params\r\n\r\n \"\"\"\r\n x0 = asarray(x0).flatten()\r\n n = len(x0)\r\n if not isinstance(args, tuple):\r\n args = (args,)\r\n shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)\r\n m = shape[0]\r\n if n > m:\r\n raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))\r\n if epsfcn is None:\r\n epsfcn = finfo(dtype).eps\r\n if Dfun is None:\r\n if maxfev == 0:\r\n maxfev = 200*(n + 1)\r\n retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,\r\n gtol, maxfev, epsfcn, factor, diag)\r\n else:\r\n if col_deriv:\r\n _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))\r\n else:\r\n _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))\r\n if maxfev == 0:\r\n maxfev = 100 * (n + 1)\r\n retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,\r\n ftol, xtol, gtol, maxfev, factor, diag)\r\n\r\n errors = {0: [\"Improper input parameters.\", TypeError],\r\n 1: [\"Both actual and predicted relative reductions \"\r\n \"in the sum of squares\\n are at most %f\" % ftol, None],\r\n 2: [\"The relative error between two consecutive \"\r\n \"iterates is at most %f\" % xtol, None],\r\n 3: [\"Both actual and predicted relative reductions in \"\r\n \"the sum of squares\\n are at most %f and the \"\r\n \"relative error between two consecutive \"\r\n \"iterates is at \\n most %f\" % (ftol, xtol), None],\r\n 4: [\"The cosine of the angle between func(x) and any \"\r\n \"column of the\\n Jacobian is at most %f in \"\r\n \"absolute value\" % gtol, None],\r\n 5: [\"Number of calls to function has reached \"\r\n \"maxfev = %d.\" % maxfev, ValueError],\r\n 6: [\"ftol=%f is too small, no further reduction \"\r\n \"in the sum of squares\\n is possible.\"\"\" % ftol,\r\n ValueError],\r\n 7: [\"xtol=%f is too small, no further improvement in \"\r\n \"the approximate\\n solution is possible.\" % xtol,\r\n ValueError],\r\n 8: [\"gtol=%f is too small, func(x) is orthogonal to the \"\r\n \"columns of\\n the Jacobian to machine \"\r\n \"precision.\" % gtol, ValueError],\r\n 'unknown': [\"Unknown error.\", TypeError]}\r\n\r\n info = retval[-1] # The FORTRAN return value\r\n\r\n if info not in [1, 2, 3, 4] and not full_output:\r\n if info in [5, 6, 7, 8]:\r\n warnings.warn(errors[info][0], RuntimeWarning)\r\n else:\r\n try:\r\n raise errors[info][1](errors[info][0])\r\n except KeyError:\r\n raise errors['unknown'][1](errors['unknown'][0])\r\n\r\n mesg = errors[info][0]\r\n if full_output:\r\n cov_x = None\r\n if info in [1, 2, 3, 4]:\r\n from numpy.dual import inv\r\n from numpy.linalg import LinAlgError\r\n perm = take(eye(n), retval[1]['ipvt'] - 1, 0)\r\n r = triu(transpose(retval[1]['fjac'])[:n, :])\r\n R = dot(r, perm)\r\n try:\r\n cov_x = inv(dot(transpose(R), R))\r\n except (LinAlgError, ValueError):\r\n pass\r\n return (retval[0], cov_x) + retval[1:-1] + (mesg, info)\r\n else:\r\n return (retval[0], info)\r\n\r\n\r\ndef 
_wrap_func(func, xdata, ydata, weights):\r\n if weights is None:\r\n def func_wrapped(params):\r\n return func(xdata, *params) - ydata\r\n else:\r\n def func_wrapped(params):\r\n return weights * (func(xdata, *params) - ydata)\r\n return func_wrapped\r\n\r\n\r\ndef _wrap_jac(jac, xdata, weights):\r\n if weights is None:\r\n def jac_wrapped(params):\r\n return jac(xdata, *params)\r\n else:\r\n def jac_wrapped(params):\r\n return weights[:, np.newaxis] * np.asarray(jac(xdata, *params))\r\n return jac_wrapped\r\n\r\n\r\ndef _initialize_feasible(lb, ub):\r\n p0 = np.ones_like(lb)\r\n lb_finite = np.isfinite(lb)\r\n ub_finite = np.isfinite(ub)\r\n\r\n mask = lb_finite & ub_finite\r\n p0[mask] = 0.5 * (lb[mask] + ub[mask])\r\n \r\n mask = lb_finite & ~ub_finite\r\n p0[mask] = lb[mask] + 1\r\n \r\n mask = ~lb_finite & ub_finite\r\n p0[mask] = ub[mask] - 1\r\n\r\n return p0\r\n\r\n\r\ndef curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,\r\n check_finite=True, bounds=(-np.inf, np.inf), method=None,\r\n jac=None, **kwargs):\r\n \"\"\"\r\n Use non-linear least squares to fit a function, f, to data.\r\n\r\n Assumes ``ydata = f(xdata, *params) + eps``\r\n\r\n Parameters\r\n ----------\r\n f : callable\r\n The model function, f(x, ...). It must take the independent\r\n variable as the first argument and the parameters to fit as\r\n separate remaining arguments.\r\n xdata : An M-length sequence or an (k,M)-shaped array\r\n for functions with k predictors.\r\n The independent variable where the data is measured.\r\n ydata : M-length sequence\r\n The dependent data --- nominally f(xdata, ...)\r\n p0 : None, scalar, or N-length sequence, optional\r\n Initial guess for the parameters. If None, then the initial\r\n values will all be 1 (if the number of parameters for the function\r\n can be determined using introspection, otherwise a ValueError\r\n is raised).\r\n sigma : None or M-length sequence, optional\r\n If not None, the uncertainties in the ydata array. These are used as\r\n weights in the least-squares problem\r\n i.e. minimising ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )``\r\n If None, the uncertainties are assumed to be 1.\r\n absolute_sigma : bool, optional\r\n If False, `sigma` denotes relative weights of the data points.\r\n The returned covariance matrix `pcov` is based on *estimated*\r\n errors in the data, and is not affected by the overall\r\n magnitude of the values in `sigma`. Only the relative\r\n magnitudes of the `sigma` values matter.\r\n\r\n If True, `sigma` describes one standard deviation errors of\r\n the input data points. The estimated covariance in `pcov` is\r\n based on these values.\r\n check_finite : bool, optional\r\n If True, check that the input arrays do not contain nans of infs,\r\n and raise a ValueError if they do. Setting this parameter to\r\n False may silently produce nonsensical results if the input arrays\r\n do contain nans. Default is True.\r\n bounds : 2-tuple of array_like, optional\r\n Lower and upper bounds on independent variables. Defaults to no bounds. \r\n Each element of the tuple must be either an array with the length equal\r\n to the number of parameters, or a scalar (in which case the bound is\r\n taken to be the same for all parameters.) Use ``np.inf`` with an\r\n appropriate sign to disable bounds on all or some parameters.\r\n\r\n .. versionadded:: 0.17\r\n method : {'lm', 'trf', 'dogbox'}, optional\r\n Method to use for optimization. 
See `least_squares` for more details.\r\n Default is 'lm' for unconstrained problems and 'trf' if `bounds` are\r\n provided. The method 'lm' won't work when the number of observations\r\n is less than the number of variables, use 'trf' or 'dogbox' in this\r\n case.\r\n\r\n .. versionadded:: 0.17\r\n jac : callable, string or None, optional\r\n Function with signature ``jac(x, ...)`` which computes the Jacobian\r\n matrix of the model function with respect to parameters as a dense\r\n array_like structure. It will be scaled according to provided `sigma`.\r\n If None (default), the Jacobian will be estimated numerically.\r\n String keywords for 'trf' and 'dogbox' methods can be used to select\r\n a finite difference scheme, see `least_squares`.\r\n\r\n .. versionadded:: 0.18\r\n kwargs\r\n Keyword arguments passed to `leastsq` for ``method='lm'`` or\r\n `least_squares` otherwise.\r\n\r\n Returns\r\n -------\r\n popt : array\r\n Optimal values for the parameters so that the sum of the squared error\r\n of ``f(xdata, *popt) - ydata`` is minimized\r\n pcov : 2d array\r\n The estimated covariance of popt. The diagonals provide the variance\r\n of the parameter estimate. To compute one standard deviation errors\r\n on the parameters use ``perr = np.sqrt(np.diag(pcov))``.\r\n\r\n How the `sigma` parameter affects the estimated covariance\r\n depends on `absolute_sigma` argument, as described above.\r\n\r\n If the Jacobian matrix at the solution doesn't have a full rank, then\r\n 'lm' method returns a matrix filled with ``np.inf``, on the other hand\r\n 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute\r\n the covariance matrix.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n if either `ydata` or `xdata` contain NaNs, or if incompatible options\r\n are used.\r\n\r\n RuntimeError\r\n if the least-squares minimization fails.\r\n\r\n OptimizeWarning\r\n if covariance of the parameters can not be estimated.\r\n\r\n See Also\r\n --------\r\n least_squares : Minimize the sum of squares of nonlinear functions.\r\n stats.linregress : Calculate a linear least squares regression for two sets\r\n of measurements.\r\n\r\n Notes\r\n -----\r\n With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm\r\n through `leastsq`. Note that this algorithm can only deal with\r\n unconstrained problems.\r\n\r\n Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to\r\n the docstring of `least_squares` for more information.\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> from scipy.optimize import curve_fit\r\n >>> def func(x, a, b, c):\r\n ... 
return a * np.exp(-b * x) + c\r\n\r\n >>> xdata = np.linspace(0, 4, 50)\r\n >>> y = func(xdata, 2.5, 1.3, 0.5)\r\n >>> ydata = y + 0.2 * np.random.normal(size=len(xdata))\r\n\r\n >>> popt, pcov = curve_fit(func, xdata, ydata)\r\n\r\n Constrain the optimization to the region of ``0 < a < 3``, ``0 < b < 2``\r\n and ``0 < c < 1``:\r\n\r\n >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 2., 1.]))\r\n\r\n \"\"\"\r\n if p0 is None:\r\n # determine number of parameters by inspecting the function\r\n from scipy._lib._util import getargspec_no_self as _getargspec\r\n args, varargs, varkw, defaults = _getargspec(f)\r\n if len(args) < 2:\r\n raise ValueError(\"Unable to determine number of fit parameters.\")\r\n n = len(args) - 1\r\n else:\r\n p0 = np.atleast_1d(p0)\r\n n = p0.size\r\n\r\n lb, ub = prepare_bounds(bounds, n)\r\n if p0 is None:\r\n p0 = _initialize_feasible(lb, ub)\r\n\r\n bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))\r\n if method is None:\r\n if bounded_problem:\r\n method = 'trf'\r\n else:\r\n method = 'lm'\r\n\r\n if method == 'lm' and bounded_problem:\r\n raise ValueError(\"Method 'lm' only works for unconstrained problems. \"\r\n \"Use 'trf' or 'dogbox' instead.\")\r\n\r\n # NaNs can not be handled\r\n if check_finite:\r\n ydata = np.asarray_chkfinite(ydata)\r\n else:\r\n ydata = np.asarray(ydata)\r\n\r\n if isinstance(xdata, (list, tuple, np.ndarray)):\r\n # `xdata` is passed straight to the user-defined `f`, so allow\r\n # non-array_like `xdata`.\r\n if check_finite:\r\n xdata = np.asarray_chkfinite(xdata)\r\n else:\r\n xdata = np.asarray(xdata)\r\n\r\n weights = 1.0 / asarray(sigma) if sigma is not None else None\r\n func = _wrap_func(f, xdata, ydata, weights)\r\n if callable(jac):\r\n jac = _wrap_jac(jac, xdata, weights)\r\n elif jac is None and method != 'lm':\r\n jac = '2-point'\r\n\r\n if method == 'lm':\r\n # Remove full_output from kwargs, otherwise we're passing it in twice.\r\n return_full = kwargs.pop('full_output', False)\r\n res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)\r\n popt, pcov, infodict, errmsg, ier = res\r\n cost = np.sum(infodict['fvec'] ** 2)\r\n if ier not in [1, 2, 3, 4]:\r\n raise RuntimeError(\"Optimal parameters not found: \" + errmsg)\r\n else:\r\n res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,\r\n **kwargs)\r\n\r\n if not res.success:\r\n raise RuntimeError(\"Optimal parameters not found: \" + res.message)\r\n\r\n cost = 2 * res.cost # res.cost is half sum of squares!\r\n popt = res.x\r\n\r\n # Do Moore-Penrose inverse discarding zero singular values.\r\n _, s, VT = svd(res.jac, full_matrices=False)\r\n threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]\r\n s = s[s > threshold]\r\n VT = VT[:s.size]\r\n pcov = np.dot(VT.T / s**2, VT)\r\n return_full = False\r\n\r\n warn_cov = False\r\n if pcov is None:\r\n # indeterminate covariance\r\n pcov = zeros((len(popt), len(popt)), dtype=float)\r\n pcov.fill(inf)\r\n warn_cov = True\r\n elif not absolute_sigma:\r\n if ydata.size > p0.size:\r\n s_sq = cost / (ydata.size - p0.size)\r\n pcov = pcov * s_sq\r\n else:\r\n pcov.fill(inf)\r\n warn_cov = True\r\n\r\n if warn_cov:\r\n warnings.warn('Covariance of the parameters could not be estimated',\r\n category=OptimizeWarning)\r\n\r\n if return_full:\r\n return popt, pcov, infodict, errmsg, ier\r\n else:\r\n return popt, pcov\r\n\r\n\r\ndef check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):\r\n \"\"\"Perform a simple check on the gradient for correctness.\r\n\r\n \"\"\"\r\n\r\n x = 
atleast_1d(x0)\r\n n = len(x)\r\n x = x.reshape((n,))\r\n fvec = atleast_1d(fcn(x, *args))\r\n m = len(fvec)\r\n fvec = fvec.reshape((m,))\r\n ldfjac = m\r\n fjac = atleast_1d(Dfcn(x, *args))\r\n fjac = fjac.reshape((m, n))\r\n if col_deriv == 0:\r\n fjac = transpose(fjac)\r\n\r\n xp = zeros((n,), float)\r\n err = zeros((m,), float)\r\n fvecp = None\r\n _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)\r\n\r\n fvecp = atleast_1d(fcn(xp, *args))\r\n fvecp = fvecp.reshape((m,))\r\n _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)\r\n\r\n good = (product(greater(err, 0.5), axis=0))\r\n\r\n return (good, err)\r\n\r\n\r\ndef _del2(p0, p1, d):\r\n return p0 - np.square(p1 - p0) / d\r\n\r\n\r\ndef _relerr(actual, desired):\r\n return (actual - desired) / desired\r\n\r\n\r\ndef _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):\r\n p0 = x0\r\n for i in range(maxiter):\r\n p1 = func(p0, *args)\r\n if use_accel:\r\n p2 = func(p1, *args)\r\n d = p2 - 2.0 * p1 + p0\r\n p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)\r\n else:\r\n p = p1\r\n relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)\r\n if np.all(np.abs(relerr) < xtol):\r\n return p\r\n p0 = p\r\n msg = \"Failed to converge after %d iterations, value is %s\" % (maxiter, p)\r\n raise RuntimeError(msg)\r\n\r\n\r\ndef fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):\r\n \"\"\"\r\n Find a fixed point of the function.\r\n\r\n Given a function of one or more variables and a starting point, find a\r\n fixed-point of the function: i.e. where ``func(x0) == x0``.\r\n\r\n Parameters\r\n ----------\r\n func : function\r\n Function to evaluate.\r\n x0 : array_like\r\n Fixed point of function.\r\n args : tuple, optional\r\n Extra arguments to `func`.\r\n xtol : float, optional\r\n Convergence tolerance, defaults to 1e-08.\r\n maxiter : int, optional\r\n Maximum number of iterations, defaults to 500.\r\n method : {\"del2\", \"iteration\"}, optional\r\n Method of finding the fixed-point, defaults to \"del2\"\r\n which uses Steffensen's Method with Aitken's ``Del^2``\r\n convergence acceleration [1]_. The \"iteration\" method simply iterates\r\n the function until convergence is detected, without attempting to\r\n accelerate the convergence.\r\n\r\n References\r\n ----------\r\n .. [1] Burden, Faires, \"Numerical Analysis\", 5th edition, pg. 80\r\n\r\n Examples\r\n --------\r\n >>> from scipy import optimize\r\n >>> def func(x, c1, c2):\r\n ... 
return np.sqrt(c1/(x+c2))\r\n >>> c1 = np.array([10,12.])\r\n >>> c2 = np.array([3, 5.])\r\n >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))\r\n array([ 1.4920333 , 1.37228132])\r\n\r\n \"\"\"\r\n use_accel = {'del2': True, 'iteration': False}[method]\r\n x0 = _asarray_validated(x0, as_inexact=True)\r\n return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)\r\n", "\"\"\"Tests for functions in special_matrices.py.\"\"\"\r\n\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\nimport numpy as np\r\nfrom numpy import arange, add, array, eye, copy, sqrt\r\nfrom numpy.testing import (TestCase, run_module_suite, assert_raises,\r\n assert_equal, assert_array_equal, assert_array_almost_equal,\r\n assert_allclose)\r\n\r\nfrom scipy._lib.six import xrange\r\n\r\nfrom scipy import fftpack\r\nfrom scipy.special import comb\r\nfrom scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie,\r\n companion, tri, triu, tril, kron, block_diag,\r\n helmert, hilbert, invhilbert, pascal, invpascal, dft)\r\nfrom numpy.linalg import cond\r\n\r\n\r\ndef get_mat(n):\r\n data = arange(n)\r\n data = add.outer(data,data)\r\n return data\r\n\r\n\r\nclass TestTri(TestCase):\r\n def test_basic(self):\r\n assert_equal(tri(4),array([[1,0,0,0],\r\n [1,1,0,0],\r\n [1,1,1,0],\r\n [1,1,1,1]]))\r\n assert_equal(tri(4,dtype='f'),array([[1,0,0,0],\r\n [1,1,0,0],\r\n [1,1,1,0],\r\n [1,1,1,1]],'f'))\r\n\r\n def test_diag(self):\r\n assert_equal(tri(4,k=1),array([[1,1,0,0],\r\n [1,1,1,0],\r\n [1,1,1,1],\r\n [1,1,1,1]]))\r\n assert_equal(tri(4,k=-1),array([[0,0,0,0],\r\n [1,0,0,0],\r\n [1,1,0,0],\r\n [1,1,1,0]]))\r\n\r\n def test_2d(self):\r\n assert_equal(tri(4,3),array([[1,0,0],\r\n [1,1,0],\r\n [1,1,1],\r\n [1,1,1]]))\r\n assert_equal(tri(3,4),array([[1,0,0,0],\r\n [1,1,0,0],\r\n [1,1,1,0]]))\r\n\r\n def test_diag2d(self):\r\n assert_equal(tri(3,4,k=2),array([[1,1,1,0],\r\n [1,1,1,1],\r\n [1,1,1,1]]))\r\n assert_equal(tri(4,3,k=-2),array([[0,0,0],\r\n [0,0,0],\r\n [1,0,0],\r\n [1,1,0]]))\r\n\r\n\r\nclass TestTril(TestCase):\r\n def test_basic(self):\r\n a = (100*get_mat(5)).astype('l')\r\n b = a.copy()\r\n for k in range(5):\r\n for l in range(k+1,5):\r\n b[k,l] = 0\r\n assert_equal(tril(a),b)\r\n\r\n def test_diag(self):\r\n a = (100*get_mat(5)).astype('f')\r\n b = a.copy()\r\n for k in range(5):\r\n for l in range(k+3,5):\r\n b[k,l] = 0\r\n assert_equal(tril(a,k=2),b)\r\n b = a.copy()\r\n for k in range(5):\r\n for l in range(max((k-1,0)),5):\r\n b[k,l] = 0\r\n assert_equal(tril(a,k=-2),b)\r\n\r\n\r\nclass TestTriu(TestCase):\r\n def test_basic(self):\r\n a = (100*get_mat(5)).astype('l')\r\n b = a.copy()\r\n for k in range(5):\r\n for l in range(k+1,5):\r\n b[l,k] = 0\r\n assert_equal(triu(a),b)\r\n\r\n def test_diag(self):\r\n a = (100*get_mat(5)).astype('f')\r\n b = a.copy()\r\n for k in range(5):\r\n for l in range(max((k-1,0)),5):\r\n b[l,k] = 0\r\n assert_equal(triu(a,k=2),b)\r\n b = a.copy()\r\n for k in range(5):\r\n for l in range(k+3,5):\r\n b[l,k] = 0\r\n assert_equal(triu(a,k=-2),b)\r\n\r\n\r\nclass TestToeplitz(TestCase):\r\n\r\n def test_basic(self):\r\n y = toeplitz([1,2,3])\r\n assert_array_equal(y,[[1,2,3],[2,1,2],[3,2,1]])\r\n y = toeplitz([1,2,3],[1,4,5])\r\n assert_array_equal(y,[[1,4,5],[2,1,4],[3,2,1]])\r\n\r\n def test_complex_01(self):\r\n data = (1.0 + arange(3.0)) * (1.0 + 1.0j)\r\n x = copy(data)\r\n t = toeplitz(x)\r\n # Calling toeplitz should not change x.\r\n assert_array_equal(x, data)\r\n # According to the docstring, x should be the first 
column of t.\r\n col0 = t[:,0]\r\n assert_array_equal(col0, data)\r\n assert_array_equal(t[0,1:], data[1:].conj())\r\n\r\n def test_scalar_00(self):\r\n \"\"\"Scalar arguments still produce a 2D array.\"\"\"\r\n t = toeplitz(10)\r\n assert_array_equal(t, [[10]])\r\n t = toeplitz(10, 20)\r\n assert_array_equal(t, [[10]])\r\n\r\n def test_scalar_01(self):\r\n c = array([1,2,3])\r\n t = toeplitz(c, 1)\r\n assert_array_equal(t, [[1],[2],[3]])\r\n\r\n def test_scalar_02(self):\r\n c = array([1,2,3])\r\n t = toeplitz(c, array(1))\r\n assert_array_equal(t, [[1],[2],[3]])\r\n\r\n def test_scalar_03(self):\r\n c = array([1,2,3])\r\n t = toeplitz(c, array([1]))\r\n assert_array_equal(t, [[1],[2],[3]])\r\n\r\n def test_scalar_04(self):\r\n r = array([10,2,3])\r\n t = toeplitz(1, r)\r\n assert_array_equal(t, [[1,2,3]])\r\n\r\n\r\nclass TestHankel(TestCase):\r\n def test_basic(self):\r\n y = hankel([1,2,3])\r\n assert_array_equal(y, [[1,2,3], [2,3,0], [3,0,0]])\r\n y = hankel([1,2,3], [3,4,5])\r\n assert_array_equal(y, [[1,2,3], [2,3,4], [3,4,5]])\r\n\r\n\r\nclass TestCirculant(TestCase):\r\n def test_basic(self):\r\n y = circulant([1,2,3])\r\n assert_array_equal(y, [[1,3,2], [2,1,3], [3,2,1]])\r\n\r\n\r\nclass TestHadamard(TestCase):\r\n\r\n def test_basic(self):\r\n\r\n y = hadamard(1)\r\n assert_array_equal(y, [[1]])\r\n\r\n y = hadamard(2, dtype=float)\r\n assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]])\r\n\r\n y = hadamard(4)\r\n assert_array_equal(y, [[1,1,1,1], [1,-1,1,-1], [1,1,-1,-1], [1,-1,-1,1]])\r\n\r\n assert_raises(ValueError, hadamard, 0)\r\n assert_raises(ValueError, hadamard, 5)\r\n\r\n\r\nclass TestLeslie(TestCase):\r\n\r\n def test_bad_shapes(self):\r\n assert_raises(ValueError, leslie, [[1,1],[2,2]], [3,4,5])\r\n assert_raises(ValueError, leslie, [3,4,5], [[1,1],[2,2]])\r\n assert_raises(ValueError, leslie, [1,2], [1,2])\r\n assert_raises(ValueError, leslie, [1], [])\r\n\r\n def test_basic(self):\r\n a = leslie([1, 2, 3], [0.25, 0.5])\r\n expected = array([\r\n [1.0, 2.0, 3.0],\r\n [0.25, 0.0, 0.0],\r\n [0.0, 0.5, 0.0]])\r\n assert_array_equal(a, expected)\r\n\r\n\r\nclass TestCompanion(TestCase):\r\n\r\n def test_bad_shapes(self):\r\n assert_raises(ValueError, companion, [[1,1],[2,2]])\r\n assert_raises(ValueError, companion, [0,4,5])\r\n assert_raises(ValueError, companion, [1])\r\n assert_raises(ValueError, companion, [])\r\n\r\n def test_basic(self):\r\n c = companion([1, 2, 3])\r\n expected = array([\r\n [-2.0, -3.0],\r\n [1.0, 0.0]])\r\n assert_array_equal(c, expected)\r\n\r\n c = companion([2.0, 5.0, -10.0])\r\n expected = array([\r\n [-2.5, 5.0],\r\n [1.0, 0.0]])\r\n assert_array_equal(c, expected)\r\n\r\n\r\nclass TestBlockDiag:\r\n def test_basic(self):\r\n x = block_diag(eye(2), [[1,2], [3,4], [5,6]], [[1, 2, 3]])\r\n assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0],\r\n [0, 1, 0, 0, 0, 0, 0],\r\n [0, 0, 1, 2, 0, 0, 0],\r\n [0, 0, 3, 4, 0, 0, 0],\r\n [0, 0, 5, 6, 0, 0, 0],\r\n [0, 0, 0, 0, 1, 2, 3]])\r\n\r\n def test_dtype(self):\r\n x = block_diag([[1.5]])\r\n assert_equal(x.dtype, float)\r\n\r\n x = block_diag([[True]])\r\n assert_equal(x.dtype, bool)\r\n\r\n def test_mixed_dtypes(self):\r\n actual = block_diag([[1]], [[1j]])\r\n desired = np.array([[1, 0], [0, 1j]])\r\n assert_array_equal(actual, desired)\r\n\r\n def test_scalar_and_1d_args(self):\r\n a = block_diag(1)\r\n assert_equal(a.shape, (1,1))\r\n assert_array_equal(a, [[1]])\r\n\r\n a = block_diag([2,3], 4)\r\n assert_array_equal(a, [[2, 3, 0], [0, 0, 4]])\r\n\r\n def test_bad_arg(self):\r\n 
assert_raises(ValueError, block_diag, [[[1]]])\r\n\r\n def test_no_args(self):\r\n a = block_diag()\r\n assert_equal(a.ndim, 2)\r\n assert_equal(a.nbytes, 0)\r\n \r\n def test_empty_matrix_arg(self):\r\n # regression test for gh-4596: check the shape of the result for empty matrix inputs\r\n a = block_diag([[1, 0], [0, 1]],\r\n [],\r\n [[2, 3], [4, 5], [6, 7]])\r\n assert_array_equal(a, [[1, 0, 0, 0],\r\n [0, 1, 0, 0],\r\n [0, 0, 2, 3],\r\n [0, 0, 4, 5],\r\n [0, 0, 6, 7]])\r\n\r\n\r\nclass TestKron:\r\n\r\n def test_basic(self):\r\n\r\n a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]]))\r\n assert_array_equal(a, array([[1, 1, 1, 2, 2, 2],\r\n [3, 3, 3, 4, 4, 4]]))\r\n\r\n m1 = array([[1, 2], [3, 4]])\r\n m2 = array([[10], [11]])\r\n a = kron(m1, m2)\r\n expected = array([[10, 20],\r\n [11, 22],\r\n [30, 40],\r\n [33, 44]])\r\n assert_array_equal(a, expected)\r\n\r\n\r\nclass TestHelmert(TestCase):\r\n\r\n def test_orthogonality(self):\r\n for n in range(1, 7):\r\n H = helmert(n, full=True)\r\n I = np.eye(n)\r\n assert_allclose(H.dot(H.T), I, atol=1e-12)\r\n assert_allclose(H.T.dot(H), I, atol=1e-12)\r\n\r\n def test_subspace(self):\r\n for n in range(2, 7):\r\n H_full = helmert(n, full=True)\r\n H_partial = helmert(n)\r\n for U in H_full[1:, :].T, H_partial.T:\r\n C = np.eye(n) - np.ones((n, n)) / n\r\n assert_allclose(U.dot(U.T), C)\r\n assert_allclose(U.T.dot(U), np.eye(n-1), atol=1e-12)\r\n\r\n\r\nclass TestHilbert(TestCase):\r\n\r\n def test_basic(self):\r\n h3 = array([[1.0, 1/2., 1/3.],\r\n [1/2., 1/3., 1/4.],\r\n [1/3., 1/4., 1/5.]])\r\n assert_array_almost_equal(hilbert(3), h3)\r\n\r\n assert_array_equal(hilbert(1), [[1.0]])\r\n\r\n h0 = hilbert(0)\r\n assert_equal(h0.shape, (0,0))\r\n\r\n\r\nclass TestInvHilbert(TestCase):\r\n\r\n def test_basic(self):\r\n invh1 = array([[1]])\r\n assert_array_equal(invhilbert(1, exact=True), invh1)\r\n assert_array_equal(invhilbert(1), invh1)\r\n\r\n invh2 = array([[4, -6],\r\n [-6, 12]])\r\n assert_array_equal(invhilbert(2, exact=True), invh2)\r\n assert_array_almost_equal(invhilbert(2), invh2)\r\n\r\n invh3 = array([[9, -36, 30],\r\n [-36, 192, -180],\r\n [30, -180, 180]])\r\n assert_array_equal(invhilbert(3, exact=True), invh3)\r\n assert_array_almost_equal(invhilbert(3), invh3)\r\n\r\n invh4 = array([[16, -120, 240, -140],\r\n [-120, 1200, -2700, 1680],\r\n [240, -2700, 6480, -4200],\r\n [-140, 1680, -4200, 2800]])\r\n assert_array_equal(invhilbert(4, exact=True), invh4)\r\n assert_array_almost_equal(invhilbert(4), invh4)\r\n\r\n invh5 = array([[25, -300, 1050, -1400, 630],\r\n [-300, 4800, -18900, 26880, -12600],\r\n [1050, -18900, 79380, -117600, 56700],\r\n [-1400, 26880, -117600, 179200, -88200],\r\n [630, -12600, 56700, -88200, 44100]])\r\n assert_array_equal(invhilbert(5, exact=True), invh5)\r\n assert_array_almost_equal(invhilbert(5), invh5)\r\n\r\n invh17 = array([\r\n [289, -41616, 1976760, -46124400, 629598060, -5540462928,\r\n 33374693352, -143034400080, 446982500250, -1033026222800,\r\n 1774926873720, -2258997839280, 2099709530100, -1384423866000,\r\n 613101997800, -163493866080, 19835652870],\r\n [-41616, 7990272, -426980160, 10627061760, -151103534400, 1367702848512,\r\n -8410422724704, 36616806420480, -115857864064800, 270465047424000,\r\n -468580694662080, 600545887119360, -561522320049600, 372133135180800,\r\n -165537539406000, 44316454993920, -5395297580640],\r\n [1976760, -426980160, 24337869120, -630981792000, 9228108708000,\r\n -85267724461920, 532660105897920, -2348052711713280, 7504429831470000,\r\n 
-17664748409880000, 30818191841236800, -39732544853164800,\r\n 37341234283298400, -24857330514030000, 11100752642520000,\r\n -2982128117299200, 364182586693200],\r\n [-46124400, 10627061760, -630981792000, 16826181120000,\r\n -251209625940000, 2358021022156800, -14914482965141760,\r\n 66409571644416000, -214015221119700000, 507295338950400000,\r\n -890303319857952000, 1153715376477081600, -1089119333262870000,\r\n 727848632044800000, -326170262829600000, 87894302404608000,\r\n -10763618673376800],\r\n [629598060, -151103534400, 9228108708000,\r\n -251209625940000, 3810012660090000, -36210360321495360,\r\n 231343968720664800, -1038687206500944000, 3370739732635275000,\r\n -8037460526495400000, 14178080368737885600, -18454939322943942000,\r\n 17489975175339030000, -11728977435138600000, 5272370630081100000,\r\n -1424711708039692800, 174908803442373000],\r\n [-5540462928, 1367702848512, -85267724461920, 2358021022156800,\r\n -36210360321495360, 347619459086355456, -2239409617216035264,\r\n 10124803292907663360, -33052510749726468000, 79217210949138662400,\r\n -140362995650505067440, 183420385176741672960, -174433352415381259200,\r\n 117339159519533952000, -52892422160973595200, 14328529177999196160,\r\n -1763080738699119840],\r\n [33374693352, -8410422724704, 532660105897920,\r\n -14914482965141760, 231343968720664800, -2239409617216035264,\r\n 14527452132196331328, -66072377044391477760, 216799987176909536400,\r\n -521925895055522958000, 928414062734059661760, -1217424500995626443520,\r\n 1161358898976091015200, -783401860847777371200, 354015418167362952000,\r\n -96120549902411274240, 11851820521255194480],\r\n [-143034400080, 36616806420480, -2348052711713280, 66409571644416000,\r\n -1038687206500944000, 10124803292907663360, -66072377044391477760,\r\n 302045152202932469760, -995510145200094810000, 2405996923185123840000,\r\n -4294704507885446054400, 5649058909023744614400,\r\n -5403874060541811254400, 3654352703663101440000,\r\n -1655137020003255360000, 450325202737117593600, -55630994283442749600],\r\n [446982500250, -115857864064800, 7504429831470000, -214015221119700000,\r\n 3370739732635275000, -33052510749726468000, 216799987176909536400,\r\n -995510145200094810000, 3293967392206196062500,\r\n -7988661659013106500000, 14303908928401362270000,\r\n -18866974090684772052000, 18093328327706957325000,\r\n -12263364009096700500000, 5565847995255512250000,\r\n -1517208935002984080000, 187754605706619279900],\r\n [-1033026222800, 270465047424000, -17664748409880000,\r\n 507295338950400000, -8037460526495400000, 79217210949138662400,\r\n -521925895055522958000, 2405996923185123840000,\r\n -7988661659013106500000, 19434404971634224000000,\r\n -34894474126569249192000, 46141453390504792320000,\r\n -44349976506971935800000, 30121928988527376000000,\r\n -13697025107665828500000, 3740200989399948902400,\r\n -463591619028689580000],\r\n [1774926873720, -468580694662080,\r\n 30818191841236800, -890303319857952000, 14178080368737885600,\r\n -140362995650505067440, 928414062734059661760, -4294704507885446054400,\r\n 14303908928401362270000, -34894474126569249192000,\r\n 62810053427824648545600, -83243376594051600326400,\r\n 80177044485212743068000, -54558343880470209780000,\r\n 24851882355348879230400, -6797096028813368678400, 843736746632215035600],\r\n [-2258997839280, 600545887119360, -39732544853164800,\r\n 1153715376477081600, -18454939322943942000, 183420385176741672960,\r\n -1217424500995626443520, 5649058909023744614400,\r\n -18866974090684772052000, 46141453390504792320000,\r\n 
-83243376594051600326400, 110552468520163390156800,\r\n -106681852579497947388000, 72720410752415168870400,\r\n -33177973900974346080000, 9087761081682520473600,\r\n -1129631016152221783200],\r\n [2099709530100, -561522320049600, 37341234283298400,\r\n -1089119333262870000, 17489975175339030000, -174433352415381259200,\r\n 1161358898976091015200, -5403874060541811254400,\r\n 18093328327706957325000, -44349976506971935800000,\r\n 80177044485212743068000, -106681852579497947388000,\r\n 103125790826848015808400, -70409051543137015800000,\r\n 32171029219823375700000, -8824053728865840192000,\r\n 1098252376814660067000],\r\n [-1384423866000, 372133135180800,\r\n -24857330514030000, 727848632044800000, -11728977435138600000,\r\n 117339159519533952000, -783401860847777371200, 3654352703663101440000,\r\n -12263364009096700500000, 30121928988527376000000,\r\n -54558343880470209780000, 72720410752415168870400,\r\n -70409051543137015800000, 48142941226076592000000,\r\n -22027500987368499000000, 6049545098753157120000,\r\n -753830033789944188000],\r\n [613101997800, -165537539406000,\r\n 11100752642520000, -326170262829600000, 5272370630081100000,\r\n -52892422160973595200, 354015418167362952000, -1655137020003255360000,\r\n 5565847995255512250000, -13697025107665828500000,\r\n 24851882355348879230400, -33177973900974346080000,\r\n 32171029219823375700000, -22027500987368499000000,\r\n 10091416708498869000000, -2774765838662800128000, 346146444087219270000],\r\n [-163493866080, 44316454993920, -2982128117299200, 87894302404608000,\r\n -1424711708039692800, 14328529177999196160, -96120549902411274240,\r\n 450325202737117593600, -1517208935002984080000, 3740200989399948902400,\r\n -6797096028813368678400, 9087761081682520473600,\r\n -8824053728865840192000, 6049545098753157120000,\r\n -2774765838662800128000, 763806510427609497600, -95382575704033754400],\r\n [19835652870, -5395297580640, 364182586693200, -10763618673376800,\r\n 174908803442373000, -1763080738699119840, 11851820521255194480,\r\n -55630994283442749600, 187754605706619279900, -463591619028689580000,\r\n 843736746632215035600, -1129631016152221783200, 1098252376814660067000,\r\n -753830033789944188000, 346146444087219270000, -95382575704033754400,\r\n 11922821963004219300]\r\n ])\r\n assert_array_equal(invhilbert(17, exact=True), invh17)\r\n assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12)\r\n\r\n def test_inverse(self):\r\n for n in xrange(1, 10):\r\n a = hilbert(n)\r\n b = invhilbert(n)\r\n # The Hilbert matrix is increasingly badly conditioned,\r\n # so take that into account in the test\r\n c = cond(a)\r\n assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)\r\n\r\n\r\nclass TestPascal(TestCase):\r\n\r\n cases = [\r\n (1, array([[1]]), array([[1]])),\r\n (2, array([[1, 1],\r\n [1, 2]]),\r\n array([[1, 0],\r\n [1, 1]])),\r\n (3, array([[1, 1, 1],\r\n [1, 2, 3],\r\n [1, 3, 6]]),\r\n array([[1, 0, 0],\r\n [1, 1, 0],\r\n [1, 2, 1]])),\r\n (4, array([[1, 1, 1, 1],\r\n [1, 2, 3, 4],\r\n [1, 3, 6, 10],\r\n [1, 4, 10, 20]]),\r\n array([[1, 0, 0, 0],\r\n [1, 1, 0, 0],\r\n [1, 2, 1, 0],\r\n [1, 3, 3, 1]])),\r\n ]\r\n\r\n def check_case(self, n, sym, low):\r\n assert_array_equal(pascal(n), sym)\r\n assert_array_equal(pascal(n, kind='lower'), low)\r\n assert_array_equal(pascal(n, kind='upper'), low.T)\r\n assert_array_almost_equal(pascal(n, exact=False), sym)\r\n assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low)\r\n assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T)\r\n\r\n def 
test_cases(self):\r\n for n, sym, low in self.cases:\r\n self.check_case(n, sym, low)\r\n\r\n def test_big(self):\r\n p = pascal(50)\r\n assert_equal(p[-1, -1], comb(98, 49, exact=True))\r\n\r\n def test_threshold(self):\r\n # Regression test. An early version of `pascal` returned an\r\n # array of type np.uint64 for n=35, but that data type is too small\r\n # to hold p[-1, -1]. The second assert_equal below would fail\r\n # because p[-1, -1] overflowed.\r\n p = pascal(34)\r\n assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg=\"n = 34\")\r\n p = pascal(35)\r\n assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg=\"n = 35\")\r\n\r\n\r\ndef test_invpascal():\r\n\r\n def check_invpascal(n, kind, exact):\r\n ip = invpascal(n, kind=kind, exact=exact)\r\n p = pascal(n, kind=kind, exact=exact)\r\n # Matrix-multiply ip and p, and check that we get the identity matrix.\r\n # We can't use the simple expression e = ip.dot(p), because when\r\n # n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is\r\n # np.int64. The product of those dtypes is np.float64, which loses\r\n # precision when n is greater than 18. Instead we'll cast both to\r\n # object arrays, and then multiply.\r\n e = ip.astype(object).dot(p.astype(object))\r\n assert_array_equal(e, eye(n), err_msg=\"n=%d kind=%r exact=%r\" %\r\n (n, kind, exact))\r\n\r\n kinds = ['symmetric', 'lower', 'upper']\r\n\r\n ns = [1, 2, 5, 18]\r\n for n in ns:\r\n for kind in kinds:\r\n for exact in [True, False]:\r\n yield check_invpascal, n, kind, exact\r\n\r\n ns = [19, 34, 35, 50]\r\n for n in ns:\r\n for kind in kinds:\r\n yield check_invpascal, n, kind, True\r\n\r\n\r\ndef test_dft():\r\n m = dft(2)\r\n expected = array([[1.0, 1.0], [1.0, -1.0]])\r\n yield (assert_array_almost_equal, m, expected)\r\n m = dft(2, scale='n')\r\n yield (assert_array_almost_equal, m, expected/2.0)\r\n m = dft(2, scale='sqrtn')\r\n yield (assert_array_almost_equal, m, expected/sqrt(2.0))\r\n\r\n x = array([0, 1, 2, 3, 4, 5, 0, 1])\r\n m = dft(8)\r\n mx = m.dot(x)\r\n fx = fftpack.fft(x)\r\n yield (assert_array_almost_equal, mx, fx)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n", "\"\"\"Matrix equation solver routines\"\"\"\r\n\r\n# Author: Jeffrey Armstrong <[email protected]>\r\n# February 24, 2012\r\n\r\n# Modified: Chad Fulton <[email protected]>\r\n# June 19, 2014\r\n\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\nimport numpy as np\r\nfrom numpy.linalg import inv, LinAlgError\r\n\r\nfrom .basic import solve\r\nfrom .lapack import get_lapack_funcs\r\nfrom .decomp_schur import schur\r\nfrom .special_matrices import kron\r\n\r\n__all__ = ['solve_sylvester', 'solve_lyapunov', 'solve_discrete_lyapunov',\r\n 'solve_continuous_are', 'solve_discrete_are']\r\n\r\n\r\ndef solve_sylvester(a, b, q):\r\n \"\"\"\r\n Computes a solution (X) to the Sylvester equation :math:`AX + XB = Q`.\r\n\r\n Parameters\r\n ----------\r\n a : (M, M) array_like\r\n Leading matrix of the Sylvester equation\r\n b : (N, N) array_like\r\n Trailing matrix of the Sylvester equation\r\n q : (M, N) array_like\r\n Right-hand side\r\n\r\n Returns\r\n -------\r\n x : (M, N) ndarray\r\n The solution to the Sylvester equation.\r\n\r\n Raises\r\n ------\r\n LinAlgError\r\n If solution was not found\r\n\r\n Notes\r\n -----\r\n Computes a solution to the Sylvester matrix equation via the Bartels-\r\n Stewart algorithm. The A and B matrices first undergo Schur\r\n decompositions. 
The resulting matrices are used to construct an\r\n alternative Sylvester equation (``RY + YS^T = F``) where the R and S\r\n matrices are in quasi-triangular form (or, when R, S or F are complex,\r\n triangular form). The simplified equation is then solved using\r\n ``*TRSYL`` from LAPACK directly.\r\n\r\n .. versionadded:: 0.11.0\r\n\r\n \"\"\"\r\n\r\n # Compute the Schur decomp form of a\r\n r, u = schur(a, output='real')\r\n\r\n # Compute the Schur decomp of b\r\n s, v = schur(b.conj().transpose(), output='real')\r\n\r\n # Construct f = u'*q*v\r\n f = np.dot(np.dot(u.conj().transpose(), q), v)\r\n\r\n # Call the Sylvester equation solver\r\n trsyl, = get_lapack_funcs(('trsyl',), (r, s, f))\r\n if trsyl is None:\r\n raise RuntimeError('LAPACK implementation does not contain a proper '\r\n 'Sylvester equation solver (TRSYL)')\r\n y, scale, info = trsyl(r, s, f, tranb='C')\r\n\r\n y = scale*y\r\n\r\n if info < 0:\r\n raise LinAlgError(\"Illegal value encountered in \"\r\n \"the %d term\" % (-info,))\r\n\r\n return np.dot(np.dot(u, y), v.conj().transpose())\r\n\r\n\r\ndef solve_lyapunov(a, q):\r\n \"\"\"\r\n Solves the continuous Lyapunov equation :math:`AX + XA^H = Q`.\r\n\r\n Uses the Bartels-Stewart algorithm to find :math:`X`.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n A square matrix\r\n\r\n q : array_like\r\n Right-hand side square matrix\r\n\r\n Returns\r\n -------\r\n x : array_like\r\n Solution to the continuous Lyapunov equation\r\n\r\n See Also\r\n --------\r\n solve_sylvester : computes the solution to the Sylvester equation\r\n\r\n Notes\r\n -----\r\n Because the continuous Lyapunov equation is just a special form of the\r\n Sylvester equation, this solver relies entirely on solve_sylvester for a\r\n solution.\r\n\r\n .. versionadded:: 0.11.0\r\n\r\n \"\"\"\r\n\r\n return solve_sylvester(a, a.conj().transpose(), q)\r\n\r\n\r\ndef _solve_discrete_lyapunov_direct(a, q):\r\n \"\"\"\r\n Solves the discrete Lyapunov equation directly.\r\n\r\n This function is called by the `solve_discrete_lyapunov` function with\r\n `method=direct`. It is not supposed to be called directly.\r\n \"\"\"\r\n\r\n lhs = kron(a, a.conj())\r\n lhs = np.eye(lhs.shape[0]) - lhs\r\n x = solve(lhs, q.flatten())\r\n\r\n return np.reshape(x, q.shape)\r\n\r\n\r\ndef _solve_discrete_lyapunov_bilinear(a, q):\r\n \"\"\"\r\n Solves the discrete Lyapunov equation using a bilinear transformation.\r\n\r\n This function is called by the `solve_discrete_lyapunov` function with\r\n `method=bilinear`. It is not supposed to be called directly.\r\n \"\"\"\r\n eye = np.eye(a.shape[0])\r\n aH = a.conj().transpose()\r\n aHI_inv = inv(aH + eye)\r\n b = np.dot(aH - eye, aHI_inv)\r\n c = 2*np.dot(np.dot(inv(a + eye), q), aHI_inv)\r\n return solve_lyapunov(b.conj().transpose(), -c)\r\n\r\n\r\ndef solve_discrete_lyapunov(a, q, method=None):\r\n \"\"\"\r\n Solves the discrete Lyapunov equation :math:`AXA^H - X + Q = 0`.\r\n\r\n Parameters\r\n ----------\r\n a, q : (M, M) array_like\r\n Square matrices corresponding to A and Q in the equation\r\n above respectively. 
Must have the same shape.\r\n\r\n method : {'direct', 'bilinear'}, optional\r\n Type of solver.\r\n\r\n If not given, chosen to be ``direct`` if ``M`` is less than 10 and\r\n ``bilinear`` otherwise.\r\n\r\n Returns\r\n -------\r\n x : ndarray\r\n Solution to the discrete Lyapunov equation\r\n\r\n See Also\r\n --------\r\n solve_lyapunov : computes the solution to the continuous Lyapunov equation\r\n\r\n Notes\r\n -----\r\n This section describes the available solvers that can be selected by the\r\n 'method' parameter. The default method is *direct* if ``M`` is less than 10\r\n and ``bilinear`` otherwise.\r\n\r\n Method *direct* uses a direct analytical solution to the discrete Lyapunov\r\n equation. The algorithm is given in, for example, [1]_. However it requires\r\n the linear solution of a system with dimension :math:`M^2` so that\r\n performance degrades rapidly for even moderately sized matrices.\r\n\r\n Method *bilinear* uses a bilinear transformation to convert the discrete\r\n Lyapunov equation to a continuous Lyapunov equation :math:`(BX+XB'=-C)`\r\n where :math:`B=(A-I)(A+I)^{-1}` and\r\n :math:`C=2(A' + I)^{-1} Q (A + I)^{-1}`. The continuous equation can be\r\n efficiently solved since it is a special case of a Sylvester equation.\r\n The transformation algorithm is from Popov (1964) as described in [2]_.\r\n\r\n .. versionadded:: 0.11.0\r\n\r\n References\r\n ----------\r\n .. [1] Hamilton, James D. Time Series Analysis, Princeton: Princeton\r\n University Press, 1994. 265. Print.\r\n http://www.scribd.com/doc/20577138/Hamilton-1994-Time-Series-Analysis\r\n .. [2] Gajic, Z., and M.T.J. Qureshi. 2008.\r\n Lyapunov Matrix Equation in System Stability and Control.\r\n Dover Books on Engineering Series. Dover Publications.\r\n\r\n \"\"\"\r\n a = np.asarray(a)\r\n q = np.asarray(q)\r\n if method is None:\r\n # Select automatically based on size of matrices\r\n if a.shape[0] >= 10:\r\n method = 'bilinear'\r\n else:\r\n method = 'direct'\r\n\r\n meth = method.lower()\r\n\r\n if meth == 'direct':\r\n x = _solve_discrete_lyapunov_direct(a, q)\r\n elif meth == 'bilinear':\r\n x = _solve_discrete_lyapunov_bilinear(a, q)\r\n else:\r\n raise ValueError('Unknown solver %s' % method)\r\n\r\n return x\r\n\r\n\r\ndef solve_continuous_are(a, b, q, r):\r\n \"\"\"\r\n Solves the continuous algebraic Riccati equation (CARE).\r\n\r\n The CARE is defined as\r\n\r\n .. math::\r\n (A'X + XA - XBR^-1B'X+Q=0)\r\n\r\n It is solved directly using a Schur decomposition method.\r\n\r\n Parameters\r\n ----------\r\n a : (M, M) array_like\r\n Input\r\n b : (M, N) array_like\r\n Input\r\n q : (M, M) array_like\r\n Input\r\n r : (N, N) array_like\r\n Non-singular, square matrix\r\n\r\n Returns\r\n -------\r\n x : (M, M) ndarray\r\n Solution to the continuous algebraic Riccati equation\r\n\r\n See Also\r\n --------\r\n solve_discrete_are : Solves the discrete algebraic Riccati equation\r\n\r\n Notes\r\n -----\r\n Method taken from:\r\n Laub, \"A Schur Method for Solving Algebraic Riccati Equations.\"\r\n U.S. Energy Research and Development Agency under contract\r\n ERDA-E(49-18)-2087.\r\n http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf\r\n\r\n .. 
versionadded:: 0.11.0\r\n\r\n \"\"\"\r\n\r\n try:\r\n g = inv(r)\r\n except LinAlgError:\r\n raise ValueError('Matrix R in the algebraic Riccati equation solver '\r\n 'is ill-conditioned')\r\n\r\n g = np.dot(np.dot(b, g), b.conj().transpose())\r\n\r\n z11 = a\r\n z12 = -1.0*g\r\n z21 = -1.0*q\r\n z22 = -1.0*a.conj().transpose()\r\n\r\n z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22))))\r\n\r\n # Note: we need to sort the upper left of s to have negative real parts,\r\n # while the lower right is positive real components (Laub, p. 7)\r\n s, u, _ = schur(z, sort='lhp')\r\n\r\n (m, n) = u.shape\r\n\r\n u11 = u[0:m//2, 0:n//2]\r\n u21 = u[m//2:m, 0:n//2]\r\n u11i = inv(u11)\r\n\r\n return np.dot(u21, u11i)\r\n\r\n\r\ndef solve_discrete_are(a, b, q, r):\r\n \"\"\"\r\n Solves the discrete algebraic Riccati equation (DARE).\r\n\r\n The DARE is defined as\r\n\r\n .. math::\r\n X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q\r\n\r\n It is solved directly using a Schur decomposition method.\r\n\r\n Parameters\r\n ----------\r\n a : (M, M) array_like\r\n Non-singular, square matrix\r\n b : (M, N) array_like\r\n Input\r\n q : (M, M) array_like\r\n Input\r\n r : (N, N) array_like\r\n Non-singular, square matrix\r\n\r\n Returns\r\n -------\r\n x : ndarray\r\n Solution to the continuous Lyapunov equation\r\n\r\n See Also\r\n --------\r\n solve_continuous_are : Solves the continuous algebraic Riccati equation\r\n\r\n Notes\r\n -----\r\n Method taken from:\r\n Laub, \"A Schur Method for Solving Algebraic Riccati Equations.\"\r\n U.S. Energy Research and Development Agency under contract\r\n ERDA-E(49-18)-2087.\r\n http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf\r\n\r\n .. versionadded:: 0.11.0\r\n\r\n \"\"\"\r\n\r\n try:\r\n g = inv(r)\r\n except LinAlgError:\r\n raise ValueError('Matrix R in the algebraic Riccati equation solver '\r\n 'is ill-conditioned')\r\n\r\n g = np.dot(np.dot(b, g), b.conj().transpose())\r\n\r\n try:\r\n ait = inv(a).conj().transpose() # ait is \"A inverse transpose\"\r\n except LinAlgError:\r\n raise ValueError('Matrix A in the algebraic Riccati equation solver '\r\n 'is ill-conditioned')\r\n\r\n z11 = a+np.dot(np.dot(g, ait), q)\r\n z12 = -1.0*np.dot(g, ait)\r\n z21 = -1.0*np.dot(ait, q)\r\n z22 = ait\r\n\r\n z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22))))\r\n\r\n # Note: we need to sort the upper left of s to lie within the unit circle,\r\n # while the lower right is outside (Laub, p. 7)\r\n s, u, _ = schur(z, sort='iuc')\r\n\r\n (m, n) = u.shape\r\n\r\n u11 = u[0:m//2, 0:n//2]\r\n u21 = u[m//2:m, 0:n//2]\r\n u11i = inv(u11)\r\n\r\n return np.dot(u21, u11i)\r\n", "\"\"\":mod:`numpy.ma..mrecords`\r\n\r\nDefines the equivalent of :class:`numpy.recarrays` for masked arrays,\r\nwhere fields can be accessed as attributes.\r\nNote that :class:`numpy.ma.MaskedArray` already supports structured datatypes\r\nand the masking of individual fields.\r\n\r\n.. moduleauthor:: Pierre Gerard-Marchant\r\n\r\n\"\"\"\r\nfrom __future__ import division, absolute_import, print_function\r\n\r\n# We should make sure that no field is called '_mask','mask','_fieldmask',\r\n# or whatever restricted keywords. An idea would be to no bother in the\r\n# first place, and then rename the invalid fields with a trailing\r\n# underscore. 
Maybe we could just overload the parser function ?\r\n\r\nimport sys\r\nimport warnings\r\n\r\nimport numpy as np\r\nimport numpy.core.numerictypes as ntypes\r\nfrom numpy.compat import basestring\r\nfrom numpy import (\r\n bool_, dtype, ndarray, recarray, array as narray\r\n )\r\nfrom numpy.core.records import (\r\n fromarrays as recfromarrays, fromrecords as recfromrecords\r\n )\r\n\r\n_byteorderconv = np.core.records._byteorderconv\r\n_typestr = ntypes._typestr\r\n\r\nimport numpy.ma as ma\r\nfrom numpy.ma import (\r\n MAError, MaskedArray, masked, nomask, masked_array, getdata,\r\n getmaskarray, filled\r\n )\r\n\r\n_check_fill_value = ma.core._check_fill_value\r\n\r\n\r\n__all__ = [\r\n 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',\r\n 'fromtextfile', 'addfield',\r\n ]\r\n\r\nreserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']\r\n\r\n\r\ndef _getformats(data):\r\n \"\"\"\r\n Returns the formats of arrays in arraylist as a comma-separated string.\r\n\r\n \"\"\"\r\n if hasattr(data, 'dtype'):\r\n return \",\".join([desc[1] for desc in data.dtype.descr])\r\n\r\n formats = ''\r\n for obj in data:\r\n obj = np.asarray(obj)\r\n formats += _typestr[obj.dtype.type]\r\n if issubclass(obj.dtype.type, ntypes.flexible):\r\n formats += repr(obj.itemsize)\r\n formats += ','\r\n return formats[:-1]\r\n\r\n\r\ndef _checknames(descr, names=None):\r\n \"\"\"\r\n Checks that field names ``descr`` are not reserved keywords.\r\n\r\n If this is the case, a default 'f%i' is substituted. If the argument\r\n `names` is not None, updates the field names to valid names.\r\n\r\n \"\"\"\r\n ndescr = len(descr)\r\n default_names = ['f%i' % i for i in range(ndescr)]\r\n if names is None:\r\n new_names = default_names\r\n else:\r\n if isinstance(names, (tuple, list)):\r\n new_names = names\r\n elif isinstance(names, str):\r\n new_names = names.split(',')\r\n else:\r\n raise NameError(\"illegal input names %s\" % repr(names))\r\n nnames = len(new_names)\r\n if nnames < ndescr:\r\n new_names += default_names[nnames:]\r\n ndescr = []\r\n for (n, d, t) in zip(new_names, default_names, descr.descr):\r\n if n in reserved_fields:\r\n if t[0] in reserved_fields:\r\n ndescr.append((d, t[1]))\r\n else:\r\n ndescr.append(t)\r\n else:\r\n ndescr.append((n, t[1]))\r\n return np.dtype(ndescr)\r\n\r\n\r\ndef _get_fieldmask(self):\r\n mdescr = [(n, '|b1') for n in self.dtype.names]\r\n fdmask = np.empty(self.shape, dtype=mdescr)\r\n fdmask.flat = tuple([False] * len(mdescr))\r\n return fdmask\r\n\r\n\r\nclass MaskedRecords(MaskedArray, object):\r\n \"\"\"\r\n\r\n Attributes\r\n ----------\r\n _data : recarray\r\n Underlying data, as a record array.\r\n _mask : boolean array\r\n Mask of the records. 
A record is masked when all its fields are\r\n masked.\r\n _fieldmask : boolean recarray\r\n Record array of booleans, setting the mask of each individual field\r\n of each record.\r\n _fill_value : record\r\n Filling values for each field.\r\n\r\n \"\"\"\r\n\r\n def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,\r\n formats=None, names=None, titles=None,\r\n byteorder=None, aligned=False,\r\n mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,\r\n copy=False,\r\n **options):\r\n\r\n self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,\r\n strides=strides, formats=formats, names=names,\r\n titles=titles, byteorder=byteorder,\r\n aligned=aligned,)\r\n\r\n mdtype = ma.make_mask_descr(self.dtype)\r\n if mask is nomask or not np.size(mask):\r\n if not keep_mask:\r\n self._mask = tuple([False] * len(mdtype))\r\n else:\r\n mask = np.array(mask, copy=copy)\r\n if mask.shape != self.shape:\r\n (nd, nm) = (self.size, mask.size)\r\n if nm == 1:\r\n mask = np.resize(mask, self.shape)\r\n elif nm == nd:\r\n mask = np.reshape(mask, self.shape)\r\n else:\r\n msg = \"Mask and data not compatible: data size is %i, \" + \\\r\n \"mask size is %i.\"\r\n raise MAError(msg % (nd, nm))\r\n copy = True\r\n if not keep_mask:\r\n self.__setmask__(mask)\r\n self._sharedmask = True\r\n else:\r\n if mask.dtype == mdtype:\r\n _mask = mask\r\n else:\r\n _mask = np.array([tuple([m] * len(mdtype)) for m in mask],\r\n dtype=mdtype)\r\n self._mask = _mask\r\n return self\r\n\r\n def __array_finalize__(self, obj):\r\n # Make sure we have a _fieldmask by default\r\n _mask = getattr(obj, '_mask', None)\r\n if _mask is None:\r\n objmask = getattr(obj, '_mask', nomask)\r\n _dtype = ndarray.__getattribute__(self, 'dtype')\r\n if objmask is nomask:\r\n _mask = ma.make_mask_none(self.shape, dtype=_dtype)\r\n else:\r\n mdescr = ma.make_mask_descr(_dtype)\r\n _mask = narray([tuple([m] * len(mdescr)) for m in objmask],\r\n dtype=mdescr).view(recarray)\r\n # Update some of the attributes\r\n _dict = self.__dict__\r\n _dict.update(_mask=_mask)\r\n self._update_from(obj)\r\n if _dict['_baseclass'] == ndarray:\r\n _dict['_baseclass'] = recarray\r\n return\r\n\r\n def _getdata(self):\r\n \"\"\"\r\n Returns the data as a recarray.\r\n\r\n \"\"\"\r\n return ndarray.view(self, recarray)\r\n\r\n _data = property(fget=_getdata)\r\n\r\n def _getfieldmask(self):\r\n \"\"\"\r\n Alias to mask.\r\n\r\n \"\"\"\r\n return self._mask\r\n\r\n _fieldmask = property(fget=_getfieldmask)\r\n\r\n def __len__(self):\r\n \"\"\"\r\n Returns the length\r\n\r\n \"\"\"\r\n # We have more than one record\r\n if self.ndim:\r\n return len(self._data)\r\n # We have only one record: return the nb of fields\r\n return len(self.dtype)\r\n\r\n def __getattribute__(self, attr):\r\n try:\r\n return object.__getattribute__(self, attr)\r\n except AttributeError:\r\n # attr must be a fieldname\r\n pass\r\n fielddict = ndarray.__getattribute__(self, 'dtype').fields\r\n try:\r\n res = fielddict[attr][:2]\r\n except (TypeError, KeyError):\r\n raise AttributeError(\"record array has no attribute %s\" % attr)\r\n # So far, so good\r\n _localdict = ndarray.__getattribute__(self, '__dict__')\r\n _data = ndarray.view(self, _localdict['_baseclass'])\r\n obj = _data.getfield(*res)\r\n if obj.dtype.fields:\r\n raise NotImplementedError(\"MaskedRecords is currently limited to\"\r\n \"simple records.\")\r\n # Get some special attributes\r\n # Reset the object's mask\r\n hasmasked = False\r\n _mask = _localdict.get('_mask', None)\r\n if 
_mask is not None:\r\n try:\r\n _mask = _mask[attr]\r\n except IndexError:\r\n # Couldn't find a mask: use the default (nomask)\r\n pass\r\n hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any()\r\n if (obj.shape or hasmasked):\r\n obj = obj.view(MaskedArray)\r\n obj._baseclass = ndarray\r\n obj._isfield = True\r\n obj._mask = _mask\r\n # Reset the field values\r\n _fill_value = _localdict.get('_fill_value', None)\r\n if _fill_value is not None:\r\n try:\r\n obj._fill_value = _fill_value[attr]\r\n except ValueError:\r\n obj._fill_value = None\r\n else:\r\n obj = obj.item()\r\n return obj\r\n\r\n def __setattr__(self, attr, val):\r\n \"\"\"\r\n Sets the attribute attr to the value val.\r\n\r\n \"\"\"\r\n # Should we call __setmask__ first ?\r\n if attr in ['mask', 'fieldmask']:\r\n self.__setmask__(val)\r\n return\r\n # Create a shortcut (so that we don't have to call getattr all the time)\r\n _localdict = object.__getattribute__(self, '__dict__')\r\n # Check whether we're creating a new field\r\n newattr = attr not in _localdict\r\n try:\r\n # Is attr a generic attribute ?\r\n ret = object.__setattr__(self, attr, val)\r\n except:\r\n # Not a generic attribute: exit if it's not a valid field\r\n fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}\r\n optinfo = ndarray.__getattribute__(self, '_optinfo') or {}\r\n if not (attr in fielddict or attr in optinfo):\r\n exctype, value = sys.exc_info()[:2]\r\n raise exctype(value)\r\n else:\r\n # Get the list of names\r\n fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}\r\n # Check the attribute\r\n if attr not in fielddict:\r\n return ret\r\n if newattr:\r\n # We just added this one or this setattr worked on an\r\n # internal attribute.\r\n try:\r\n object.__delattr__(self, attr)\r\n except:\r\n return ret\r\n # Let's try to set the field\r\n try:\r\n res = fielddict[attr][:2]\r\n except (TypeError, KeyError):\r\n raise AttributeError(\"record array has no attribute %s\" % attr)\r\n\r\n if val is masked:\r\n _fill_value = _localdict['_fill_value']\r\n if _fill_value is not None:\r\n dval = _localdict['_fill_value'][attr]\r\n else:\r\n dval = val\r\n mval = True\r\n else:\r\n dval = filled(val)\r\n mval = getmaskarray(val)\r\n obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)\r\n _localdict['_mask'].__setitem__(attr, mval)\r\n return obj\r\n\r\n def __getitem__(self, indx):\r\n \"\"\"\r\n Returns all the fields sharing the same fieldname base.\r\n\r\n The fieldname base is either `_data` or `_mask`.\r\n\r\n \"\"\"\r\n _localdict = self.__dict__\r\n _mask = ndarray.__getattribute__(self, '_mask')\r\n _data = ndarray.view(self, _localdict['_baseclass'])\r\n # We want a field\r\n if isinstance(indx, basestring):\r\n # Make sure _sharedmask is True to propagate back to _fieldmask\r\n # Don't use _set_mask, there are some copies being made that\r\n # break propagation Don't force the mask to nomask, that wreaks\r\n # easy masking\r\n obj = _data[indx].view(MaskedArray)\r\n obj._mask = _mask[indx]\r\n obj._sharedmask = True\r\n fval = _localdict['_fill_value']\r\n if fval is not None:\r\n obj._fill_value = fval[indx]\r\n # Force to masked if the mask is True\r\n if not obj.ndim and obj._mask:\r\n return masked\r\n return obj\r\n # We want some elements.\r\n # First, the data.\r\n obj = np.array(_data[indx], copy=False).view(mrecarray)\r\n obj._mask = np.array(_mask[indx], copy=False).view(recarray)\r\n return obj\r\n\r\n def __setitem__(self, indx, value):\r\n \"\"\"\r\n Sets the given record to 
value.\r\n\r\n \"\"\"\r\n MaskedArray.__setitem__(self, indx, value)\r\n if isinstance(indx, basestring):\r\n self._mask[indx] = ma.getmaskarray(value)\r\n\r\n def __str__(self):\r\n \"\"\"\r\n Calculates the string representation.\r\n\r\n \"\"\"\r\n if self.size > 1:\r\n mstr = [\"(%s)\" % \",\".join([str(i) for i in s])\r\n for s in zip(*[getattr(self, f) for f in self.dtype.names])]\r\n return \"[%s]\" % \", \".join(mstr)\r\n else:\r\n mstr = [\"%s\" % \",\".join([str(i) for i in s])\r\n for s in zip([getattr(self, f) for f in self.dtype.names])]\r\n return \"(%s)\" % \", \".join(mstr)\r\n\r\n def __repr__(self):\r\n \"\"\"\r\n Calculates the repr representation.\r\n\r\n \"\"\"\r\n _names = self.dtype.names\r\n fmt = \"%%%is : %%s\" % (max([len(n) for n in _names]) + 4,)\r\n reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]\r\n reprstr.insert(0, 'masked_records(')\r\n reprstr.extend([fmt % (' fill_value', self.fill_value),\r\n ' )'])\r\n return str(\"\\n\".join(reprstr))\r\n\r\n def view(self, dtype=None, type=None):\r\n \"\"\"\r\n Returns a view of the mrecarray.\r\n\r\n \"\"\"\r\n # OK, basic copy-paste from MaskedArray.view.\r\n if dtype is None:\r\n if type is None:\r\n output = ndarray.view(self)\r\n else:\r\n output = ndarray.view(self, type)\r\n # Here again.\r\n elif type is None:\r\n try:\r\n if issubclass(dtype, ndarray):\r\n output = ndarray.view(self, dtype)\r\n dtype = None\r\n else:\r\n output = ndarray.view(self, dtype)\r\n # OK, there's the change\r\n except TypeError:\r\n dtype = np.dtype(dtype)\r\n # we need to revert to MaskedArray, but keeping the possibility\r\n # of subclasses (eg, TimeSeriesRecords), so we'll force a type\r\n # set to the first parent\r\n if dtype.fields is None:\r\n basetype = self.__class__.__bases__[0]\r\n output = self.__array__().view(dtype, basetype)\r\n output._update_from(self)\r\n else:\r\n output = ndarray.view(self, dtype)\r\n output._fill_value = None\r\n else:\r\n output = ndarray.view(self, dtype, type)\r\n # Update the mask, just like in MaskedArray.view\r\n if (getattr(output, '_mask', nomask) is not nomask):\r\n mdtype = ma.make_mask_descr(output.dtype)\r\n output._mask = self._mask.view(mdtype, ndarray)\r\n output._mask.shape = output.shape\r\n return output\r\n\r\n def harden_mask(self):\r\n \"\"\"\r\n Forces the mask to hard.\r\n\r\n \"\"\"\r\n self._hardmask = True\r\n\r\n def soften_mask(self):\r\n \"\"\"\r\n Forces the mask to soft\r\n\r\n \"\"\"\r\n self._hardmask = False\r\n\r\n def copy(self):\r\n \"\"\"\r\n Returns a copy of the masked record.\r\n\r\n \"\"\"\r\n copied = self._data.copy().view(type(self))\r\n copied._mask = self._mask.copy()\r\n return copied\r\n\r\n def tolist(self, fill_value=None):\r\n \"\"\"\r\n Return the data portion of the array as a list.\r\n\r\n Data items are converted to the nearest compatible Python type.\r\n Masked values are converted to fill_value. 
If fill_value is None,\r\n the corresponding entries in the output list will be ``None``.\r\n\r\n \"\"\"\r\n if fill_value is not None:\r\n return self.filled(fill_value).tolist()\r\n result = narray(self.filled().tolist(), dtype=object)\r\n mask = narray(self._mask.tolist())\r\n result[mask] = None\r\n return result.tolist()\r\n\r\n def __getstate__(self):\r\n \"\"\"Return the internal state of the masked array.\r\n\r\n This is for pickling.\r\n\r\n \"\"\"\r\n state = (1,\r\n self.shape,\r\n self.dtype,\r\n self.flags.fnc,\r\n self._data.tobytes(),\r\n self._mask.tobytes(),\r\n self._fill_value,\r\n )\r\n return state\r\n\r\n def __setstate__(self, state):\r\n \"\"\"\r\n Restore the internal state of the masked array.\r\n\r\n This is for pickling. ``state`` is typically the output of the\r\n ``__getstate__`` output, and is a 5-tuple:\r\n\r\n - class name\r\n - a tuple giving the shape of the data\r\n - a typecode for the data\r\n - a binary string for the data\r\n - a binary string for the mask.\r\n\r\n \"\"\"\r\n (ver, shp, typ, isf, raw, msk, flv) = state\r\n ndarray.__setstate__(self, (shp, typ, isf, raw))\r\n mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])\r\n self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))\r\n self.fill_value = flv\r\n\r\n def __reduce__(self):\r\n \"\"\"\r\n Return a 3-tuple for pickling a MaskedArray.\r\n\r\n \"\"\"\r\n return (_mrreconstruct,\r\n (self.__class__, self._baseclass, (0,), 'b',),\r\n self.__getstate__())\r\n\r\ndef _mrreconstruct(subtype, baseclass, baseshape, basetype,):\r\n \"\"\"\r\n Build a new MaskedArray from the information stored in a pickle.\r\n\r\n \"\"\"\r\n _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)\r\n _mask = ndarray.__new__(ndarray, baseshape, 'b1')\r\n return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)\r\n\r\nmrecarray = MaskedRecords\r\n\r\n\r\n###############################################################################\r\n# Constructors #\r\n###############################################################################\r\n\r\n\r\ndef fromarrays(arraylist, dtype=None, shape=None, formats=None,\r\n names=None, titles=None, aligned=False, byteorder=None,\r\n fill_value=None):\r\n \"\"\"\r\n Creates a mrecarray from a (flat) list of masked arrays.\r\n\r\n Parameters\r\n ----------\r\n arraylist : sequence\r\n A list of (masked) arrays. Each element of the sequence is first converted\r\n to a masked array if needed. If a 2D array is passed as argument, it is\r\n processed line by line\r\n dtype : {None, dtype}, optional\r\n Data type descriptor.\r\n shape : {None, integer}, optional\r\n Number of records. If None, shape is defined from the shape of the\r\n first array in the list.\r\n formats : {None, sequence}, optional\r\n Sequence of formats for each individual field. 
If None, the formats will\r\n be autodetected by inspecting the fields and selecting the highest dtype\r\n possible.\r\n names : {None, sequence}, optional\r\n Sequence of the names of each field.\r\n fill_value : {None, sequence}, optional\r\n Sequence of data to be used as filling values.\r\n\r\n Notes\r\n -----\r\n Lists of tuples should be preferred over lists of lists for faster processing.\r\n\r\n \"\"\"\r\n datalist = [getdata(x) for x in arraylist]\r\n masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]\r\n _array = recfromarrays(datalist,\r\n dtype=dtype, shape=shape, formats=formats,\r\n names=names, titles=titles, aligned=aligned,\r\n byteorder=byteorder).view(mrecarray)\r\n _array._mask.flat = list(zip(*masklist))\r\n if fill_value is not None:\r\n _array.fill_value = fill_value\r\n return _array\r\n\r\n\r\ndef fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,\r\n titles=None, aligned=False, byteorder=None,\r\n fill_value=None, mask=nomask):\r\n \"\"\"\r\n Creates a MaskedRecords from a list of records.\r\n\r\n Parameters\r\n ----------\r\n reclist : sequence\r\n A list of records. Each element of the sequence is first converted\r\n to a masked array if needed. If a 2D array is passed as argument, it is\r\n processed line by line\r\n dtype : {None, dtype}, optional\r\n Data type descriptor.\r\n shape : {None,int}, optional\r\n Number of records. If None, ``shape`` is defined from the shape of the\r\n first array in the list.\r\n formats : {None, sequence}, optional\r\n Sequence of formats for each individual field. If None, the formats will\r\n be autodetected by inspecting the fields and selecting the highest dtype\r\n possible.\r\n names : {None, sequence}, optional\r\n Sequence of the names of each field.\r\n fill_value : {None, sequence}, optional\r\n Sequence of data to be used as filling values.\r\n mask : {nomask, sequence}, optional.\r\n External mask to apply on the data.\r\n\r\n Notes\r\n -----\r\n Lists of tuples should be preferred over lists of lists for faster processing.\r\n\r\n \"\"\"\r\n # Grab the initial _fieldmask, if needed:\r\n _mask = getattr(reclist, '_mask', None)\r\n # Get the list of records.\r\n if isinstance(reclist, ndarray):\r\n # Make sure we don't have some hidden mask\r\n if isinstance(reclist, MaskedArray):\r\n reclist = reclist.filled().view(ndarray)\r\n # Grab the initial dtype, just in case\r\n if dtype is None:\r\n dtype = reclist.dtype\r\n reclist = reclist.tolist()\r\n mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,\r\n names=names, titles=titles,\r\n aligned=aligned, byteorder=byteorder).view(mrecarray)\r\n # Set the fill_value if needed\r\n if fill_value is not None:\r\n mrec.fill_value = fill_value\r\n # Now, let's deal w/ the mask\r\n if mask is not nomask:\r\n mask = np.array(mask, copy=False)\r\n maskrecordlength = len(mask.dtype)\r\n if maskrecordlength:\r\n mrec._mask.flat = mask\r\n elif len(mask.shape) == 2:\r\n mrec._mask.flat = [tuple(m) for m in mask]\r\n else:\r\n mrec.__setmask__(mask)\r\n if _mask is not None:\r\n mrec._mask[:] = _mask\r\n return mrec\r\n\r\n\r\ndef _guessvartypes(arr):\r\n \"\"\"\r\n Tries to guess the dtypes of the str_ ndarray `arr`.\r\n\r\n Guesses by testing element-wise conversion. Returns a list of dtypes.\r\n The array is first converted to ndarray. If the array is 2D, the test\r\n is performed on the first line. 
An exception is raised if the file is\r\n 3D or more.\r\n\r\n \"\"\"\r\n vartypes = []\r\n arr = np.asarray(arr)\r\n if len(arr.shape) == 2:\r\n arr = arr[0]\r\n elif len(arr.shape) > 2:\r\n raise ValueError(\"The array should be 2D at most!\")\r\n # Start the conversion loop.\r\n for f in arr:\r\n try:\r\n int(f)\r\n except ValueError:\r\n try:\r\n float(f)\r\n except ValueError:\r\n try:\r\n complex(f)\r\n except ValueError:\r\n vartypes.append(arr.dtype)\r\n else:\r\n vartypes.append(np.dtype(complex))\r\n else:\r\n vartypes.append(np.dtype(float))\r\n else:\r\n vartypes.append(np.dtype(int))\r\n return vartypes\r\n\r\n\r\ndef openfile(fname):\r\n \"\"\"\r\n Opens the file handle of file `fname`.\r\n\r\n \"\"\"\r\n # A file handle\r\n if hasattr(fname, 'readline'):\r\n return fname\r\n # Try to open the file and guess its type\r\n try:\r\n f = open(fname)\r\n except IOError:\r\n raise IOError(\"No such file: '%s'\" % fname)\r\n if f.readline()[:2] != \"\\\\x\":\r\n f.seek(0, 0)\r\n return f\r\n f.close()\r\n raise NotImplementedError(\"Wow, binary file\")\r\n\r\n\r\ndef fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',\r\n varnames=None, vartypes=None):\r\n \"\"\"\r\n Creates a mrecarray from data stored in the file `filename`.\r\n\r\n Parameters\r\n ----------\r\n fname : {file name/handle}\r\n Handle of an opened file.\r\n delimitor : {None, string}, optional\r\n Alphanumeric character used to separate columns in the file.\r\n If None, any (group of) white spacestring(s) will be used.\r\n commentchar : {'#', string}, optional\r\n Alphanumeric character used to mark the start of a comment.\r\n missingchar : {'', string}, optional\r\n String indicating missing data, and used to create the masks.\r\n varnames : {None, sequence}, optional\r\n Sequence of the variable names. If None, a list will be created from\r\n the first non empty line of the file.\r\n vartypes : {None, sequence}, optional\r\n Sequence of the variables dtypes. If None, it will be estimated from\r\n the first non-commented line.\r\n\r\n\r\n Ultra simple: the varnames are in the header, one line\"\"\"\r\n # Try to open the file.\r\n ftext = openfile(fname)\r\n\r\n # Get the first non-empty line as the varnames\r\n while True:\r\n line = ftext.readline()\r\n firstline = line[:line.find(commentchar)].strip()\r\n _varnames = firstline.split(delimitor)\r\n if len(_varnames) > 1:\r\n break\r\n if varnames is None:\r\n varnames = _varnames\r\n\r\n # Get the data.\r\n _variables = masked_array([line.strip().split(delimitor) for line in ftext\r\n if line[0] != commentchar and len(line) > 1])\r\n (_, nfields) = _variables.shape\r\n ftext.close()\r\n\r\n # Try to guess the dtype.\r\n if vartypes is None:\r\n vartypes = _guessvartypes(_variables[0])\r\n else:\r\n vartypes = [np.dtype(v) for v in vartypes]\r\n if len(vartypes) != nfields:\r\n msg = \"Attempting to %i dtypes for %i fields!\"\r\n msg += \" Reverting to default.\"\r\n warnings.warn(msg % (len(vartypes), nfields))\r\n vartypes = _guessvartypes(_variables[0])\r\n\r\n # Construct the descriptor.\r\n mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]\r\n mfillv = [ma.default_fill_value(f) for f in vartypes]\r\n\r\n # Get the data and the mask.\r\n # We just need a list of masked_arrays. 
It's easier to create it like that:\r\n _mask = (_variables.T == missingchar)\r\n _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)\r\n for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]\r\n\r\n return fromarrays(_datalist, dtype=mdescr)\r\n\r\n\r\ndef addfield(mrecord, newfield, newfieldname=None):\r\n \"\"\"Adds a new field to the masked record array\r\n\r\n Uses `newfield` as data and `newfieldname` as name. If `newfieldname`\r\n is None, the new field name is set to 'fi', where `i` is the number of\r\n existing fields.\r\n\r\n \"\"\"\r\n _data = mrecord._data\r\n _mask = mrecord._mask\r\n if newfieldname is None or newfieldname in reserved_fields:\r\n newfieldname = 'f%i' % len(_data.dtype)\r\n newfield = ma.array(newfield)\r\n # Get the new data.\r\n # Create a new empty recarray\r\n newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])\r\n newdata = recarray(_data.shape, newdtype)\r\n # Add the exisintg field\r\n [newdata.setfield(_data.getfield(*f), *f)\r\n for f in _data.dtype.fields.values()]\r\n # Add the new field\r\n newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])\r\n newdata = newdata.view(MaskedRecords)\r\n # Get the new mask\r\n # Create a new empty recarray\r\n newmdtype = np.dtype([(n, bool_) for n in newdtype.names])\r\n newmask = recarray(_data.shape, newmdtype)\r\n # Add the old masks\r\n [newmask.setfield(_mask.getfield(*f), *f)\r\n for f in _mask.dtype.fields.values()]\r\n # Add the mask of the new field\r\n newmask.setfield(getmaskarray(newfield),\r\n *newmask.dtype.fields[newfieldname])\r\n newdata._mask = newmask\r\n return newdata\r\n", "\"\"\"\r\n========================================================\r\nHierarchical clustering (:mod:`scipy.cluster.hierarchy`)\r\n========================================================\r\n\r\n.. currentmodule:: scipy.cluster.hierarchy\r\n\r\nThese functions cut hierarchical clusterings into flat clusterings\r\nor find the roots of the forest formed by a cut by providing the flat\r\ncluster ids of each observation.\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n fcluster\r\n fclusterdata\r\n leaders\r\n\r\nThese are routines for agglomerative clustering.\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n linkage\r\n single\r\n complete\r\n average\r\n weighted\r\n centroid\r\n median\r\n ward\r\n\r\nThese routines compute statistics on hierarchies.\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n cophenet\r\n from_mlab_linkage\r\n inconsistent\r\n maxinconsts\r\n maxdists\r\n maxRstat\r\n to_mlab_linkage\r\n\r\nRoutines for visualizing flat clusters.\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n dendrogram\r\n\r\nThese are data structures and routines for representing hierarchies as\r\ntree objects.\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n ClusterNode\r\n leaves_list\r\n to_tree\r\n cut_tree\r\n\r\nThese are predicates for checking the validity of linkage and\r\ninconsistency matrices as well as for checking isomorphism of two\r\nflat cluster assignments.\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n is_valid_im\r\n is_valid_linkage\r\n is_isomorphic\r\n is_monotonic\r\n correspond\r\n num_obs_linkage\r\n\r\nUtility routines for plotting:\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n set_link_color_palette\r\n\r\nReferences\r\n----------\r\n\r\n.. [1] \"Statistics toolbox.\" API Reference Documentation. 
The MathWorks.\r\n http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.\r\n Accessed October 1, 2007.\r\n\r\n.. [2] \"Hierarchical clustering.\" API Reference Documentation.\r\n The Wolfram Research, Inc.\r\n http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/\r\n HierarchicalClustering.html.\r\n Accessed October 1, 2007.\r\n\r\n.. [3] Gower, JC and Ross, GJS. \"Minimum Spanning Trees and Single Linkage\r\n Cluster Analysis.\" Applied Statistics. 18(1): pp. 54--64. 1969.\r\n\r\n.. [4] Ward Jr, JH. \"Hierarchical grouping to optimize an objective\r\n function.\" Journal of the American Statistical Association. 58(301):\r\n pp. 236--44. 1963.\r\n\r\n.. [5] Johnson, SC. \"Hierarchical clustering schemes.\" Psychometrika.\r\n 32(2): pp. 241--54. 1966.\r\n\r\n.. [6] Sneath, PH and Sokal, RR. \"Numerical taxonomy.\" Nature. 193: pp.\r\n 855--60. 1962.\r\n\r\n.. [7] Batagelj, V. \"Comparing resemblance measures.\" Journal of\r\n Classification. 12: pp. 73--90. 1995.\r\n\r\n.. [8] Sokal, RR and Michener, CD. \"A statistical method for evaluating\r\n systematic relationships.\" Scientific Bulletins. 38(22):\r\n pp. 1409--38. 1958.\r\n\r\n.. [9] Edelbrock, C. \"Mixture model tests of hierarchical clustering\r\n algorithms: the problem of classifying everybody.\" Multivariate\r\n Behavioral Research. 14: pp. 367--84. 1979.\r\n\r\n.. [10] Jain, A., and Dubes, R., \"Algorithms for Clustering Data.\"\r\n Prentice-Hall. Englewood Cliffs, NJ. 1988.\r\n\r\n.. [11] Fisher, RA \"The use of multiple measurements in taxonomic\r\n problems.\" Annals of Eugenics, 7(2): 179-188. 1936\r\n\r\n\r\n* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.\r\n\r\n* Mathematica is a registered trademark of The Wolfram Research, Inc.\r\n\r\n\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\n# Copyright (C) Damian Eads, 2007-2008. New BSD License.\r\n\r\n# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)\r\n#\r\n# Author: Damian Eads\r\n# Date: September 22, 2007\r\n#\r\n# Copyright (c) 2007, 2008, Damian Eads\r\n#\r\n# All rights reserved.\r\n#\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions\r\n# are met:\r\n# - Redistributions of source code must retain the above\r\n# copyright notice, this list of conditions and the\r\n# following disclaimer.\r\n# - Redistributions in binary form must reproduce the above copyright\r\n# notice, this list of conditions and the following disclaimer\r\n# in the documentation and/or other materials provided with the\r\n# distribution.\r\n# - Neither the name of the author nor the names of its\r\n# contributors may be used to endorse or promote products derived\r\n# from this software without specific prior written permission.\r\n#\r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\r\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n\r\nimport warnings\r\nimport bisect\r\nfrom collections import deque\r\n\r\nimport numpy as np\r\nfrom . import _hierarchy\r\nimport scipy.spatial.distance as distance\r\n\r\nfrom scipy._lib.six import string_types\r\nfrom scipy._lib.six import xrange\r\n\r\n_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,\r\n 'median': 4, 'ward': 5, 'weighted': 6}\r\n_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')\r\n\r\n\r\n__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',\r\n 'correspond', 'cut_tree', 'dendrogram', 'fcluster', 'fclusterdata',\r\n 'from_mlab_linkage', 'inconsistent', 'is_isomorphic',\r\n 'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',\r\n 'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',\r\n 'median', 'num_obs_linkage', 'set_link_color_palette', 'single',\r\n 'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']\r\n\r\n\r\ndef _warning(s):\r\n warnings.warn('scipy.cluster: %s' % s, stacklevel=3)\r\n\r\n\r\ndef _copy_array_if_base_present(a):\r\n \"\"\"\r\n Copies the array if its base points to a parent array.\r\n \"\"\"\r\n if a.base is not None:\r\n return a.copy()\r\n elif np.issubsctype(a, np.float32):\r\n return np.array(a, dtype=np.double)\r\n else:\r\n return a\r\n\r\n\r\ndef _copy_arrays_if_base_present(T):\r\n \"\"\"\r\n Accepts a tuple of arrays T. Copies the array T[i] if its base array\r\n points to an actual array. Otherwise, the reference is just copied.\r\n This is useful if the arrays are being passed to a C function that\r\n does not do proper striding.\r\n \"\"\"\r\n l = [_copy_array_if_base_present(a) for a in T]\r\n return l\r\n\r\n\r\ndef _randdm(pnts):\r\n \"\"\" Generates a random distance matrix stored in condensed form. A\r\n pnts * (pnts - 1) / 2 sized vector is returned.\r\n \"\"\"\r\n if pnts >= 2:\r\n D = np.random.rand(pnts * (pnts - 1) / 2)\r\n else:\r\n raise ValueError(\"The number of points in the distance matrix \"\r\n \"must be at least 2.\")\r\n return D\r\n\r\n\r\ndef single(y):\r\n \"\"\"\r\n Performs single/min/nearest linkage on the condensed distance matrix ``y``\r\n\r\n Parameters\r\n ----------\r\n y : ndarray\r\n The upper triangular of the distance matrix. The result of\r\n ``pdist`` is returned in this form.\r\n\r\n Returns\r\n -------\r\n Z : ndarray\r\n The linkage matrix.\r\n\r\n See Also\r\n --------\r\n linkage: for advanced creation of hierarchical clusterings.\r\n\r\n \"\"\"\r\n return linkage(y, method='single', metric='euclidean')\r\n\r\n\r\ndef complete(y):\r\n \"\"\"\r\n Performs complete/max/farthest point linkage on a condensed distance matrix\r\n\r\n Parameters\r\n ----------\r\n y : ndarray\r\n The upper triangular of the distance matrix. The result of\r\n ``pdist`` is returned in this form.\r\n\r\n Returns\r\n -------\r\n Z : ndarray\r\n A linkage matrix containing the hierarchical clustering. 
See\r\n the ``linkage`` function documentation for more information\r\n on its structure.\r\n\r\n See Also\r\n --------\r\n linkage\r\n\r\n \"\"\"\r\n return linkage(y, method='complete', metric='euclidean')\r\n\r\n\r\ndef average(y):\r\n \"\"\"\r\n Performs average/UPGMA linkage on a condensed distance matrix\r\n\r\n Parameters\r\n ----------\r\n y : ndarray\r\n The upper triangular of the distance matrix. The result of\r\n ``pdist`` is returned in this form.\r\n\r\n Returns\r\n -------\r\n Z : ndarray\r\n A linkage matrix containing the hierarchical clustering. See\r\n the ``linkage`` function documentation for more information\r\n on its structure.\r\n\r\n See Also\r\n --------\r\n linkage: for advanced creation of hierarchical clusterings.\r\n\r\n \"\"\"\r\n return linkage(y, method='average', metric='euclidean')\r\n\r\n\r\ndef weighted(y):\r\n \"\"\"\r\n Performs weighted/WPGMA linkage on the condensed distance matrix.\r\n\r\n See ``linkage`` for more information on the return\r\n structure and algorithm.\r\n\r\n Parameters\r\n ----------\r\n y : ndarray\r\n The upper triangular of the distance matrix. The result of\r\n ``pdist`` is returned in this form.\r\n\r\n Returns\r\n -------\r\n Z : ndarray\r\n A linkage matrix containing the hierarchical clustering. See\r\n the ``linkage`` function documentation for more information\r\n on its structure.\r\n\r\n See Also\r\n --------\r\n linkage : for advanced creation of hierarchical clusterings.\r\n\r\n \"\"\"\r\n return linkage(y, method='weighted', metric='euclidean')\r\n\r\n\r\ndef centroid(y):\r\n \"\"\"\r\n Performs centroid/UPGMC linkage.\r\n\r\n See ``linkage`` for more information on the return structure\r\n and algorithm.\r\n\r\n The following are common calling conventions:\r\n\r\n 1. ``Z = centroid(y)``\r\n\r\n Performs centroid/UPGMC linkage on the condensed distance\r\n matrix ``y``. See ``linkage`` for more information on the return\r\n structure and algorithm.\r\n\r\n 2. ``Z = centroid(X)``\r\n\r\n Performs centroid/UPGMC linkage on the observation matrix ``X``\r\n using Euclidean distance as the distance metric. See ``linkage``\r\n for more information on the return structure and algorithm.\r\n\r\n Parameters\r\n ----------\r\n y : ndarray\r\n A condensed or redundant distance matrix. A condensed\r\n distance matrix is a flat array containing the upper\r\n triangular of the distance matrix. This is the form that\r\n ``pdist`` returns. Alternatively, a collection of\r\n m observation vectors in n dimensions may be passed as\r\n a m by n array.\r\n\r\n Returns\r\n -------\r\n Z : ndarray\r\n A linkage matrix containing the hierarchical clustering. See\r\n the ``linkage`` function documentation for more information\r\n on its structure.\r\n\r\n See Also\r\n --------\r\n linkage: for advanced creation of hierarchical clusterings.\r\n\r\n \"\"\"\r\n return linkage(y, method='centroid', metric='euclidean')\r\n\r\n\r\ndef median(y):\r\n \"\"\"\r\n Performs median/WPGMC linkage.\r\n\r\n See ``linkage`` for more information on the return structure\r\n and algorithm.\r\n\r\n The following are common calling conventions:\r\n\r\n 1. ``Z = median(y)``\r\n\r\n Performs median/WPGMC linkage on the condensed distance matrix\r\n ``y``. See ``linkage`` for more information on the return\r\n structure and algorithm.\r\n\r\n 2. ``Z = median(X)``\r\n\r\n Performs median/WPGMC linkage on the observation matrix ``X``\r\n using Euclidean distance as the distance metric. 
See linkage\r\n for more information on the return structure and algorithm.\r\n\r\n Parameters\r\n ----------\r\n y : ndarray\r\n A condensed or redundant distance matrix. A condensed\r\n distance matrix is a flat array containing the upper\r\n triangular of the distance matrix. This is the form that\r\n ``pdist`` returns. Alternatively, a collection of\r\n m observation vectors in n dimensions may be passed as\r\n a m by n array.\r\n\r\n Returns\r\n -------\r\n Z : ndarray\r\n The hierarchical clustering encoded as a linkage matrix.\r\n\r\n See Also\r\n --------\r\n linkage: for advanced creation of hierarchical clusterings.\r\n\r\n \"\"\"\r\n return linkage(y, method='median', metric='euclidean')\r\n\r\n\r\ndef ward(y):\r\n \"\"\"\r\n Performs Ward's linkage on a condensed or redundant distance matrix.\r\n\r\n See linkage for more information on the return structure\r\n and algorithm.\r\n\r\n The following are common calling conventions:\r\n\r\n 1. ``Z = ward(y)``\r\n Performs Ward's linkage on the condensed distance matrix ``Z``. See\r\n linkage for more information on the return structure and\r\n algorithm.\r\n\r\n 2. ``Z = ward(X)``\r\n Performs Ward's linkage on the observation matrix ``X`` using\r\n Euclidean distance as the distance metric. See linkage for more\r\n information on the return structure and algorithm.\r\n\r\n Parameters\r\n ----------\r\n y : ndarray\r\n A condensed or redundant distance matrix. A condensed\r\n distance matrix is a flat array containing the upper\r\n triangular of the distance matrix. This is the form that\r\n ``pdist`` returns. Alternatively, a collection of\r\n m observation vectors in n dimensions may be passed as\r\n a m by n array.\r\n\r\n Returns\r\n -------\r\n Z : ndarray\r\n The hierarchical clustering encoded as a linkage matrix.\r\n\r\n See Also\r\n --------\r\n linkage: for advanced creation of hierarchical clusterings.\r\n\r\n \"\"\"\r\n return linkage(y, method='ward', metric='euclidean')\r\n\r\n\r\ndef linkage(y, method='single', metric='euclidean'):\r\n \"\"\"\r\n Performs hierarchical/agglomerative clustering on the condensed\r\n distance matrix y.\r\n\r\n y must be a :math:`{n \\\\choose 2}` sized\r\n vector where n is the number of original observations paired\r\n in the distance matrix. The behavior of this function is very\r\n similar to the MATLAB linkage function.\r\n\r\n An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the\r\n :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and\r\n ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A\r\n cluster with an index less than :math:`n` corresponds to one of\r\n the :math:`n` original observations. The distance between\r\n clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The\r\n fourth value ``Z[i, 3]`` represents the number of original\r\n observations in the newly formed cluster.\r\n\r\n The following linkage methods are used to compute the distance\r\n :math:`d(s, t)` between two clusters :math:`s` and\r\n :math:`t`. The algorithm begins with a forest of clusters that\r\n have yet to be used in the hierarchy being formed. When two\r\n clusters :math:`s` and :math:`t` from this forest are combined\r\n into a single cluster :math:`u`, :math:`s` and :math:`t` are\r\n removed from the forest, and :math:`u` is added to the\r\n forest. When only one cluster remains in the forest, the algorithm\r\n stops, and this cluster becomes the root.\r\n\r\n A distance matrix is maintained at each iteration. 
The ``d[i,j]``\r\n entry corresponds to the distance between cluster :math:`i` and\r\n :math:`j` in the original forest.\r\n\r\n At each iteration, the algorithm must update the distance matrix\r\n to reflect the distance of the newly formed cluster u with the\r\n remaining clusters in the forest.\r\n\r\n Suppose there are :math:`|u|` original observations\r\n :math:`u[0], \\\\ldots, u[|u|-1]` in cluster :math:`u` and\r\n :math:`|v|` original objects :math:`v[0], \\\\ldots, v[|v|-1]` in\r\n cluster :math:`v`. Recall :math:`s` and :math:`t` are\r\n combined to form cluster :math:`u`. Let :math:`v` be any\r\n remaining cluster in the forest that is not :math:`u`.\r\n\r\n The following are methods for calculating the distance between the\r\n newly formed cluster :math:`u` and each :math:`v`.\r\n\r\n * method='single' assigns\r\n\r\n .. math::\r\n d(u,v) = \\\\min(dist(u[i],v[j]))\r\n\r\n for all points :math:`i` in cluster :math:`u` and\r\n :math:`j` in cluster :math:`v`. This is also known as the\r\n Nearest Point Algorithm.\r\n\r\n * method='complete' assigns\r\n\r\n .. math::\r\n d(u, v) = \\\\max(dist(u[i],v[j]))\r\n\r\n for all points :math:`i` in cluster u and :math:`j` in\r\n cluster :math:`v`. This is also known by the Farthest Point\r\n Algorithm or Voor Hees Algorithm.\r\n\r\n * method='average' assigns\r\n\r\n .. math::\r\n d(u,v) = \\\\sum_{ij} \\\\frac{d(u[i], v[j])}\r\n {(|u|*|v|)}\r\n\r\n for all points :math:`i` and :math:`j` where :math:`|u|`\r\n and :math:`|v|` are the cardinalities of clusters :math:`u`\r\n and :math:`v`, respectively. This is also called the UPGMA\r\n algorithm.\r\n\r\n * method='weighted' assigns\r\n\r\n .. math::\r\n d(u,v) = (dist(s,v) + dist(t,v))/2\r\n\r\n where cluster u was formed with cluster s and t and v\r\n is a remaining cluster in the forest. (also called WPGMA)\r\n\r\n * method='centroid' assigns\r\n\r\n .. math::\r\n dist(s,t) = ||c_s-c_t||_2\r\n\r\n where :math:`c_s` and :math:`c_t` are the centroids of\r\n clusters :math:`s` and :math:`t`, respectively. When two\r\n clusters :math:`s` and :math:`t` are combined into a new\r\n cluster :math:`u`, the new centroid is computed over all the\r\n original objects in clusters :math:`s` and :math:`t`. The\r\n distance then becomes the Euclidean distance between the\r\n centroid of :math:`u` and the centroid of a remaining cluster\r\n :math:`v` in the forest. This is also known as the UPGMC\r\n algorithm.\r\n\r\n * method='median' assigns :math:`d(s,t)` like the ``centroid``\r\n method. When two clusters :math:`s` and :math:`t` are combined\r\n into a new cluster :math:`u`, the average of centroids s and t\r\n give the new centroid :math:`u`. This is also known as the\r\n WPGMC algorithm.\r\n\r\n * method='ward' uses the Ward variance minimization algorithm.\r\n The new entry :math:`d(u,v)` is computed as follows,\r\n\r\n .. math::\r\n\r\n d(u,v) = \\\\sqrt{\\\\frac{|v|+|s|}\r\n {T}d(v,s)^2\r\n + \\\\frac{|v|+|t|}\r\n {T}d(v,t)^2\r\n - \\\\frac{|v|}\r\n {T}d(s,t)^2}\r\n\r\n where :math:`u` is the newly joined cluster consisting of\r\n clusters :math:`s` and :math:`t`, :math:`v` is an unused\r\n cluster in the forest, :math:`T=|v|+|s|+|t|`, and\r\n :math:`|*|` is the cardinality of its argument. This is also\r\n known as the incremental algorithm.\r\n\r\n Warning: When the minimum distance pair in the forest is chosen, there\r\n may be two or more pairs with the same minimum distance. 
This\r\n implementation may chose a different minimum than the MATLAB\r\n version.\r\n\r\n Parameters\r\n ----------\r\n y : ndarray\r\n A condensed or redundant distance matrix. A condensed distance matrix\r\n is a flat array containing the upper triangular of the distance matrix.\r\n This is the form that ``pdist`` returns. Alternatively, a collection of\r\n :math:`m` observation vectors in n dimensions may be passed as an\r\n :math:`m` by :math:`n` array.\r\n method : str, optional\r\n The linkage algorithm to use. See the ``Linkage Methods`` section below\r\n for full descriptions.\r\n metric : str or function, optional\r\n The distance metric to use in the case that y is a collection of\r\n observation vectors; ignored otherwise. See the ``distance.pdist``\r\n function for a list of valid distance metrics. A custom distance\r\n function can also be used. See the ``distance.pdist`` function for\r\n details.\r\n\r\n Returns\r\n -------\r\n Z : ndarray\r\n The hierarchical clustering encoded as a linkage matrix.\r\n\r\n Notes\r\n -----\r\n 1. For method 'single' an optimized algorithm called SLINK is implemented,\r\n which has :math:`O(n^2)` time complexity.\r\n For methods 'complete', 'average', 'weighted' and 'ward' an algorithm\r\n called nearest-neighbors chain is implemented, which too has time\r\n complexity :math:`O(n^2)`.\r\n For other methods a naive algorithm is implemented with :math:`O(n^3)`\r\n time complexity.\r\n All algorithms use :math:`O(n^2)` memory.\r\n Refer to [1]_ for details about the algorithms.\r\n 2. Methods 'centroid', 'median' and 'ward' are correctly defined only if\r\n Euclidean pairwise metric is used. If `y` is passed as precomputed\r\n pairwise distances, then it is a user responsibility to assure that\r\n these distances are in fact Euclidean, otherwise the produced result\r\n will be incorrect.\r\n\r\n References\r\n ----------\r\n .. 
[1] Daniel Mullner, \"Modern hierarchical, agglomerative clustering\r\n algorithms\", `arXiv:1109.2378v1 <http://arxiv.org/abs/1109.2378v1>`_\r\n , 2011.\r\n \"\"\"\r\n if method not in _LINKAGE_METHODS:\r\n raise ValueError(\"Invalid method: {0}\".format(method))\r\n\r\n y = _convert_to_double(np.asarray(y, order='c'))\r\n\r\n if y.ndim == 1:\r\n distance.is_valid_y(y, throw=True, name='y')\r\n [y] = _copy_arrays_if_base_present([y])\r\n elif y.ndim == 2:\r\n if method in _EUCLIDEAN_METHODS and metric != 'euclidean':\r\n raise ValueError(\"Method '{0}' requires the distance metric \"\r\n \"to be Euclidean\".format(method))\r\n y = distance.pdist(y, metric)\r\n else:\r\n raise ValueError(\"`y` must be 1 or 2 dimensional.\")\r\n\r\n n = int(distance.num_obs_y(y))\r\n method_code = _LINKAGE_METHODS[method]\r\n if method == 'single':\r\n return _hierarchy.slink(y, n)\r\n elif method in ['complete', 'average', 'weighted', 'ward']:\r\n return _hierarchy.nn_chain(y, n, method_code)\r\n else:\r\n return _hierarchy.linkage(y, n, method_code)\r\n\r\n\r\nclass ClusterNode:\r\n \"\"\"\r\n A tree node class for representing a cluster.\r\n\r\n Leaf nodes correspond to original observations, while non-leaf nodes\r\n correspond to non-singleton clusters.\r\n\r\n The to_tree function converts a matrix returned by the linkage\r\n function into an easy-to-use tree representation.\r\n\r\n See Also\r\n --------\r\n to_tree : for converting a linkage matrix ``Z`` into a tree object.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, id, left=None, right=None, dist=0, count=1):\r\n if id < 0:\r\n raise ValueError('The id must be non-negative.')\r\n if dist < 0:\r\n raise ValueError('The distance must be non-negative.')\r\n if (left is None and right is not None) or \\\r\n (left is not None and right is None):\r\n raise ValueError('Only full or proper binary trees are permitted.'\r\n ' This node has one child.')\r\n if count < 1:\r\n raise ValueError('A cluster must contain at least one original '\r\n 'observation.')\r\n self.id = id\r\n self.left = left\r\n self.right = right\r\n self.dist = dist\r\n if self.left is None:\r\n self.count = count\r\n else:\r\n self.count = left.count + right.count\r\n\r\n def __lt__(self, node):\r\n if not isinstance(node, ClusterNode):\r\n raise ValueError(\"Can't compare ClusterNode \"\r\n \"to type {}\".format(type(node)))\r\n return self.dist < node.dist\r\n\r\n def __gt__(self, node):\r\n if not isinstance(node, ClusterNode):\r\n raise ValueError(\"Can't compare ClusterNode \"\r\n \"to type {}\".format(type(node)))\r\n return self.dist > node.dist\r\n\r\n def __eq__(self, node):\r\n if not isinstance(node, ClusterNode):\r\n raise ValueError(\"Can't compare ClusterNode \"\r\n \"to type {}\".format(type(node)))\r\n return self.dist == node.dist\r\n\r\n def get_id(self):\r\n \"\"\"\r\n The identifier of the target node.\r\n\r\n For ``0 <= i < n``, `i` corresponds to original observation i.\r\n For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed\r\n at iteration ``i-n``.\r\n\r\n Returns\r\n -------\r\n id : int\r\n The identifier of the target node.\r\n\r\n \"\"\"\r\n return self.id\r\n\r\n def get_count(self):\r\n \"\"\"\r\n The number of leaf nodes (original observations) belonging to\r\n the cluster node nd. 
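# --- Illustrative sketch (not part of this module's source) ---------------
# linkage() accepts either a condensed distance vector or an (m, n)
# observation matrix; as the dispatch code above shows, the 'centroid',
# 'median' and 'ward' methods insist on the Euclidean metric when raw
# observations are passed. The data values below are made up for demonstration.
import numpy as np
from scipy.cluster.hierarchy import linkage

X = np.random.RandomState(2).rand(5, 3)

Z = linkage(X, method='centroid')   # fine: metric defaults to 'euclidean'
print(Z.shape)                      # (4, 4): n-1 merges, 4 columns

try:
    linkage(X, method='ward', metric='cityblock')
except ValueError as err:
    print(err)  # "Method 'ward' requires the distance metric to be Euclidean"
# ---------------------------------------------------------------------------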
If the target node is a leaf, 1 is\r\n returned.\r\n\r\n Returns\r\n -------\r\n get_count : int\r\n The number of leaf nodes below the target node.\r\n\r\n \"\"\"\r\n return self.count\r\n\r\n def get_left(self):\r\n \"\"\"\r\n Return a reference to the left child tree object.\r\n\r\n Returns\r\n -------\r\n left : ClusterNode\r\n The left child of the target node. If the node is a leaf,\r\n None is returned.\r\n\r\n \"\"\"\r\n return self.left\r\n\r\n def get_right(self):\r\n \"\"\"\r\n Returns a reference to the right child tree object.\r\n\r\n Returns\r\n -------\r\n right : ClusterNode\r\n The left child of the target node. If the node is a leaf,\r\n None is returned.\r\n\r\n \"\"\"\r\n return self.right\r\n\r\n def is_leaf(self):\r\n \"\"\"\r\n Returns True if the target node is a leaf.\r\n\r\n Returns\r\n -------\r\n leafness : bool\r\n True if the target node is a leaf node.\r\n\r\n \"\"\"\r\n return self.left is None\r\n\r\n def pre_order(self, func=(lambda x: x.id)):\r\n \"\"\"\r\n Performs pre-order traversal without recursive function calls.\r\n\r\n When a leaf node is first encountered, ``func`` is called with\r\n the leaf node as its argument, and its result is appended to\r\n the list.\r\n\r\n For example, the statement::\r\n\r\n ids = root.pre_order(lambda x: x.id)\r\n\r\n returns a list of the node ids corresponding to the leaf nodes\r\n of the tree as they appear from left to right.\r\n\r\n Parameters\r\n ----------\r\n func : function\r\n Applied to each leaf ClusterNode object in the pre-order traversal.\r\n Given the i'th leaf node in the pre-ordeR traversal ``n[i]``, the\r\n result of func(n[i]) is stored in L[i]. If not provided, the index\r\n of the original observation to which the node corresponds is used.\r\n\r\n Returns\r\n -------\r\n L : list\r\n The pre-order traversal.\r\n\r\n \"\"\"\r\n\r\n # Do a preorder traversal, caching the result. 
To avoid having to do\r\n # recursion, we'll store the previous index we've visited in a vector.\r\n n = self.count\r\n\r\n curNode = [None] * (2 * n)\r\n lvisited = set()\r\n rvisited = set()\r\n curNode[0] = self\r\n k = 0\r\n preorder = []\r\n while k >= 0:\r\n nd = curNode[k]\r\n ndid = nd.id\r\n if nd.is_leaf():\r\n preorder.append(func(nd))\r\n k = k - 1\r\n else:\r\n if ndid not in lvisited:\r\n curNode[k + 1] = nd.left\r\n lvisited.add(ndid)\r\n k = k + 1\r\n elif ndid not in rvisited:\r\n curNode[k + 1] = nd.right\r\n rvisited.add(ndid)\r\n k = k + 1\r\n # If we've visited the left and right of this non-leaf\r\n # node already, go up in the tree.\r\n else:\r\n k = k - 1\r\n\r\n return preorder\r\n\r\n\r\n_cnode_bare = ClusterNode(0)\r\n_cnode_type = type(ClusterNode)\r\n\r\n\r\ndef _order_cluster_tree(Z):\r\n \"\"\"\r\n Returns clustering nodes in bottom-up order by distance.\r\n\r\n Parameters\r\n ----------\r\n Z : scipy.cluster.linkage array\r\n The linkage matrix.\r\n\r\n Returns\r\n -------\r\n nodes : list\r\n A list of ClusterNode objects.\r\n \"\"\"\r\n q = deque()\r\n tree = to_tree(Z)\r\n q.append(tree)\r\n nodes = []\r\n\r\n while q:\r\n node = q.popleft()\r\n if not node.is_leaf():\r\n bisect.insort_left(nodes, node)\r\n q.append(node.get_right())\r\n q.append(node.get_left())\r\n return nodes\r\n\r\n\r\ndef cut_tree(Z, n_clusters=None, height=None):\r\n \"\"\"\r\n Given a linkage matrix Z, return the cut tree.\r\n\r\n Parameters\r\n ----------\r\n Z : scipy.cluster.linkage array\r\n The linkage matrix.\r\n n_clusters : array_like, optional\r\n Number of clusters in the tree at the cut point.\r\n height : array_like, optional\r\n The height at which to cut the tree. Only possible for ultrametric\r\n trees.\r\n\r\n Returns\r\n -------\r\n cutree : array\r\n An array indicating group membership at each agglomeration step. I.e.,\r\n for a full cut tree, in the first column each data point is in its own\r\n cluster. At the next step, two nodes are merged. Finally all singleton\r\n and non-singleton clusters are in one group. 
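# --- Illustrative sketch (not part of this module's source) ---------------
# A ClusterNode tree built with to_tree() (defined later in this module) can
# be walked with the iterative pre_order() method shown above. Data values
# are made up; the import path assumes scipy.cluster.hierarchy.
import numpy as np
from scipy.cluster.hierarchy import linkage, to_tree

X = np.random.RandomState(0).randn(6, 2)
Z = linkage(X, method='average')

root = to_tree(Z)
print(root.get_count())                    # 6 original observations under the root
print(root.pre_order())                    # leaf ids from left to right
print(root.pre_order(lambda nd: nd.dist))  # callback form; dist is 0.0 for leaves
# ---------------------------------------------------------------------------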
If `n_clusters` or\r\n `height` is given, the columns correspond to the columns of `n_clusters` or\r\n `height`.\r\n\r\n Examples\r\n --------\r\n >>> from scipy import cluster\r\n >>> np.random.seed(23)\r\n >>> X = np.random.randn(50, 4)\r\n >>> Z = cluster.hierarchy.ward(X)\r\n >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])\r\n >>> cutree[:10]\r\n array([[0, 0],\r\n [1, 1],\r\n [2, 2],\r\n [3, 3],\r\n [3, 4],\r\n [2, 2],\r\n [0, 0],\r\n [1, 5],\r\n [3, 6],\r\n [4, 7]])\r\n\r\n \"\"\"\r\n nobs = num_obs_linkage(Z)\r\n nodes = _order_cluster_tree(Z)\r\n\r\n if height is not None and n_clusters is not None:\r\n raise ValueError(\"At least one of either height or n_clusters \"\r\n \"must be None\")\r\n elif height is None and n_clusters is None: # return the full cut tree\r\n cols_idx = np.arange(nobs)\r\n elif height is not None:\r\n heights = np.array([x.dist for x in nodes])\r\n cols_idx = np.searchsorted(heights, height)\r\n else:\r\n cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters)\r\n\r\n try:\r\n n_cols = len(cols_idx)\r\n except TypeError: # scalar\r\n n_cols = 1\r\n cols_idx = np.array([cols_idx])\r\n\r\n groups = np.zeros((n_cols, nobs), dtype=int)\r\n last_group = np.arange(nobs)\r\n if 0 in cols_idx:\r\n groups[0] = last_group\r\n\r\n for i, node in enumerate(nodes):\r\n idx = node.pre_order()\r\n this_group = last_group.copy()\r\n this_group[idx] = last_group[idx].min()\r\n this_group[this_group > last_group[idx].max()] -= 1\r\n if i + 1 in cols_idx:\r\n groups[np.where(i + 1 == cols_idx)[0]] = this_group\r\n last_group = this_group\r\n\r\n return groups.T\r\n\r\n\r\ndef to_tree(Z, rd=False):\r\n \"\"\"\r\n Converts a hierarchical clustering encoded in the matrix ``Z`` (by\r\n linkage) into an easy-to-use tree object.\r\n\r\n The reference r to the root ClusterNode object is returned.\r\n\r\n Each ClusterNode object has a left, right, dist, id, and count\r\n attribute. The left and right attributes point to ClusterNode objects\r\n that were combined to generate the cluster. If both are None then\r\n the ClusterNode object is a leaf node, its count must be 1, and its\r\n distance is meaningless but set to 0.\r\n\r\n Note: This function is provided for the convenience of the library\r\n user. ClusterNodes are not used as input to any of the functions in this\r\n library.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The linkage matrix in proper form (see the ``linkage``\r\n function documentation).\r\n rd : bool, optional\r\n When False, a reference to the root ClusterNode object is\r\n returned. Otherwise, a tuple (r,d) is returned. ``r`` is a\r\n reference to the root node while ``d`` is a dictionary\r\n mapping cluster ids to ClusterNode references. If a cluster id is\r\n less than n, then it corresponds to a singleton cluster\r\n (leaf node). 
See ``linkage`` for more information on the\r\n assignment of cluster ids to clusters.\r\n\r\n Returns\r\n -------\r\n L : list\r\n The pre-order traversal.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n\r\n # Number of original objects is equal to the number of rows minus 1.\r\n n = Z.shape[0] + 1\r\n\r\n # Create a list full of None's to store the node objects\r\n d = [None] * (n * 2 - 1)\r\n\r\n # Create the nodes corresponding to the n original objects.\r\n for i in xrange(0, n):\r\n d[i] = ClusterNode(i)\r\n\r\n nd = None\r\n\r\n for i in xrange(0, n - 1):\r\n fi = int(Z[i, 0])\r\n fj = int(Z[i, 1])\r\n if fi > i + n:\r\n raise ValueError(('Corrupt matrix Z. Index to derivative cluster '\r\n 'is used before it is formed. See row %d, '\r\n 'column 0') % fi)\r\n if fj > i + n:\r\n raise ValueError(('Corrupt matrix Z. Index to derivative cluster '\r\n 'is used before it is formed. See row %d, '\r\n 'column 1') % fj)\r\n nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])\r\n # ^ id ^ left ^ right ^ dist\r\n if Z[i, 3] != nd.count:\r\n raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '\r\n 'incorrect.') % i)\r\n d[n + i] = nd\r\n\r\n if rd:\r\n return (nd, d)\r\n else:\r\n return nd\r\n\r\n\r\ndef _convert_to_bool(X):\r\n if X.dtype != bool:\r\n X = X.astype(bool)\r\n if not X.flags.contiguous:\r\n X = X.copy()\r\n return X\r\n\r\n\r\ndef _convert_to_double(X):\r\n if X.dtype != np.double:\r\n X = X.astype(np.double)\r\n if not X.flags.contiguous:\r\n X = X.copy()\r\n return X\r\n\r\n\r\ndef cophenet(Z, Y=None):\r\n \"\"\"\r\n Calculates the cophenetic distances between each observation in\r\n the hierarchical clustering defined by the linkage ``Z``.\r\n\r\n Suppose ``p`` and ``q`` are original observations in\r\n disjoint clusters ``s`` and ``t``, respectively and\r\n ``s`` and ``t`` are joined by a direct parent cluster\r\n ``u``. The cophenetic distance between observations\r\n ``i`` and ``j`` is simply the distance between\r\n clusters ``s`` and ``t``.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The hierarchical clustering encoded as an array\r\n (see `linkage` function).\r\n Y : ndarray (optional)\r\n Calculates the cophenetic correlation coefficient ``c`` of a\r\n hierarchical clustering defined by the linkage matrix `Z`\r\n of a set of :math:`n` observations in :math:`m`\r\n dimensions. `Y` is the condensed distance matrix from which\r\n `Z` was generated.\r\n\r\n Returns\r\n -------\r\n c : ndarray\r\n The cophentic correlation distance (if ``y`` is passed).\r\n d : ndarray\r\n The cophenetic distance matrix in condensed form. 
The\r\n :math:`ij` th entry is the cophenetic distance between\r\n original observations :math:`i` and :math:`j`.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n Zs = Z.shape\r\n n = Zs[0] + 1\r\n\r\n zz = np.zeros((n * (n-1)) // 2, dtype=np.double)\r\n # Since the C code does not support striding using strides.\r\n # The dimensions are used instead.\r\n Z = _convert_to_double(Z)\r\n\r\n _hierarchy.cophenetic_distances(Z, zz, int(n))\r\n if Y is None:\r\n return zz\r\n\r\n Y = np.asarray(Y, order='c')\r\n distance.is_valid_y(Y, throw=True, name='Y')\r\n\r\n z = zz.mean()\r\n y = Y.mean()\r\n Yy = Y - y\r\n Zz = zz - z\r\n numerator = (Yy * Zz)\r\n denomA = Yy**2\r\n denomB = Zz**2\r\n c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))\r\n return (c, zz)\r\n\r\n\r\ndef inconsistent(Z, d=2):\r\n r\"\"\"\r\n Calculates inconsistency statistics on a linkage.\r\n\r\n Note: This function behaves similarly to the MATLAB(TM)\r\n inconsistent function.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical\r\n clustering). See `linkage` documentation for more information on its\r\n form.\r\n d : int, optional\r\n The number of links up to `d` levels below each non-singleton cluster.\r\n\r\n Returns\r\n -------\r\n R : ndarray\r\n A :math:`(n-1)` by 5 matrix where the ``i``'th row contains the link\r\n statistics for the non-singleton cluster ``i``. The link statistics are\r\n computed over the link heights for links :math:`d` levels below the\r\n cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard\r\n deviation of the link heights, respectively; ``R[i,2]`` is the number\r\n of links included in the calculation; and ``R[i,3]`` is the\r\n inconsistency coefficient,\r\n\r\n .. 
math:: \\frac{\\mathtt{Z[i,2]} - \\mathtt{R[i,0]}} {R[i,1]}\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n\r\n Zs = Z.shape\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n if (not d == np.floor(d)) or d < 0:\r\n raise ValueError('The second argument d must be a nonnegative '\r\n 'integer value.')\r\n\r\n # Since the C code does not support striding using strides.\r\n # The dimensions are used instead.\r\n [Z] = _copy_arrays_if_base_present([Z])\r\n\r\n n = Zs[0] + 1\r\n R = np.zeros((n - 1, 4), dtype=np.double)\r\n\r\n _hierarchy.inconsistent(Z, R, int(n), int(d))\r\n return R\r\n\r\n\r\ndef from_mlab_linkage(Z):\r\n \"\"\"\r\n Converts a linkage matrix generated by MATLAB(TM) to a new\r\n linkage matrix compatible with this module.\r\n\r\n The conversion does two things:\r\n\r\n * the indices are converted from ``1..N`` to ``0..(N-1)`` form,\r\n and\r\n\r\n * a fourth column Z[:,3] is added where Z[i,3] is represents the\r\n number of original observations (leaves) in the non-singleton\r\n cluster i.\r\n\r\n This function is useful when loading in linkages from legacy data\r\n files generated by MATLAB.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n A linkage matrix generated by MATLAB(TM).\r\n\r\n Returns\r\n -------\r\n ZS : ndarray\r\n A linkage matrix compatible with this library.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, dtype=np.double, order='c')\r\n Zs = Z.shape\r\n\r\n # If it's empty, return it.\r\n if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):\r\n return Z.copy()\r\n\r\n if len(Zs) != 2:\r\n raise ValueError(\"The linkage array must be rectangular.\")\r\n\r\n # If it contains no rows, return it.\r\n if Zs[0] == 0:\r\n return Z.copy()\r\n\r\n Zpart = Z.copy()\r\n if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:\r\n raise ValueError('The format of the indices is not 1..N')\r\n\r\n Zpart[:, 0:2] -= 1.0\r\n CS = np.zeros((Zs[0],), dtype=np.double)\r\n _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)\r\n return np.hstack([Zpart, CS.reshape(Zs[0], 1)])\r\n\r\n\r\ndef to_mlab_linkage(Z):\r\n \"\"\"\r\n Converts a linkage matrix to a MATLAB(TM) compatible one.\r\n\r\n Converts a linkage matrix ``Z`` generated by the linkage function\r\n of this module to a MATLAB(TM) compatible one. 
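# --- Illustrative sketch (not part of this module's source) ---------------
# The cophenetic correlation coefficient returned by cophenet() (defined
# above) is a common sanity check of how faithfully a hierarchy preserves the
# original pairwise distances. The data values below are made up.
import numpy as np
from scipy.cluster.hierarchy import linkage, cophenet
from scipy.spatial.distance import pdist

X = np.random.RandomState(3).randn(12, 4)
y = pdist(X)
Z = linkage(y, method='average')

c, coph_dists = cophenet(Z, y)  # coefficient plus condensed cophenetic distances
print(round(c, 3), coph_dists.shape == y.shape)  # shapes match: both (66,)
# ---------------------------------------------------------------------------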
The return linkage\r\n matrix has the last column removed and the cluster indices are\r\n converted to ``1..N`` indexing.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n A linkage matrix generated by this library.\r\n\r\n Returns\r\n -------\r\n to_mlab_linkage : ndarray\r\n A linkage matrix compatible with MATLAB(TM)'s hierarchical\r\n clustering functions.\r\n\r\n The return linkage matrix has the last column removed\r\n and the cluster indices are converted to ``1..N`` indexing.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c', dtype=np.double)\r\n Zs = Z.shape\r\n if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):\r\n return Z.copy()\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n\r\n ZP = Z[:, 0:3].copy()\r\n ZP[:, 0:2] += 1.0\r\n\r\n return ZP\r\n\r\n\r\ndef is_monotonic(Z):\r\n \"\"\"\r\n Returns True if the linkage passed is monotonic.\r\n\r\n The linkage is monotonic if for every cluster :math:`s` and :math:`t`\r\n joined, the distance between them is no less than the distance\r\n between any previously joined clusters.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The linkage matrix to check for monotonicity.\r\n\r\n Returns\r\n -------\r\n b : bool\r\n A boolean indicating whether the linkage is monotonic.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n\r\n # We expect the i'th value to be greater than its successor.\r\n return (Z[1:, 2] >= Z[:-1, 2]).all()\r\n\r\n\r\ndef is_valid_im(R, warning=False, throw=False, name=None):\r\n \"\"\"Returns True if the inconsistency matrix passed is valid.\r\n\r\n It must be a :math:`n` by 4 numpy array of doubles. The standard\r\n deviations ``R[:,1]`` must be nonnegative. The link counts\r\n ``R[:,2]`` must be positive and no greater than :math:`n-1`.\r\n\r\n Parameters\r\n ----------\r\n R : ndarray\r\n The inconsistency matrix to check for validity.\r\n warning : bool, optional\r\n When True, issues a Python warning if the linkage\r\n matrix passed is invalid.\r\n throw : bool, optional\r\n When True, throws a Python exception if the linkage\r\n matrix passed is invalid.\r\n name : str, optional\r\n This string refers to the variable name of the invalid\r\n linkage matrix.\r\n\r\n Returns\r\n -------\r\n b : bool\r\n True if the inconsistency matrix is valid.\r\n\r\n \"\"\"\r\n R = np.asarray(R, order='c')\r\n valid = True\r\n name_str = \"%r \" % name if name else ''\r\n try:\r\n if type(R) != np.ndarray:\r\n raise TypeError('Variable %spassed as inconsistency matrix is not '\r\n 'a numpy array.' % name_str)\r\n if R.dtype != np.double:\r\n raise TypeError('Inconsistency matrix %smust contain doubles '\r\n '(double).' % name_str)\r\n if len(R.shape) != 2:\r\n raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '\r\n 'be two-dimensional).' % name_str)\r\n if R.shape[1] != 4:\r\n raise ValueError('Inconsistency matrix %smust have 4 columns.' %\r\n name_str)\r\n if R.shape[0] < 1:\r\n raise ValueError('Inconsistency matrix %smust have at least one '\r\n 'row.' % name_str)\r\n if (R[:, 0] < 0).any():\r\n raise ValueError('Inconsistency matrix %scontains negative link '\r\n 'height means.' % name_str)\r\n if (R[:, 1] < 0).any():\r\n raise ValueError('Inconsistency matrix %scontains negative link '\r\n 'height standard deviations.' % name_str)\r\n if (R[:, 2] < 0).any():\r\n raise ValueError('Inconsistency matrix %scontains negative link '\r\n 'counts.' 
% name_str)\r\n except Exception as e:\r\n if throw:\r\n raise\r\n if warning:\r\n _warning(str(e))\r\n valid = False\r\n\r\n return valid\r\n\r\n\r\ndef is_valid_linkage(Z, warning=False, throw=False, name=None):\r\n \"\"\"\r\n Checks the validity of a linkage matrix.\r\n\r\n A linkage matrix is valid if it is a two dimensional array (type double)\r\n with :math:`n` rows and 4 columns. The first two columns must contain\r\n indices between 0 and :math:`2n-1`. For a given row ``i``, the following\r\n two expressions have to hold:\r\n\r\n .. math::\r\n\r\n 0 \\\\leq \\\\mathtt{Z[i,0]} \\\\leq i+n-1\r\n 0 \\\\leq Z[i,1] \\\\leq i+n-1\r\n\r\n I.e. a cluster cannot join another cluster unless the cluster being joined\r\n has been generated.\r\n\r\n Parameters\r\n ----------\r\n Z : array_like\r\n Linkage matrix.\r\n warning : bool, optional\r\n When True, issues a Python warning if the linkage\r\n matrix passed is invalid.\r\n throw : bool, optional\r\n When True, throws a Python exception if the linkage\r\n matrix passed is invalid.\r\n name : str, optional\r\n This string refers to the variable name of the invalid\r\n linkage matrix.\r\n\r\n Returns\r\n -------\r\n b : bool\r\n True if the inconsistency matrix is valid.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n valid = True\r\n name_str = \"%r \" % name if name else ''\r\n try:\r\n if type(Z) != np.ndarray:\r\n raise TypeError('Passed linkage argument %sis not a valid array.' %\r\n name_str)\r\n if Z.dtype != np.double:\r\n raise TypeError('Linkage matrix %smust contain doubles.' % name_str)\r\n if len(Z.shape) != 2:\r\n raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '\r\n 'two-dimensional).' % name_str)\r\n if Z.shape[1] != 4:\r\n raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)\r\n if Z.shape[0] == 0:\r\n raise ValueError('Linkage must be computed on at least two '\r\n 'observations.')\r\n n = Z.shape[0]\r\n if n > 1:\r\n if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):\r\n raise ValueError('Linkage %scontains negative indices.' %\r\n name_str)\r\n if (Z[:, 2] < 0).any():\r\n raise ValueError('Linkage %scontains negative distances.' %\r\n name_str)\r\n if (Z[:, 3] < 0).any():\r\n raise ValueError('Linkage %scontains negative counts.' %\r\n name_str)\r\n if _check_hierarchy_uses_cluster_before_formed(Z):\r\n raise ValueError('Linkage %suses non-singleton cluster before '\r\n 'it is formed.' 
% name_str)\r\n if _check_hierarchy_uses_cluster_more_than_once(Z):\r\n raise ValueError('Linkage %suses the same cluster more than once.'\r\n % name_str)\r\n except Exception as e:\r\n if throw:\r\n raise\r\n if warning:\r\n _warning(str(e))\r\n valid = False\r\n\r\n return valid\r\n\r\n\r\ndef _check_hierarchy_uses_cluster_before_formed(Z):\r\n n = Z.shape[0] + 1\r\n for i in xrange(0, n - 1):\r\n if Z[i, 0] >= n + i or Z[i, 1] >= n + i:\r\n return True\r\n return False\r\n\r\n\r\ndef _check_hierarchy_uses_cluster_more_than_once(Z):\r\n n = Z.shape[0] + 1\r\n chosen = set([])\r\n for i in xrange(0, n - 1):\r\n if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:\r\n return True\r\n chosen.add(Z[i, 0])\r\n chosen.add(Z[i, 1])\r\n return False\r\n\r\n\r\ndef _check_hierarchy_not_all_clusters_used(Z):\r\n n = Z.shape[0] + 1\r\n chosen = set([])\r\n for i in xrange(0, n - 1):\r\n chosen.add(int(Z[i, 0]))\r\n chosen.add(int(Z[i, 1]))\r\n must_chosen = set(range(0, 2 * n - 2))\r\n return len(must_chosen.difference(chosen)) > 0\r\n\r\n\r\ndef num_obs_linkage(Z):\r\n \"\"\"\r\n Returns the number of original observations of the linkage matrix\r\n passed.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The linkage matrix on which to perform the operation.\r\n\r\n Returns\r\n -------\r\n n : int\r\n The number of original observations in the linkage.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n return (Z.shape[0] + 1)\r\n\r\n\r\ndef correspond(Z, Y):\r\n \"\"\"\r\n Checks for correspondence between linkage and condensed distance matrices\r\n\r\n They must have the same number of original observations for\r\n the check to succeed.\r\n\r\n This function is useful as a sanity check in algorithms that make\r\n extensive use of linkage and distance matrices that must\r\n correspond to the same set of original observations.\r\n\r\n Parameters\r\n ----------\r\n Z : array_like\r\n The linkage matrix to check for correspondence.\r\n Y : array_like\r\n The condensed distance matrix to check for correspondence.\r\n\r\n Returns\r\n -------\r\n b : bool\r\n A boolean indicating whether the linkage matrix and distance\r\n matrix could possibly correspond to one another.\r\n\r\n \"\"\"\r\n is_valid_linkage(Z, throw=True)\r\n distance.is_valid_y(Y, throw=True)\r\n Z = np.asarray(Z, order='c')\r\n Y = np.asarray(Y, order='c')\r\n return distance.num_obs_y(Y) == num_obs_linkage(Z)\r\n\r\n\r\ndef fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):\r\n \"\"\"\r\n Forms flat clusters from the hierarchical clustering defined by\r\n the linkage matrix ``Z``.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The hierarchical clustering encoded with the matrix returned\r\n by the `linkage` function.\r\n t : float\r\n The threshold to apply when forming flat clusters.\r\n criterion : str, optional\r\n The criterion to use in forming flat clusters. This can\r\n be any of the following values:\r\n\r\n ``inconsistent`` : If a cluster node and all its\r\n descendants have an inconsistent value less than or equal\r\n to `t` then all its leaf descendants belong to the\r\n same flat cluster. When no non-singleton cluster meets\r\n this criterion, every node is assigned to its own\r\n cluster. 
(Default)\r\n\r\n ``distance`` : Forms flat clusters so that the original\r\n observations in each flat cluster have no greater a\r\n cophenetic distance than `t`.\r\n\r\n ``maxclust`` : Finds a minimum threshold ``r`` so that\r\n the cophenetic distance between any two original\r\n observations in the same flat cluster is no more than\r\n ``r`` and no more than `t` flat clusters are formed.\r\n\r\n ``monocrit`` : Forms a flat cluster from a cluster node c\r\n with index i when ``monocrit[j] <= t``.\r\n\r\n For example, to threshold on the maximum mean distance\r\n as computed in the inconsistency matrix R with a\r\n threshold of 0.8 do::\r\n\r\n MR = maxRstat(Z, R, 3)\r\n cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)\r\n\r\n ``maxclust_monocrit`` : Forms a flat cluster from a\r\n non-singleton cluster node ``c`` when ``monocrit[i] <=\r\n r`` for all cluster indices ``i`` below and including\r\n ``c``. ``r`` is minimized such that no more than ``t``\r\n flat clusters are formed. monocrit must be\r\n monotonic. For example, to minimize the threshold t on\r\n maximum inconsistency values so that no more than 3 flat\r\n clusters are formed, do::\r\n\r\n MI = maxinconsts(Z, R)\r\n cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)\r\n\r\n depth : int, optional\r\n The maximum depth to perform the inconsistency calculation.\r\n It has no meaning for the other criteria. Default is 2.\r\n R : ndarray, optional\r\n The inconsistency matrix to use for the 'inconsistent'\r\n criterion. This matrix is computed if not provided.\r\n monocrit : ndarray, optional\r\n An array of length n-1. `monocrit[i]` is the\r\n statistics upon which non-singleton i is thresholded. The\r\n monocrit vector must be monotonic, i.e. given a node c with\r\n index i, for all node indices j corresponding to nodes\r\n below c, ``monocrit[i] >= monocrit[j]``.\r\n\r\n Returns\r\n -------\r\n fcluster : ndarray\r\n An array of length n. 
T[i] is the flat cluster number to\r\n which original observation i belongs.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n\r\n n = Z.shape[0] + 1\r\n T = np.zeros((n,), dtype='i')\r\n\r\n # Since the C code does not support striding using strides.\r\n # The dimensions are used instead.\r\n [Z] = _copy_arrays_if_base_present([Z])\r\n\r\n if criterion == 'inconsistent':\r\n if R is None:\r\n R = inconsistent(Z, depth)\r\n else:\r\n R = np.asarray(R, order='c')\r\n is_valid_im(R, throw=True, name='R')\r\n # Since the C code does not support striding using strides.\r\n # The dimensions are used instead.\r\n [R] = _copy_arrays_if_base_present([R])\r\n _hierarchy.cluster_in(Z, R, T, float(t), int(n))\r\n elif criterion == 'distance':\r\n _hierarchy.cluster_dist(Z, T, float(t), int(n))\r\n elif criterion == 'maxclust':\r\n _hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))\r\n elif criterion == 'monocrit':\r\n [monocrit] = _copy_arrays_if_base_present([monocrit])\r\n _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))\r\n elif criterion == 'maxclust_monocrit':\r\n [monocrit] = _copy_arrays_if_base_present([monocrit])\r\n _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))\r\n else:\r\n raise ValueError('Invalid cluster formation criterion: %s'\r\n % str(criterion))\r\n return T\r\n\r\n\r\ndef fclusterdata(X, t, criterion='inconsistent',\r\n metric='euclidean', depth=2, method='single', R=None):\r\n \"\"\"\r\n Cluster observation data using a given metric.\r\n\r\n Clusters the original observations in the n-by-m data\r\n matrix X (n observations in m dimensions), using the euclidean\r\n distance metric to calculate distances between original observations,\r\n performs hierarchical clustering using the single linkage algorithm,\r\n and forms flat clusters using the inconsistency method with `t` as the\r\n cut-off threshold.\r\n\r\n A one-dimensional array T of length n is returned. T[i] is the index\r\n of the flat cluster to which the original observation i belongs.\r\n\r\n Parameters\r\n ----------\r\n X : (N, M) ndarray\r\n N by M data matrix with N observations in M dimensions.\r\n t : float\r\n The threshold to apply when forming flat clusters.\r\n criterion : str, optional\r\n Specifies the criterion for forming flat clusters. Valid\r\n values are 'inconsistent' (default), 'distance', or 'maxclust'\r\n cluster formation algorithms. See `fcluster` for descriptions.\r\n metric : str, optional\r\n The distance metric for calculating pairwise distances. See\r\n `distance.pdist` for descriptions and linkage to verify\r\n compatibility with the linkage method.\r\n depth : int, optional\r\n The maximum depth for the inconsistency calculation. See\r\n `inconsistent` for more information.\r\n method : str, optional\r\n The linkage method to use (single, complete, average,\r\n weighted, median centroid, ward). See `linkage` for more\r\n information. Default is \"single\".\r\n R : ndarray, optional\r\n The inconsistency matrix. It will be computed if necessary\r\n if it is not passed.\r\n\r\n Returns\r\n -------\r\n fclusterdata : ndarray\r\n A vector of length n. 
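# --- Illustrative sketch (not part of this module's source) ---------------
# Flat clusters can be formed from a linkage with fcluster() (defined above),
# for example with the 'maxclust' and 'distance' criteria documented in its
# docstring. The two synthetic blobs below are made up for demonstration.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

X = np.vstack([np.random.RandomState(4).randn(10, 2),
               np.random.RandomState(5).randn(10, 2) + 8.0])
Z = linkage(X, method='ward')

print(fcluster(Z, t=2, criterion='maxclust'))    # exactly two flat clusters
print(fcluster(Z, t=5.0, criterion='distance'))  # cut at cophenetic distance 5.0
# ---------------------------------------------------------------------------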
T[i] is the flat cluster number to\r\n which original observation i belongs.\r\n\r\n Notes\r\n -----\r\n This function is similar to the MATLAB function clusterdata.\r\n\r\n \"\"\"\r\n X = np.asarray(X, order='c', dtype=np.double)\r\n\r\n if type(X) != np.ndarray or len(X.shape) != 2:\r\n raise TypeError('The observation matrix X must be an n by m numpy '\r\n 'array.')\r\n\r\n Y = distance.pdist(X, metric=metric)\r\n Z = linkage(Y, method=method)\r\n if R is None:\r\n R = inconsistent(Z, d=depth)\r\n else:\r\n R = np.asarray(R, order='c')\r\n T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)\r\n return T\r\n\r\n\r\ndef leaves_list(Z):\r\n \"\"\"\r\n Returns a list of leaf node ids\r\n\r\n The return corresponds to the observation vector index as it appears\r\n in the tree from left to right. Z is a linkage matrix.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The hierarchical clustering encoded as a matrix. `Z` is\r\n a linkage matrix. See ``linkage`` for more information.\r\n\r\n Returns\r\n -------\r\n leaves_list : ndarray\r\n The list of leaf node ids.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n n = Z.shape[0] + 1\r\n ML = np.zeros((n,), dtype='i')\r\n [Z] = _copy_arrays_if_base_present([Z])\r\n _hierarchy.prelist(Z, ML, int(n))\r\n return ML\r\n\r\n\r\n# Maps number of leaves to text size.\r\n#\r\n# p <= 20, size=\"12\"\r\n# 20 < p <= 30, size=\"10\"\r\n# 30 < p <= 50, size=\"8\"\r\n# 50 < p <= np.inf, size=\"6\"\r\n\r\n_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}\r\n_drotation = {20: 0, 40: 45, np.inf: 90}\r\n_dtextsortedkeys = list(_dtextsizes.keys())\r\n_dtextsortedkeys.sort()\r\n_drotationsortedkeys = list(_drotation.keys())\r\n_drotationsortedkeys.sort()\r\n\r\n\r\ndef _remove_dups(L):\r\n \"\"\"\r\n Removes duplicates AND preserves the original order of the elements.\r\n The set class is not guaranteed to do this.\r\n \"\"\"\r\n seen_before = set([])\r\n L2 = []\r\n for i in L:\r\n if i not in seen_before:\r\n seen_before.add(i)\r\n L2.append(i)\r\n return L2\r\n\r\n\r\ndef _get_tick_text_size(p):\r\n for k in _dtextsortedkeys:\r\n if p <= k:\r\n return _dtextsizes[k]\r\n\r\n\r\ndef _get_tick_rotation(p):\r\n for k in _drotationsortedkeys:\r\n if p <= k:\r\n return _drotation[k]\r\n\r\n\r\ndef _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,\r\n no_labels, color_list, leaf_font_size=None,\r\n leaf_rotation=None, contraction_marks=None,\r\n ax=None, above_threshold_color='b'):\r\n # Import matplotlib here so that it's not imported unless dendrograms\r\n # are plotted. Raise an informative error if importing fails.\r\n try:\r\n # if an axis is provided, don't use pylab at all\r\n if ax is None:\r\n import matplotlib.pylab\r\n import matplotlib.patches\r\n import matplotlib.collections\r\n except ImportError:\r\n raise ImportError(\"You must install the matplotlib library to plot \"\r\n \"the dendrogram. 
Use no_plot=True to calculate the \"\r\n \"dendrogram without plotting.\")\r\n\r\n if ax is None:\r\n ax = matplotlib.pylab.gca()\r\n # if we're using pylab, we want to trigger a draw at the end\r\n trigger_redraw = True\r\n else:\r\n trigger_redraw = False\r\n\r\n # Independent variable plot width\r\n ivw = len(ivl) * 10\r\n # Dependent variable plot height\r\n dvw = mh + mh * 0.05\r\n\r\n iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)\r\n if orientation in ('top', 'bottom'):\r\n if orientation == 'top':\r\n ax.set_ylim([0, dvw])\r\n ax.set_xlim([0, ivw])\r\n else:\r\n ax.set_ylim([dvw, 0])\r\n ax.set_xlim([0, ivw])\r\n\r\n xlines = icoords\r\n ylines = dcoords\r\n if no_labels:\r\n ax.set_xticks([])\r\n ax.set_xticklabels([])\r\n else:\r\n ax.set_xticks(iv_ticks)\r\n\r\n if orientation == 'top':\r\n ax.xaxis.set_ticks_position('bottom')\r\n else:\r\n ax.xaxis.set_ticks_position('top')\r\n\r\n # Make the tick marks invisible because they cover up the links\r\n for line in ax.get_xticklines():\r\n line.set_visible(False)\r\n\r\n leaf_rot = float(_get_tick_rotation(len(ivl))) if (\r\n leaf_rotation is None) else leaf_rotation\r\n leaf_font = float(_get_tick_text_size(len(ivl))) if (\r\n leaf_font_size is None) else leaf_font_size\r\n ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)\r\n\r\n elif orientation in ('left', 'right'):\r\n if orientation == 'left':\r\n ax.set_xlim([dvw, 0])\r\n ax.set_ylim([0, ivw])\r\n else:\r\n ax.set_xlim([0, dvw])\r\n ax.set_ylim([0, ivw])\r\n\r\n xlines = dcoords\r\n ylines = icoords\r\n if no_labels:\r\n ax.set_yticks([])\r\n ax.set_yticklabels([])\r\n else:\r\n ax.set_yticks(iv_ticks)\r\n\r\n if orientation == 'left':\r\n ax.yaxis.set_ticks_position('right')\r\n else:\r\n ax.yaxis.set_ticks_position('left')\r\n\r\n # Make the tick marks invisible because they cover up the links\r\n for line in ax.get_yticklines():\r\n line.set_visible(False)\r\n\r\n leaf_font = float(_get_tick_text_size(len(ivl))) if (\r\n leaf_font_size is None) else leaf_font_size\r\n\r\n if leaf_rotation is not None:\r\n ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)\r\n else:\r\n ax.set_yticklabels(ivl, size=leaf_font)\r\n\r\n # Let's use collections instead. 
This way there is a separate legend item\r\n # for each tree grouping, rather than stupidly one for each line segment.\r\n colors_used = _remove_dups(color_list)\r\n color_to_lines = {}\r\n for color in colors_used:\r\n color_to_lines[color] = []\r\n for (xline, yline, color) in zip(xlines, ylines, color_list):\r\n color_to_lines[color].append(list(zip(xline, yline)))\r\n\r\n colors_to_collections = {}\r\n # Construct the collections.\r\n for color in colors_used:\r\n coll = matplotlib.collections.LineCollection(color_to_lines[color],\r\n colors=(color,))\r\n colors_to_collections[color] = coll\r\n\r\n # Add all the groupings below the color threshold.\r\n for color in colors_used:\r\n if color != above_threshold_color:\r\n ax.add_collection(colors_to_collections[color])\r\n # If there's a grouping of links above the color threshold, it goes last.\r\n if above_threshold_color in colors_to_collections:\r\n ax.add_collection(colors_to_collections[above_threshold_color])\r\n\r\n if contraction_marks is not None:\r\n Ellipse = matplotlib.patches.Ellipse\r\n for (x, y) in contraction_marks:\r\n if orientation in ('left', 'right'):\r\n e = Ellipse((y, x), width=dvw / 100, height=1.0)\r\n else:\r\n e = Ellipse((x, y), width=1.0, height=dvw / 100)\r\n ax.add_artist(e)\r\n e.set_clip_box(ax.bbox)\r\n e.set_alpha(0.5)\r\n e.set_facecolor('k')\r\n\r\n if trigger_redraw:\r\n matplotlib.pylab.draw_if_interactive()\r\n\r\n\r\n_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']\r\n\r\n\r\ndef set_link_color_palette(palette):\r\n \"\"\"\r\n Set list of matplotlib color codes for use by dendrogram.\r\n\r\n Note that this palette is global (i.e. setting it once changes the colors\r\n for all subsequent calls to `dendrogram`) and that it affects only the\r\n the colors below ``color_threshold``.\r\n\r\n Note that `dendrogram` also accepts a custom coloring function through its\r\n ``link_color_func`` keyword, which is more flexible and non-global.\r\n\r\n Parameters\r\n ----------\r\n palette : list of str or None\r\n A list of matplotlib color codes. The order of the color codes is the\r\n order in which the colors are cycled through when color thresholding in\r\n the dendrogram.\r\n\r\n If ``None``, resets the palette to its default (which is\r\n ``['g', 'r', 'c', 'm', 'y', 'k']``).\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n See Also\r\n --------\r\n dendrogram\r\n\r\n Notes\r\n -----\r\n Ability to reset the palette with ``None`` added in Scipy 0.17.0.\r\n\r\n Examples\r\n --------\r\n >>> from scipy.cluster import hierarchy\r\n >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., 400.,\r\n ... 754., 564., 138., 219., 869., 669.])\r\n >>> Z = hierarchy.linkage(ytdist, 'single')\r\n >>> dn = hierarchy.dendrogram(Z, no_plot=True)\r\n >>> dn['color_list']\r\n ['g', 'b', 'b', 'b', 'b']\r\n >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])\r\n >>> dn = hierarchy.dendrogram(Z, no_plot=True)\r\n >>> dn['color_list']\r\n ['c', 'b', 'b', 'b', 'b']\r\n >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,\r\n ... 
above_threshold_color='k')\r\n >>> dn['color_list']\r\n ['c', 'm', 'm', 'k', 'k']\r\n\r\n Now reset the color palette to its default:\r\n\r\n >>> hierarchy.set_link_color_palette(None)\r\n\r\n \"\"\"\r\n if palette is None:\r\n # reset to its default\r\n palette = ['g', 'r', 'c', 'm', 'y', 'k']\r\n elif type(palette) not in (list, tuple):\r\n raise TypeError(\"palette must be a list or tuple\")\r\n _ptypes = [isinstance(p, string_types) for p in palette]\r\n\r\n if False in _ptypes:\r\n raise TypeError(\"all palette list elements must be color strings\")\r\n\r\n for i in list(_link_line_colors):\r\n _link_line_colors.remove(i)\r\n _link_line_colors.extend(list(palette))\r\n\r\n\r\ndef dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,\r\n get_leaves=True, orientation='top', labels=None,\r\n count_sort=False, distance_sort=False, show_leaf_counts=True,\r\n no_plot=False, no_labels=False, leaf_font_size=None,\r\n leaf_rotation=None, leaf_label_func=None,\r\n show_contracted=False, link_color_func=None, ax=None,\r\n above_threshold_color='b'):\r\n \"\"\"\r\n Plots the hierarchical clustering as a dendrogram.\r\n\r\n The dendrogram illustrates how each cluster is\r\n composed by drawing a U-shaped link between a non-singleton\r\n cluster and its children. The height of the top of the U-link is\r\n the distance between its children clusters. It is also the\r\n cophenetic distance between original observations in the two\r\n children clusters. It is expected that the distances in Z[:,2] be\r\n monotonic, otherwise crossings appear in the dendrogram.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The linkage matrix encoding the hierarchical clustering to\r\n render as a dendrogram. See the ``linkage`` function for more\r\n information on the format of ``Z``.\r\n p : int, optional\r\n The ``p`` parameter for ``truncate_mode``.\r\n truncate_mode : str, optional\r\n The dendrogram can be hard to read when the original\r\n observation matrix from which the linkage is derived is\r\n large. Truncation is used to condense the dendrogram. There\r\n are several modes:\r\n\r\n ``None/'none'``\r\n No truncation is performed (Default).\r\n\r\n ``'lastp'``\r\n The last ``p`` non-singleton formed in the linkage are the only\r\n non-leaf nodes in the linkage; they correspond to rows\r\n ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are\r\n contracted into leaf nodes.\r\n\r\n ``'mlab'``\r\n This corresponds to MATLAB(TM) behavior. (not implemented yet)\r\n\r\n ``'level'/'mtica'``\r\n No more than ``p`` levels of the dendrogram tree are displayed.\r\n This corresponds to Mathematica(TM) behavior.\r\n\r\n color_threshold : double, optional\r\n For brevity, let :math:`t` be the ``color_threshold``.\r\n Colors all the descendent links below a cluster node\r\n :math:`k` the same color if :math:`k` is the first node below\r\n the cut threshold :math:`t`. All links connecting nodes with\r\n distances greater than or equal to the threshold are colored\r\n blue. If :math:`t` is less than or equal to zero, all nodes\r\n are colored blue. If ``color_threshold`` is None or\r\n 'default', corresponding with MATLAB(TM) behavior, the\r\n threshold is set to ``0.7*max(Z[:,2])``.\r\n get_leaves : bool, optional\r\n Includes a list ``R['leaves']=H`` in the result\r\n dictionary. 
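# --- Illustrative sketch (not part of this module's source) ---------------
# The 'lastp' truncation mode described above condenses a large dendrogram to
# its last p merges; contracted subtrees are labelled with their leaf counts.
# Data values are made up and matplotlib is assumed to be installed.
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import linkage, dendrogram

X = np.random.RandomState(6).randn(30, 2)
Z = linkage(X, method='ward')

fig, ax = plt.subplots()
info = dendrogram(Z, truncate_mode='lastp', p=4, show_contracted=True, ax=ax)
print(info['ivl'])  # leaf labels such as '(8)' give contracted leaf counts
plt.show()
# ---------------------------------------------------------------------------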
For each :math:`i`, ``H[i] == j``, cluster node\r\n ``j`` appears in position ``i`` in the left-to-right traversal\r\n of the leaves, where :math:`j < 2n-1` and :math:`i < n`.\r\n orientation : str, optional\r\n The direction to plot the dendrogram, which can be any\r\n of the following strings:\r\n\r\n ``'top'``\r\n Plots the root at the top, and plot descendent links going downwards.\r\n (default).\r\n\r\n ``'bottom'``\r\n Plots the root at the bottom, and plot descendent links going\r\n upwards.\r\n\r\n ``'left'``\r\n Plots the root at the left, and plot descendent links going right.\r\n\r\n ``'right'``\r\n Plots the root at the right, and plot descendent links going left.\r\n\r\n labels : ndarray, optional\r\n By default ``labels`` is None so the index of the original observation\r\n is used to label the leaf nodes. Otherwise, this is an :math:`n`\r\n -sized list (or tuple). The ``labels[i]`` value is the text to put\r\n under the :math:`i` th leaf node only if it corresponds to an original\r\n observation and not a non-singleton cluster.\r\n count_sort : str or bool, optional\r\n For each node n, the order (visually, from left-to-right) n's\r\n two descendent links are plotted is determined by this\r\n parameter, which can be any of the following values:\r\n\r\n ``False``\r\n Nothing is done.\r\n\r\n ``'ascending'`` or ``True``\r\n The child with the minimum number of original objects in its cluster\r\n is plotted first.\r\n\r\n ``'descendent'``\r\n The child with the maximum number of original objects in its cluster\r\n is plotted first.\r\n\r\n Note ``distance_sort`` and ``count_sort`` cannot both be True.\r\n distance_sort : str or bool, optional\r\n For each node n, the order (visually, from left-to-right) n's\r\n two descendent links are plotted is determined by this\r\n parameter, which can be any of the following values:\r\n\r\n ``False``\r\n Nothing is done.\r\n\r\n ``'ascending'`` or ``True``\r\n The child with the minimum distance between its direct descendents is\r\n plotted first.\r\n\r\n ``'descending'``\r\n The child with the maximum distance between its direct descendents is\r\n plotted first.\r\n\r\n Note ``distance_sort`` and ``count_sort`` cannot both be True.\r\n show_leaf_counts : bool, optional\r\n When True, leaf nodes representing :math:`k>1` original\r\n observation are labeled with the number of observations they\r\n contain in parentheses.\r\n no_plot : bool, optional\r\n When True, the final rendering is not performed. This is\r\n useful if only the data structures computed for the rendering\r\n are needed or if matplotlib is not available.\r\n no_labels : bool, optional\r\n When True, no labels appear next to the leaf nodes in the\r\n rendering of the dendrogram.\r\n leaf_rotation : double, optional\r\n Specifies the angle (in degrees) to rotate the leaf\r\n labels. When unspecified, the rotation is based on the number of\r\n nodes in the dendrogram (default is 0).\r\n leaf_font_size : int, optional\r\n Specifies the font size (in points) of the leaf labels. When\r\n unspecified, the size based on the number of nodes in the\r\n dendrogram.\r\n leaf_label_func : lambda or function, optional\r\n When leaf_label_func is a callable function, for each\r\n leaf with cluster index :math:`k < 2n-1`. 
The function\r\n is expected to return a string with the label for the\r\n leaf.\r\n\r\n Indices :math:`k < n` correspond to original observations\r\n while indices :math:`k \\\\geq n` correspond to non-singleton\r\n clusters.\r\n\r\n For example, to label singletons with their node id and\r\n non-singletons with their id, count, and inconsistency\r\n coefficient, simply do::\r\n\r\n # First define the leaf label function.\r\n def llf(id):\r\n if id < n:\r\n return str(id)\r\n else:\r\n return '[%d %d %1.2f]' % (id, count, R[n-id,3])\r\n # The text for the leaf nodes is going to be big so force\r\n # a rotation of 90 degrees.\r\n dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)\r\n\r\n show_contracted : bool, optional\r\n When True the heights of non-singleton nodes contracted\r\n into a leaf node are plotted as crosses along the link\r\n connecting that leaf node. This really is only useful when\r\n truncation is used (see ``truncate_mode`` parameter).\r\n link_color_func : callable, optional\r\n If given, `link_color_function` is called with each non-singleton id\r\n corresponding to each U-shaped link it will paint. The function is\r\n expected to return the color to paint the link, encoded as a matplotlib\r\n color string code. For example::\r\n\r\n dendrogram(Z, link_color_func=lambda k: colors[k])\r\n\r\n colors the direct links below each untruncated non-singleton node\r\n ``k`` using ``colors[k]``.\r\n ax : matplotlib Axes instance, optional\r\n If None and `no_plot` is not True, the dendrogram will be plotted\r\n on the current axes. Otherwise if `no_plot` is not True the\r\n dendrogram will be plotted on the given ``Axes`` instance. This can be\r\n useful if the dendrogram is part of a more complex figure.\r\n above_threshold_color : str, optional\r\n This matplotlib color string sets the color of the links above the\r\n color_threshold. The default is 'b'.\r\n\r\n Returns\r\n -------\r\n R : dict\r\n A dictionary of data structures computed to render the\r\n dendrogram. Its has the following keys:\r\n\r\n ``'color_list'``\r\n A list of color names. The k'th element represents the color of the\r\n k'th link.\r\n\r\n ``'icoord'`` and ``'dcoord'``\r\n Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``\r\n where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``\r\n where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is\r\n ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.\r\n\r\n ``'ivl'``\r\n A list of labels corresponding to the leaf nodes.\r\n\r\n ``'leaves'``\r\n For each i, ``H[i] == j``, cluster node ``j`` appears in position\r\n ``i`` in the left-to-right traversal of the leaves, where\r\n :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the\r\n ``i``-th leaf node corresponds to an original observation.\r\n Otherwise, it corresponds to a non-singleton cluster.\r\n\r\n See Also\r\n --------\r\n linkage, set_link_color_palette\r\n\r\n Examples\r\n --------\r\n >>> from scipy.cluster import hierarchy\r\n >>> import matplotlib.pyplot as plt\r\n\r\n A very basic example:\r\n\r\n >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,\r\n ... 
400., 754., 564., 138., 219., 869., 669.])\r\n >>> Z = hierarchy.linkage(ytdist, 'single')\r\n >>> plt.figure()\r\n >>> dn = hierarchy.dendrogram(Z)\r\n\r\n Now plot in given axes, improve the color scheme and use both vertical and\r\n horizontal orientations:\r\n\r\n >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])\r\n >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))\r\n >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',\r\n ... orientation='top')\r\n >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], above_threshold_color='#bcbddc',\r\n ... orientation='right')\r\n >>> hierarchy.set_link_color_palette(None) # reset to default after use\r\n >>> plt.show()\r\n\r\n \"\"\"\r\n # This feature was thought about but never implemented (still useful?):\r\n #\r\n # ... = dendrogram(..., leaves_order=None)\r\n #\r\n # Plots the leaves in the order specified by a vector of\r\n # original observation indices. If the vector contains duplicates\r\n # or results in a crossing, an exception will be thrown. Passing\r\n # None orders leaf nodes based on the order they appear in the\r\n # pre-order traversal.\r\n Z = np.asarray(Z, order='c')\r\n\r\n if orientation not in [\"top\", \"left\", \"bottom\", \"right\"]:\r\n raise ValueError(\"orientation must be one of 'top', 'left', \"\r\n \"'bottom', or 'right'\")\r\n\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n Zs = Z.shape\r\n n = Zs[0] + 1\r\n if type(p) in (int, float):\r\n p = int(p)\r\n else:\r\n raise TypeError('The second argument must be a number')\r\n\r\n if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):\r\n raise ValueError('Invalid truncation mode.')\r\n\r\n if truncate_mode == 'lastp' or truncate_mode == 'mlab':\r\n if p > n or p == 0:\r\n p = n\r\n\r\n if truncate_mode == 'mtica' or truncate_mode == 'level':\r\n if p <= 0:\r\n p = np.inf\r\n\r\n if get_leaves:\r\n lvs = []\r\n else:\r\n lvs = None\r\n\r\n icoord_list = []\r\n dcoord_list = []\r\n color_list = []\r\n current_color = [0]\r\n currently_below_threshold = [False]\r\n ivl = [] # list of leaves\r\n\r\n if color_threshold is None or (isinstance(color_threshold, string_types) and\r\n color_threshold == 'default'):\r\n color_threshold = max(Z[:, 2]) * 0.7\r\n\r\n R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,\r\n 'leaves': lvs, 'color_list': color_list}\r\n\r\n # Empty list will be filled in _dendrogram_calculate_info\r\n contraction_marks = [] if show_contracted else None\r\n\r\n _dendrogram_calculate_info(\r\n Z=Z, p=p,\r\n truncate_mode=truncate_mode,\r\n color_threshold=color_threshold,\r\n get_leaves=get_leaves,\r\n orientation=orientation,\r\n labels=labels,\r\n count_sort=count_sort,\r\n distance_sort=distance_sort,\r\n show_leaf_counts=show_leaf_counts,\r\n i=2*n - 2,\r\n iv=0.0,\r\n ivl=ivl,\r\n n=n,\r\n icoord_list=icoord_list,\r\n dcoord_list=dcoord_list,\r\n lvs=lvs,\r\n current_color=current_color,\r\n color_list=color_list,\r\n currently_below_threshold=currently_below_threshold,\r\n leaf_label_func=leaf_label_func,\r\n contraction_marks=contraction_marks,\r\n link_color_func=link_color_func,\r\n above_threshold_color=above_threshold_color)\r\n\r\n if not no_plot:\r\n mh = max(Z[:, 2])\r\n _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,\r\n no_labels, color_list,\r\n leaf_font_size=leaf_font_size,\r\n leaf_rotation=leaf_rotation,\r\n contraction_marks=contraction_marks,\r\n ax=ax,\r\n above_threshold_color=above_threshold_color)\r\n\r\n return R\r\n\r\n\r\ndef 
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,\r\n i, labels):\r\n # If the leaf id structure is not None and is a list then the caller\r\n # to dendrogram has indicated that cluster id's corresponding to the\r\n # leaf nodes should be recorded.\r\n\r\n if lvs is not None:\r\n lvs.append(int(i))\r\n\r\n # If leaf node labels are to be displayed...\r\n if ivl is not None:\r\n # If a leaf_label_func has been provided, the label comes from the\r\n # string returned from the leaf_label_func, which is a function\r\n # passed to dendrogram.\r\n if leaf_label_func:\r\n ivl.append(leaf_label_func(int(i)))\r\n else:\r\n # Otherwise, if the dendrogram caller has passed a labels list\r\n # for the leaf nodes, use it.\r\n if labels is not None:\r\n ivl.append(labels[int(i - n)])\r\n else:\r\n # Otherwise, use the id as the label for the leaf.x\r\n ivl.append(str(int(i)))\r\n\r\n\r\ndef _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,\r\n i, labels, show_leaf_counts):\r\n # If the leaf id structure is not None and is a list then the caller\r\n # to dendrogram has indicated that cluster id's corresponding to the\r\n # leaf nodes should be recorded.\r\n\r\n if lvs is not None:\r\n lvs.append(int(i))\r\n if ivl is not None:\r\n if leaf_label_func:\r\n ivl.append(leaf_label_func(int(i)))\r\n else:\r\n if show_leaf_counts:\r\n ivl.append(\"(\" + str(int(Z[i - n, 3])) + \")\")\r\n else:\r\n ivl.append(\"\")\r\n\r\n\r\ndef _append_contraction_marks(Z, iv, i, n, contraction_marks):\r\n _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)\r\n _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)\r\n\r\n\r\ndef _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):\r\n if i >= n:\r\n contraction_marks.append((iv, Z[i - n, 2]))\r\n _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)\r\n _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)\r\n\r\n\r\ndef _dendrogram_calculate_info(Z, p, truncate_mode,\r\n color_threshold=np.inf, get_leaves=True,\r\n orientation='top', labels=None,\r\n count_sort=False, distance_sort=False,\r\n show_leaf_counts=False, i=-1, iv=0.0,\r\n ivl=[], n=0, icoord_list=[], dcoord_list=[],\r\n lvs=None, mhr=False,\r\n current_color=[], color_list=[],\r\n currently_below_threshold=[],\r\n leaf_label_func=None, level=0,\r\n contraction_marks=None,\r\n link_color_func=None,\r\n above_threshold_color='b'):\r\n \"\"\"\r\n Calculates the endpoints of the links as well as the labels for the\r\n the dendrogram rooted at the node with index i. iv is the independent\r\n variable value to plot the left-most leaf node below the root node i\r\n (if orientation='top', this would be the left-most x value where the\r\n plotting of this root node i and its descendents should begin).\r\n\r\n ivl is a list to store the labels of the leaf nodes. The leaf_label_func\r\n is called whenever ivl != None, labels == None, and\r\n leaf_label_func != None. When ivl != None and labels != None, the\r\n labels list is used only for labeling the leaf nodes. 
When\r\n ivl == None, no labels are generated for leaf nodes.\r\n\r\n When get_leaves==True, a list of leaves is built as they are visited\r\n in the dendrogram.\r\n\r\n Returns a tuple with l being the independent variable coordinate that\r\n corresponds to the midpoint of cluster to the left of cluster i if\r\n i is non-singleton, otherwise the independent coordinate of the leaf\r\n node if i is a leaf node.\r\n\r\n Returns\r\n -------\r\n A tuple (left, w, h, md), where:\r\n\r\n * left is the independent variable coordinate of the center of the\r\n the U of the subtree\r\n\r\n * w is the amount of space used for the subtree (in independent\r\n variable units)\r\n\r\n * h is the height of the subtree in dependent variable units\r\n\r\n * md is the ``max(Z[*,2]``) for all nodes ``*`` below and including\r\n the target node.\r\n\r\n \"\"\"\r\n if n == 0:\r\n raise ValueError(\"Invalid singleton cluster count n.\")\r\n\r\n if i == -1:\r\n raise ValueError(\"Invalid root cluster index i.\")\r\n\r\n if truncate_mode == 'lastp':\r\n # If the node is a leaf node but corresponds to a non-single cluster,\r\n # its label is either the empty string or the number of original\r\n # observations belonging to cluster i.\r\n if 2 * n - p > i >= n:\r\n d = Z[i - n, 2]\r\n _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,\r\n leaf_label_func, i, labels,\r\n show_leaf_counts)\r\n if contraction_marks is not None:\r\n _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)\r\n return (iv + 5.0, 10.0, 0.0, d)\r\n elif i < n:\r\n _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,\r\n leaf_label_func, i, labels)\r\n return (iv + 5.0, 10.0, 0.0, 0.0)\r\n elif truncate_mode in ('mtica', 'level'):\r\n if i > n and level > p:\r\n d = Z[i - n, 2]\r\n _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,\r\n leaf_label_func, i, labels,\r\n show_leaf_counts)\r\n if contraction_marks is not None:\r\n _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)\r\n return (iv + 5.0, 10.0, 0.0, d)\r\n elif i < n:\r\n _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,\r\n leaf_label_func, i, labels)\r\n return (iv + 5.0, 10.0, 0.0, 0.0)\r\n elif truncate_mode in ('mlab',):\r\n pass\r\n\r\n # Otherwise, only truncate if we have a leaf node.\r\n #\r\n # If the truncate_mode is mlab, the linkage has been modified\r\n # with the truncated tree.\r\n #\r\n # Only place leaves if they correspond to original observations.\r\n if i < n:\r\n _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,\r\n leaf_label_func, i, labels)\r\n return (iv + 5.0, 10.0, 0.0, 0.0)\r\n\r\n # !!! Otherwise, we don't have a leaf node, so work on plotting a\r\n # non-leaf node.\r\n # Actual indices of a and b\r\n aa = int(Z[i - n, 0])\r\n ab = int(Z[i - n, 1])\r\n if aa > n:\r\n # The number of singletons below cluster a\r\n na = Z[aa - n, 3]\r\n # The distance between a's two direct children.\r\n da = Z[aa - n, 2]\r\n else:\r\n na = 1\r\n da = 0.0\r\n if ab > n:\r\n nb = Z[ab - n, 3]\r\n db = Z[ab - n, 2]\r\n else:\r\n nb = 1\r\n db = 0.0\r\n\r\n if count_sort == 'ascending' or count_sort == True:\r\n # If a has a count greater than b, it and its descendents should\r\n # be drawn to the right. 
Otherwise, to the left.\r\n if na > nb:\r\n # The cluster index to draw to the left (ua) will be ab\r\n # and the one to draw to the right (ub) will be aa\r\n ua = ab\r\n ub = aa\r\n else:\r\n ua = aa\r\n ub = ab\r\n elif count_sort == 'descending':\r\n # If a has a count less than or equal to b, it and its\r\n # descendents should be drawn to the left. Otherwise, to\r\n # the right.\r\n if na > nb:\r\n ua = aa\r\n ub = ab\r\n else:\r\n ua = ab\r\n ub = aa\r\n elif distance_sort == 'ascending' or distance_sort == True:\r\n # If a has a distance greater than b, it and its descendents should\r\n # be drawn to the right. Otherwise, to the left.\r\n if da > db:\r\n ua = ab\r\n ub = aa\r\n else:\r\n ua = aa\r\n ub = ab\r\n elif distance_sort == 'descending':\r\n # If a has a distance less than or equal to b, it and its\r\n # descendents should be drawn to the left. Otherwise, to\r\n # the right.\r\n if da > db:\r\n ua = aa\r\n ub = ab\r\n else:\r\n ua = ab\r\n ub = aa\r\n else:\r\n ua = aa\r\n ub = ab\r\n\r\n # Updated iv variable and the amount of space used.\r\n (uiva, uwa, uah, uamd) = \\\r\n _dendrogram_calculate_info(\r\n Z=Z, p=p,\r\n truncate_mode=truncate_mode,\r\n color_threshold=color_threshold,\r\n get_leaves=get_leaves,\r\n orientation=orientation,\r\n labels=labels,\r\n count_sort=count_sort,\r\n distance_sort=distance_sort,\r\n show_leaf_counts=show_leaf_counts,\r\n i=ua, iv=iv, ivl=ivl, n=n,\r\n icoord_list=icoord_list,\r\n dcoord_list=dcoord_list, lvs=lvs,\r\n current_color=current_color,\r\n color_list=color_list,\r\n currently_below_threshold=currently_below_threshold,\r\n leaf_label_func=leaf_label_func,\r\n level=level + 1, contraction_marks=contraction_marks,\r\n link_color_func=link_color_func,\r\n above_threshold_color=above_threshold_color)\r\n\r\n h = Z[i - n, 2]\r\n if h >= color_threshold or color_threshold <= 0:\r\n c = above_threshold_color\r\n\r\n if currently_below_threshold[0]:\r\n current_color[0] = (current_color[0] + 1) % len(_link_line_colors)\r\n currently_below_threshold[0] = False\r\n else:\r\n currently_below_threshold[0] = True\r\n c = _link_line_colors[current_color[0]]\r\n\r\n (uivb, uwb, ubh, ubmd) = \\\r\n _dendrogram_calculate_info(\r\n Z=Z, p=p,\r\n truncate_mode=truncate_mode,\r\n color_threshold=color_threshold,\r\n get_leaves=get_leaves,\r\n orientation=orientation,\r\n labels=labels,\r\n count_sort=count_sort,\r\n distance_sort=distance_sort,\r\n show_leaf_counts=show_leaf_counts,\r\n i=ub, iv=iv + uwa, ivl=ivl, n=n,\r\n icoord_list=icoord_list,\r\n dcoord_list=dcoord_list, lvs=lvs,\r\n current_color=current_color,\r\n color_list=color_list,\r\n currently_below_threshold=currently_below_threshold,\r\n leaf_label_func=leaf_label_func,\r\n level=level + 1, contraction_marks=contraction_marks,\r\n link_color_func=link_color_func,\r\n above_threshold_color=above_threshold_color)\r\n\r\n max_dist = max(uamd, ubmd, h)\r\n\r\n icoord_list.append([uiva, uiva, uivb, uivb])\r\n dcoord_list.append([uah, h, h, ubh])\r\n if link_color_func is not None:\r\n v = link_color_func(int(i))\r\n if not isinstance(v, string_types):\r\n raise TypeError(\"link_color_func must return a matplotlib \"\r\n \"color string!\")\r\n color_list.append(v)\r\n else:\r\n color_list.append(c)\r\n\r\n return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)\r\n\r\n\r\ndef is_isomorphic(T1, T2):\r\n \"\"\"\r\n Determines if two different cluster assignments are equivalent.\r\n\r\n Parameters\r\n ----------\r\n T1 : array_like\r\n An assignment of singleton cluster ids to flat cluster 
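A brief sketch (assumed data, not from the original file) of how the colouring branch above behaves from user code: links at or above color_threshold take above_threshold_color.

import numpy as np
from scipy.cluster import hierarchy

# illustrative only; any linkage matrix works here
Z = hierarchy.linkage(np.random.RandomState(1).rand(12, 3), method='average')
R = hierarchy.dendrogram(Z, color_threshold=0.5 * max(Z[:, 2]),
                         above_threshold_color='grey', no_plot=True)
print(set(R['color_list']))   # 'grey' for high links, palette colours below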
ids.\r\n T2 : array_like\r\n An assignment of singleton cluster ids to flat cluster ids.\r\n\r\n Returns\r\n -------\r\n b : bool\r\n Whether the flat cluster assignments `T1` and `T2` are\r\n equivalent.\r\n\r\n \"\"\"\r\n T1 = np.asarray(T1, order='c')\r\n T2 = np.asarray(T2, order='c')\r\n\r\n if type(T1) != np.ndarray:\r\n raise TypeError('T1 must be a numpy array.')\r\n if type(T2) != np.ndarray:\r\n raise TypeError('T2 must be a numpy array.')\r\n\r\n T1S = T1.shape\r\n T2S = T2.shape\r\n\r\n if len(T1S) != 1:\r\n raise ValueError('T1 must be one-dimensional.')\r\n if len(T2S) != 1:\r\n raise ValueError('T2 must be one-dimensional.')\r\n if T1S[0] != T2S[0]:\r\n raise ValueError('T1 and T2 must have the same number of elements.')\r\n n = T1S[0]\r\n d = {}\r\n for i in xrange(0, n):\r\n if T1[i] in d:\r\n if d[T1[i]] != T2[i]:\r\n return False\r\n else:\r\n d[T1[i]] = T2[i]\r\n return True\r\n\r\n\r\ndef maxdists(Z):\r\n \"\"\"\r\n Returns the maximum distance between any non-singleton cluster.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The hierarchical clustering encoded as a matrix. See\r\n ``linkage`` for more information.\r\n\r\n Returns\r\n -------\r\n maxdists : ndarray\r\n A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents\r\n the maximum distance between any cluster (including\r\n singletons) below and including the node with index i. More\r\n specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the\r\n set of all node indices below and including node i.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c', dtype=np.double)\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n\r\n n = Z.shape[0] + 1\r\n MD = np.zeros((n - 1,))\r\n [Z] = _copy_arrays_if_base_present([Z])\r\n _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))\r\n return MD\r\n\r\n\r\ndef maxinconsts(Z, R):\r\n \"\"\"\r\n Returns the maximum inconsistency coefficient for each\r\n non-singleton cluster and its descendents.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The hierarchical clustering encoded as a matrix. See\r\n ``linkage`` for more information.\r\n R : ndarray\r\n The inconsistency matrix.\r\n\r\n Returns\r\n -------\r\n MI : ndarray\r\n A monotonic ``(n-1)``-sized numpy array of doubles.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n R = np.asarray(R, order='c')\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n is_valid_im(R, throw=True, name='R')\r\n\r\n n = Z.shape[0] + 1\r\n if Z.shape[0] != R.shape[0]:\r\n raise ValueError(\"The inconsistency matrix and linkage matrix each \"\r\n \"have a different number of rows.\")\r\n MI = np.zeros((n - 1,))\r\n [Z, R] = _copy_arrays_if_base_present([Z, R])\r\n _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)\r\n return MI\r\n\r\n\r\ndef maxRstat(Z, R, i):\r\n \"\"\"\r\n Returns the maximum statistic for each non-singleton cluster and\r\n its descendents.\r\n\r\n Parameters\r\n ----------\r\n Z : array_like\r\n The hierarchical clustering encoded as a matrix. See `linkage` for more\r\n information.\r\n R : array_like\r\n The inconsistency matrix.\r\n i : int\r\n The column of `R` to use as the statistic.\r\n\r\n Returns\r\n -------\r\n MR : ndarray\r\n Calculates the maximum statistic for the i'th column of the\r\n inconsistency matrix `R` for each non-singleton cluster\r\n node. 
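An illustrative pairing (not from the original file) of maxdists() with fcluster(): cutting the tree at a distance derived from the per-node maxima.

import numpy as np
from scipy.cluster import hierarchy

# same condensed distance matrix as the dendrogram example above
ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., 400.,
                   754., 564., 138., 219., 869., 669.])
Z = hierarchy.linkage(ytdist, 'single')
md = hierarchy.maxdists(Z)                       # max merge distance per node
T = hierarchy.fcluster(Z, t=md[-2], criterion='distance')
print(md)
print(T)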
``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where\r\n ``Q(j)`` the set of all node ids corresponding to nodes below\r\n and including ``j``.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n R = np.asarray(R, order='c')\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n is_valid_im(R, throw=True, name='R')\r\n if type(i) is not int:\r\n raise TypeError('The third argument must be an integer.')\r\n if i < 0 or i > 3:\r\n raise ValueError('i must be an integer between 0 and 3 inclusive.')\r\n\r\n if Z.shape[0] != R.shape[0]:\r\n raise ValueError(\"The inconsistency matrix and linkage matrix each \"\r\n \"have a different number of rows.\")\r\n\r\n n = Z.shape[0] + 1\r\n MR = np.zeros((n - 1,))\r\n [Z, R] = _copy_arrays_if_base_present([Z, R])\r\n _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)\r\n return MR\r\n\r\n\r\ndef leaders(Z, T):\r\n \"\"\"\r\n Returns the root nodes in a hierarchical clustering.\r\n\r\n Returns the root nodes in a hierarchical clustering corresponding\r\n to a cut defined by a flat cluster assignment vector ``T``. See\r\n the ``fcluster`` function for more information on the format of ``T``.\r\n\r\n For each flat cluster :math:`j` of the :math:`k` flat clusters\r\n represented in the n-sized flat cluster assignment vector ``T``,\r\n this function finds the lowest cluster node :math:`i` in the linkage\r\n tree Z such that:\r\n\r\n * leaf descendents belong only to flat cluster j\r\n (i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where\r\n :math:`S(i)` is the set of leaf ids of leaf nodes descendent\r\n with cluster node :math:`i`)\r\n\r\n * there does not exist a leaf that is not descendent with\r\n :math:`i` that also belongs to cluster :math:`j`\r\n (i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If\r\n this condition is violated, ``T`` is not a valid cluster\r\n assignment vector, and an exception will be thrown.\r\n\r\n Parameters\r\n ----------\r\n Z : ndarray\r\n The hierarchical clustering encoded as a matrix. See\r\n ``linkage`` for more information.\r\n T : ndarray\r\n The flat cluster assignment vector.\r\n\r\n Returns\r\n -------\r\n L : ndarray\r\n The leader linkage node id's stored as a k-element 1-D array\r\n where ``k`` is the number of flat clusters found in ``T``.\r\n\r\n ``L[j]=i`` is the linkage cluster node id that is the\r\n leader of flat cluster with id M[j]. If ``i < n``, ``i``\r\n corresponds to an original observation, otherwise it\r\n corresponds to a non-singleton cluster.\r\n\r\n For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with\r\n id 8's leader is linkage node 2.\r\n M : ndarray\r\n The leader linkage node id's stored as a k-element 1-D array where\r\n ``k`` is the number of flat clusters found in ``T``. This allows the\r\n set of flat cluster ids to be any arbitrary set of ``k`` integers.\r\n\r\n \"\"\"\r\n Z = np.asarray(Z, order='c')\r\n T = np.asarray(T, order='c')\r\n if type(T) != np.ndarray or T.dtype != 'i':\r\n raise TypeError('T must be a one-dimensional numpy array of integers.')\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n if len(T) != Z.shape[0] + 1:\r\n raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')\r\n\r\n Cl = np.unique(T)\r\n kk = len(Cl)\r\n L = np.zeros((kk,), dtype='i')\r\n M = np.zeros((kk,), dtype='i')\r\n n = Z.shape[0] + 1\r\n [Z, T] = _copy_arrays_if_base_present([Z, T])\r\n s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))\r\n if s >= 0:\r\n raise ValueError(('T is not a valid assignment vector. 
Error found '\r\n 'when examining linkage node %d (< 2n-1).') % s)\r\n return (L, M)\r\n", "\"\"\"\r\nObjects for dealing with polynomials.\r\n\r\nThis module provides a number of objects (mostly functions) useful for\r\ndealing with polynomials, including a `Polynomial` class that\r\nencapsulates the usual arithmetic operations. (General information\r\non how this module represents and works with polynomial objects is in\r\nthe docstring for its \"parent\" sub-package, `numpy.polynomial`).\r\n\r\nConstants\r\n---------\r\n- `polydomain` -- Polynomial default domain, [-1,1].\r\n- `polyzero` -- (Coefficients of the) \"zero polynomial.\"\r\n- `polyone` -- (Coefficients of the) constant polynomial 1.\r\n- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``.\r\n\r\nArithmetic\r\n----------\r\n- `polyadd` -- add two polynomials.\r\n- `polysub` -- subtract one polynomial from another.\r\n- `polymul` -- multiply two polynomials.\r\n- `polydiv` -- divide one polynomial by another.\r\n- `polypow` -- raise a polynomial to an positive integer power\r\n- `polyval` -- evaluate a polynomial at given points.\r\n- `polyval2d` -- evaluate a 2D polynomial at given points.\r\n- `polyval3d` -- evaluate a 3D polynomial at given points.\r\n- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product.\r\n- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product.\r\n\r\nCalculus\r\n--------\r\n- `polyder` -- differentiate a polynomial.\r\n- `polyint` -- integrate a polynomial.\r\n\r\nMisc Functions\r\n--------------\r\n- `polyfromroots` -- create a polynomial with specified roots.\r\n- `polyroots` -- find the roots of a polynomial.\r\n- `polyvander` -- Vandermonde-like matrix for powers.\r\n- `polyvander2d` -- Vandermonde-like matrix for 2D power series.\r\n- `polyvander3d` -- Vandermonde-like matrix for 3D power series.\r\n- `polycompanion` -- companion matrix in power series form.\r\n- `polyfit` -- least-squares fit returning a polynomial.\r\n- `polytrim` -- trim leading coefficients from a polynomial.\r\n- `polyline` -- polynomial representing given straight line.\r\n\r\nClasses\r\n-------\r\n- `Polynomial` -- polynomial class.\r\n\r\nSee Also\r\n--------\r\n`numpy.polynomial`\r\n\r\n\"\"\"\r\nfrom __future__ import division, absolute_import, print_function\r\n\r\n__all__ = [\r\n 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd',\r\n 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval',\r\n 'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit',\r\n 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',\r\n 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']\r\n\r\nimport warnings\r\nimport numpy as np\r\nimport numpy.linalg as la\r\n\r\nfrom . 
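A short sketch (assumed data, not from the original file) showing leaders() recovering the root node of each flat cluster produced by fcluster():

import numpy as np
from scipy.cluster import hierarchy

ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., 400.,
                   754., 564., 138., 219., 869., 669.])
Z = hierarchy.linkage(ytdist, 'single')
T = hierarchy.fcluster(Z, t=3, criterion='maxclust')   # three flat clusters
L, M = hierarchy.leaders(Z, T)
print(L, M)    # L[j] is the linkage node leading flat cluster id M[j]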
import polyutils as pu\r\nfrom ._polybase import ABCPolyBase\r\n\r\npolytrim = pu.trimcoef\r\n\r\n#\r\n# These are constant arrays are of integer type so as to be compatible\r\n# with the widest range of other types, such as Decimal.\r\n#\r\n\r\n# Polynomial default domain.\r\npolydomain = np.array([-1, 1])\r\n\r\n# Polynomial coefficients representing zero.\r\npolyzero = np.array([0])\r\n\r\n# Polynomial coefficients representing one.\r\npolyone = np.array([1])\r\n\r\n# Polynomial coefficients representing the identity x.\r\npolyx = np.array([0, 1])\r\n\r\n#\r\n# Polynomial series functions\r\n#\r\n\r\n\r\ndef polyline(off, scl):\r\n \"\"\"\r\n Returns an array representing a linear polynomial.\r\n\r\n Parameters\r\n ----------\r\n off, scl : scalars\r\n The \"y-intercept\" and \"slope\" of the line, respectively.\r\n\r\n Returns\r\n -------\r\n y : ndarray\r\n This module's representation of the linear polynomial ``off +\r\n scl*x``.\r\n\r\n See Also\r\n --------\r\n chebline\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polynomial as P\r\n >>> P.polyline(1,-1)\r\n array([ 1, -1])\r\n >>> P.polyval(1, P.polyline(1,-1)) # should be 0\r\n 0.0\r\n\r\n \"\"\"\r\n if scl != 0:\r\n return np.array([off, scl])\r\n else:\r\n return np.array([off])\r\n\r\n\r\ndef polyfromroots(roots):\r\n \"\"\"\r\n Generate a monic polynomial with given roots.\r\n\r\n Return the coefficients of the polynomial\r\n\r\n .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),\r\n\r\n where the `r_n` are the roots specified in `roots`. If a zero has\r\n multiplicity n, then it must appear in `roots` n times. For instance,\r\n if 2 is a root of multiplicity three and 3 is a root of multiplicity 2,\r\n then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear\r\n in any order.\r\n\r\n If the returned coefficients are `c`, then\r\n\r\n .. math:: p(x) = c_0 + c_1 * x + ... + x^n\r\n\r\n The coefficient of the last term is 1 for monic polynomials in this\r\n form.\r\n\r\n Parameters\r\n ----------\r\n roots : array_like\r\n Sequence containing the roots.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n 1-D array of the polynomial's coefficients If all the roots are\r\n real, then `out` is also real, otherwise it is complex. (see\r\n Examples below).\r\n\r\n See Also\r\n --------\r\n chebfromroots, legfromroots, lagfromroots, hermfromroots\r\n hermefromroots\r\n\r\n Notes\r\n -----\r\n The coefficients are determined by multiplying together linear factors\r\n of the form `(x - r_i)`, i.e.\r\n\r\n .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n)\r\n\r\n where ``n == len(roots) - 1``; note that this implies that `1` is always\r\n returned for :math:`a_n`.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polynomial as P\r\n >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x\r\n array([ 0., -1., 0., 1.])\r\n >>> j = complex(0,1)\r\n >>> P.polyfromroots((-j,j)) # complex returned, though values are real\r\n array([ 1.+0.j, 0.+0.j, 1.+0.j])\r\n\r\n \"\"\"\r\n if len(roots) == 0:\r\n return np.ones(1)\r\n else:\r\n [roots] = pu.as_series([roots], trim=False)\r\n roots.sort()\r\n p = [polyline(-r, 1) for r in roots]\r\n n = len(p)\r\n while n > 1:\r\n m, r = divmod(n, 2)\r\n tmp = [polymul(p[i], p[i+m]) for i in range(m)]\r\n if r:\r\n tmp[0] = polymul(tmp[0], p[-1])\r\n p = tmp\r\n n = m\r\n return p[0]\r\n\r\n\r\ndef polyadd(c1, c2):\r\n \"\"\"\r\n Add one polynomial to another.\r\n\r\n Returns the sum of two polynomials `c1` + `c2`. 
The arguments are\r\n sequences of coefficients from lowest order term to highest, i.e.,\r\n [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.\r\n\r\n Parameters\r\n ----------\r\n c1, c2 : array_like\r\n 1-D arrays of polynomial coefficients ordered from low to high.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n The coefficient array representing their sum.\r\n\r\n See Also\r\n --------\r\n polysub, polymul, polydiv, polypow\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polynomial as P\r\n >>> c1 = (1,2,3)\r\n >>> c2 = (3,2,1)\r\n >>> sum = P.polyadd(c1,c2); sum\r\n array([ 4., 4., 4.])\r\n >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)\r\n 28.0\r\n\r\n \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = pu.as_series([c1, c2])\r\n if len(c1) > len(c2):\r\n c1[:c2.size] += c2\r\n ret = c1\r\n else:\r\n c2[:c1.size] += c1\r\n ret = c2\r\n return pu.trimseq(ret)\r\n\r\n\r\ndef polysub(c1, c2):\r\n \"\"\"\r\n Subtract one polynomial from another.\r\n\r\n Returns the difference of two polynomials `c1` - `c2`. The arguments\r\n are sequences of coefficients from lowest order term to highest, i.e.,\r\n [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.\r\n\r\n Parameters\r\n ----------\r\n c1, c2 : array_like\r\n 1-D arrays of polynomial coefficients ordered from low to\r\n high.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n Of coefficients representing their difference.\r\n\r\n See Also\r\n --------\r\n polyadd, polymul, polydiv, polypow\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polynomial as P\r\n >>> c1 = (1,2,3)\r\n >>> c2 = (3,2,1)\r\n >>> P.polysub(c1,c2)\r\n array([-2., 0., 2.])\r\n >>> P.polysub(c2,c1) # -P.polysub(c1,c2)\r\n array([ 2., 0., -2.])\r\n\r\n \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = pu.as_series([c1, c2])\r\n if len(c1) > len(c2):\r\n c1[:c2.size] -= c2\r\n ret = c1\r\n else:\r\n c2 = -c2\r\n c2[:c1.size] += c1\r\n ret = c2\r\n return pu.trimseq(ret)\r\n\r\n\r\ndef polymulx(c):\r\n \"\"\"Multiply a polynomial by x.\r\n\r\n Multiply the polynomial `c` by x, where x is the independent\r\n variable.\r\n\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n 1-D array of polynomial coefficients ordered from low to\r\n high.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n Array representing the result of the multiplication.\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.5.0\r\n\r\n \"\"\"\r\n # c is a trimmed copy\r\n [c] = pu.as_series([c])\r\n # The zero series needs special treatment\r\n if len(c) == 1 and c[0] == 0:\r\n return c\r\n\r\n prd = np.empty(len(c) + 1, dtype=c.dtype)\r\n prd[0] = c[0]*0\r\n prd[1:] = c\r\n return prd\r\n\r\n\r\ndef polymul(c1, c2):\r\n \"\"\"\r\n Multiply one polynomial by another.\r\n\r\n Returns the product of two polynomials `c1` * `c2`. 
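A one-line illustration (not from the original file) of polymulx(), which shifts the coefficient array up one degree as implemented above:

from numpy.polynomial import polynomial as P

# illustrative only: multiply 1 + 2x + 3x**2 by x
print(P.polymulx([1, 2, 3]))    # [ 0.  1.  2.  3.]  i.e. x + 2*x**2 + 3*x**3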
The arguments are\r\n sequences of coefficients, from lowest order term to highest, e.g.,\r\n [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.``\r\n\r\n Parameters\r\n ----------\r\n c1, c2 : array_like\r\n 1-D arrays of coefficients representing a polynomial, relative to the\r\n \"standard\" basis, and ordered from lowest order term to highest.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n Of the coefficients of their product.\r\n\r\n See Also\r\n --------\r\n polyadd, polysub, polydiv, polypow\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polynomial as P\r\n >>> c1 = (1,2,3)\r\n >>> c2 = (3,2,1)\r\n >>> P.polymul(c1,c2)\r\n array([ 3., 8., 14., 8., 3.])\r\n\r\n \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = pu.as_series([c1, c2])\r\n ret = np.convolve(c1, c2)\r\n return pu.trimseq(ret)\r\n\r\n\r\ndef polydiv(c1, c2):\r\n \"\"\"\r\n Divide one polynomial by another.\r\n\r\n Returns the quotient-with-remainder of two polynomials `c1` / `c2`.\r\n The arguments are sequences of coefficients, from lowest order term\r\n to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.\r\n\r\n Parameters\r\n ----------\r\n c1, c2 : array_like\r\n 1-D arrays of polynomial coefficients ordered from low to high.\r\n\r\n Returns\r\n -------\r\n [quo, rem] : ndarrays\r\n Of coefficient series representing the quotient and remainder.\r\n\r\n See Also\r\n --------\r\n polyadd, polysub, polymul, polypow\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polynomial as P\r\n >>> c1 = (1,2,3)\r\n >>> c2 = (3,2,1)\r\n >>> P.polydiv(c1,c2)\r\n (array([ 3.]), array([-8., -4.]))\r\n >>> P.polydiv(c2,c1)\r\n (array([ 0.33333333]), array([ 2.66666667, 1.33333333]))\r\n\r\n \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = pu.as_series([c1, c2])\r\n if c2[-1] == 0:\r\n raise ZeroDivisionError()\r\n\r\n len1 = len(c1)\r\n len2 = len(c2)\r\n if len2 == 1:\r\n return c1/c2[-1], c1[:1]*0\r\n elif len1 < len2:\r\n return c1[:1]*0, c1\r\n else:\r\n dlen = len1 - len2\r\n scl = c2[-1]\r\n c2 = c2[:-1]/scl\r\n i = dlen\r\n j = len1 - 1\r\n while i >= 0:\r\n c1[i:j] -= c2*c1[j]\r\n i -= 1\r\n j -= 1\r\n return c1[j+1:]/scl, pu.trimseq(c1[:j+1])\r\n\r\n\r\ndef polypow(c, pow, maxpower=None):\r\n \"\"\"Raise a polynomial to a power.\r\n\r\n Returns the polynomial `c` raised to the power `pow`. The argument\r\n `c` is a sequence of coefficients ordered from low to high. i.e.,\r\n [1,2,3] is the series ``1 + 2*x + 3*x**2.``\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n 1-D array of array of series coefficients ordered from low to\r\n high degree.\r\n pow : integer\r\n Power to which the series will be raised\r\n maxpower : integer, optional\r\n Maximum power allowed. This is mainly to limit growth of the series\r\n to unmanageable size. 
Default is 16\r\n\r\n Returns\r\n -------\r\n coef : ndarray\r\n Power series of power.\r\n\r\n See Also\r\n --------\r\n polyadd, polysub, polymul, polydiv\r\n\r\n Examples\r\n --------\r\n\r\n \"\"\"\r\n # c is a trimmed copy\r\n [c] = pu.as_series([c])\r\n power = int(pow)\r\n if power != pow or power < 0:\r\n raise ValueError(\"Power must be a non-negative integer.\")\r\n elif maxpower is not None and power > maxpower:\r\n raise ValueError(\"Power is too large\")\r\n elif power == 0:\r\n return np.array([1], dtype=c.dtype)\r\n elif power == 1:\r\n return c\r\n else:\r\n # This can be made more efficient by using powers of two\r\n # in the usual way.\r\n prd = c\r\n for i in range(2, power + 1):\r\n prd = np.convolve(prd, c)\r\n return prd\r\n\r\n\r\ndef polyder(c, m=1, scl=1, axis=0):\r\n \"\"\"\r\n Differentiate a polynomial.\r\n\r\n Returns the polynomial coefficients `c` differentiated `m` times along\r\n `axis`. At each iteration the result is multiplied by `scl` (the\r\n scaling factor is for use in a linear change of variable). The\r\n argument `c` is an array of coefficients from low to high degree along\r\n each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``\r\n while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is\r\n ``x`` and axis=1 is ``y``.\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n Array of polynomial coefficients. If c is multidimensional the\r\n different axis correspond to different variables with the degree\r\n in each axis given by the corresponding index.\r\n m : int, optional\r\n Number of derivatives taken, must be non-negative. (Default: 1)\r\n scl : scalar, optional\r\n Each differentiation is multiplied by `scl`. The end result is\r\n multiplication by ``scl**m``. This is for use in a linear change\r\n of variable. (Default: 1)\r\n axis : int, optional\r\n Axis over which the derivative is taken. (Default: 0).\r\n\r\n .. 
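The Examples section of polypow() above is empty; a minimal sketch of its behaviour (repeated convolution of the series, as in the loop above):

from numpy.polynomial import polynomial as P

c = (1, 2, 3)                 # 1 + 2*x + 3*x**2
print(P.polypow(c, 2))        # [ 1.  4. 10. 12.  9.]  == P.polymul(c, c)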
versionadded:: 1.7.0\r\n\r\n Returns\r\n -------\r\n der : ndarray\r\n Polynomial coefficients of the derivative.\r\n\r\n See Also\r\n --------\r\n polyint\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polynomial as P\r\n >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3\r\n >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2\r\n array([ 2., 6., 12.])\r\n >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24\r\n array([ 24.])\r\n >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2\r\n array([ -2., -6., -12.])\r\n >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x\r\n array([ 6., 24.])\r\n\r\n \"\"\"\r\n c = np.array(c, ndmin=1, copy=1)\r\n if c.dtype.char in '?bBhHiIlLqQpP':\r\n # astype fails with NA\r\n c = c + 0.0\r\n cdt = c.dtype\r\n cnt, iaxis = [int(t) for t in [m, axis]]\r\n\r\n if cnt != m:\r\n raise ValueError(\"The order of derivation must be integer\")\r\n if cnt < 0:\r\n raise ValueError(\"The order of derivation must be non-negative\")\r\n if iaxis != axis:\r\n raise ValueError(\"The axis must be integer\")\r\n if not -c.ndim <= iaxis < c.ndim:\r\n raise ValueError(\"The axis is out of range\")\r\n if iaxis < 0:\r\n iaxis += c.ndim\r\n\r\n if cnt == 0:\r\n return c\r\n\r\n c = np.rollaxis(c, iaxis)\r\n n = len(c)\r\n if cnt >= n:\r\n c = c[:1]*0\r\n else:\r\n for i in range(cnt):\r\n n = n - 1\r\n c *= scl\r\n der = np.empty((n,) + c.shape[1:], dtype=cdt)\r\n for j in range(n, 0, -1):\r\n der[j - 1] = j*c[j]\r\n c = der\r\n c = np.rollaxis(c, 0, iaxis + 1)\r\n return c\r\n\r\n\r\ndef polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):\r\n \"\"\"\r\n Integrate a polynomial.\r\n\r\n Returns the polynomial coefficients `c` integrated `m` times from\r\n `lbnd` along `axis`. At each iteration the resulting series is\r\n **multiplied** by `scl` and an integration constant, `k`, is added.\r\n The scaling factor is for use in a linear change of variable. (\"Buyer\r\n beware\": note that, depending on what one is doing, one may want `scl`\r\n to be the reciprocal of what one might expect; for more information,\r\n see the Notes section below.) The argument `c` is an array of\r\n coefficients, from low to high degree along each axis, e.g., [1,2,3]\r\n represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]\r\n represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is\r\n ``y``.\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n 1-D array of polynomial coefficients, ordered from low to high.\r\n m : int, optional\r\n Order of integration, must be positive. (Default: 1)\r\n k : {[], list, scalar}, optional\r\n Integration constant(s). The value of the first integral at zero\r\n is the first value in the list, the value of the second integral\r\n at zero is the second value, etc. If ``k == []`` (the default),\r\n all constants are set to zero. If ``m == 1``, a single scalar can\r\n be given instead of a list.\r\n lbnd : scalar, optional\r\n The lower bound of the integral. (Default: 0)\r\n scl : scalar, optional\r\n Following each integration the result is *multiplied* by `scl`\r\n before the integration constant is added. (Default: 1)\r\n axis : int, optional\r\n Axis over which the integral is taken. (Default: 0).\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n Returns\r\n -------\r\n S : ndarray\r\n Coefficient array of the integral.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If ``m < 1``, ``len(k) > m``.\r\n\r\n See Also\r\n --------\r\n polyder\r\n\r\n Notes\r\n -----\r\n Note that the result of each integration is *multiplied* by `scl`. 
Why\r\n is this important to note? Say one is making a linear change of\r\n variable :math:`u = ax + b` in an integral relative to `x`. Then\r\n .. math::`dx = du/a`, so one will need to set `scl` equal to\r\n :math:`1/a` - perhaps not what one would have first thought.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polynomial as P\r\n >>> c = (1,2,3)\r\n >>> P.polyint(c) # should return array([0, 1, 1, 1])\r\n array([ 0., 1., 1., 1.])\r\n >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])\r\n array([ 0. , 0. , 0. , 0.16666667, 0.08333333,\r\n 0.05 ])\r\n >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])\r\n array([ 3., 1., 1., 1.])\r\n >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])\r\n array([ 6., 1., 1., 1.])\r\n >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])\r\n array([ 0., -2., -2., -2.])\r\n\r\n \"\"\"\r\n c = np.array(c, ndmin=1, copy=1)\r\n if c.dtype.char in '?bBhHiIlLqQpP':\r\n # astype doesn't preserve mask attribute.\r\n c = c + 0.0\r\n cdt = c.dtype\r\n if not np.iterable(k):\r\n k = [k]\r\n cnt, iaxis = [int(t) for t in [m, axis]]\r\n\r\n if cnt != m:\r\n raise ValueError(\"The order of integration must be integer\")\r\n if cnt < 0:\r\n raise ValueError(\"The order of integration must be non-negative\")\r\n if len(k) > cnt:\r\n raise ValueError(\"Too many integration constants\")\r\n if iaxis != axis:\r\n raise ValueError(\"The axis must be integer\")\r\n if not -c.ndim <= iaxis < c.ndim:\r\n raise ValueError(\"The axis is out of range\")\r\n if iaxis < 0:\r\n iaxis += c.ndim\r\n\r\n if cnt == 0:\r\n return c\r\n\r\n k = list(k) + [0]*(cnt - len(k))\r\n c = np.rollaxis(c, iaxis)\r\n for i in range(cnt):\r\n n = len(c)\r\n c *= scl\r\n if n == 1 and np.all(c[0] == 0):\r\n c[0] += k[i]\r\n else:\r\n tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)\r\n tmp[0] = c[0]*0\r\n tmp[1] = c[0]\r\n for j in range(1, n):\r\n tmp[j + 1] = c[j]/(j + 1)\r\n tmp[0] += k[i] - polyval(lbnd, tmp)\r\n c = tmp\r\n c = np.rollaxis(c, 0, iaxis + 1)\r\n return c\r\n\r\n\r\ndef polyval(x, c, tensor=True):\r\n \"\"\"\r\n Evaluate a polynomial at points x.\r\n\r\n If `c` is of length `n + 1`, this function returns the value\r\n\r\n .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n\r\n\r\n The parameter `x` is converted to an array only if it is a tuple or a\r\n list, otherwise it is treated as a scalar. In either case, either `x`\r\n or its elements must support multiplication and addition both with\r\n themselves and with the elements of `c`.\r\n\r\n If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If\r\n `c` is multidimensional, then the shape of the result depends on the\r\n value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +\r\n x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that\r\n scalars have shape (,).\r\n\r\n Trailing zeros in the coefficients will be used in the evaluation, so\r\n they should be avoided if efficiency is a concern.\r\n\r\n Parameters\r\n ----------\r\n x : array_like, compatible object\r\n If `x` is a list or tuple, it is converted to an ndarray, otherwise\r\n it is left unchanged and treated as a scalar. In either case, `x`\r\n or its elements must support addition and multiplication with\r\n with themselves and with the elements of `c`.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficients for terms of\r\n degree n are contained in c[n]. If `c` is multidimensional the\r\n remaining indices enumerate multiple polynomials. 
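A worked illustration (assumed substitution u = 2x) of the scl note above: since dx = du/2, pass scl = 1/2.

from numpy.polynomial import polynomial as P

c = [0., 0., 3.]                  # 3*u**2
print(P.polyint(c, scl=0.5))      # [ 0.   0.   0.   0.5]  i.e. u**3 / 2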
In the two\r\n dimensional case the coefficients may be thought of as stored in\r\n the columns of `c`.\r\n tensor : boolean, optional\r\n If True, the shape of the coefficient array is extended with ones\r\n on the right, one for each dimension of `x`. Scalars have dimension 0\r\n for this action. The result is that every column of coefficients in\r\n `c` is evaluated for every element of `x`. If False, `x` is broadcast\r\n over the columns of `c` for the evaluation. This keyword is useful\r\n when `c` is multidimensional. The default value is True.\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n Returns\r\n -------\r\n values : ndarray, compatible object\r\n The shape of the returned array is described above.\r\n\r\n See Also\r\n --------\r\n polyval2d, polygrid2d, polyval3d, polygrid3d\r\n\r\n Notes\r\n -----\r\n The evaluation uses Horner's method.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial.polynomial import polyval\r\n >>> polyval(1, [1,2,3])\r\n 6.0\r\n >>> a = np.arange(4).reshape(2,2)\r\n >>> a\r\n array([[0, 1],\r\n [2, 3]])\r\n >>> polyval(a, [1,2,3])\r\n array([[ 1., 6.],\r\n [ 17., 34.]])\r\n >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients\r\n >>> coef\r\n array([[0, 1],\r\n [2, 3]])\r\n >>> polyval([1,2], coef, tensor=True)\r\n array([[ 2., 4.],\r\n [ 4., 7.]])\r\n >>> polyval([1,2], coef, tensor=False)\r\n array([ 2., 7.])\r\n\r\n \"\"\"\r\n c = np.array(c, ndmin=1, copy=0)\r\n if c.dtype.char in '?bBhHiIlLqQpP':\r\n # astype fails with NA\r\n c = c + 0.0\r\n if isinstance(x, (tuple, list)):\r\n x = np.asarray(x)\r\n if isinstance(x, np.ndarray) and tensor:\r\n c = c.reshape(c.shape + (1,)*x.ndim)\r\n\r\n c0 = c[-1] + x*0\r\n for i in range(2, len(c) + 1):\r\n c0 = c[-i] + c0*x\r\n return c0\r\n\r\n\r\ndef polyval2d(x, y, c):\r\n \"\"\"\r\n Evaluate a 2-D polynomial at points (x, y).\r\n\r\n This function returns the value\r\n\r\n .. math:: p(x,y) = \\\\sum_{i,j} c_{i,j} * x^i * y^j\r\n\r\n The parameters `x` and `y` are converted to arrays only if they are\r\n tuples or a lists, otherwise they are treated as a scalars and they\r\n must have the same shape after conversion. In either case, either `x`\r\n and `y` or their elements must support multiplication and addition both\r\n with themselves and with the elements of `c`.\r\n\r\n If `c` has fewer than two dimensions, ones are implicitly appended to\r\n its shape to make it 2-D. The shape of the result will be c.shape[2:] +\r\n x.shape.\r\n\r\n Parameters\r\n ----------\r\n x, y : array_like, compatible objects\r\n The two dimensional series is evaluated at the points `(x, y)`,\r\n where `x` and `y` must have the same shape. If `x` or `y` is a list\r\n or tuple, it is first converted to an ndarray, otherwise it is left\r\n unchanged and, if it isn't an ndarray, it is treated as a scalar.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficient of the term\r\n of multi-degree i,j is contained in `c[i,j]`. If `c` has\r\n dimension greater than two the remaining indices enumerate multiple\r\n sets of coefficients.\r\n\r\n Returns\r\n -------\r\n values : ndarray, compatible object\r\n The values of the two dimensional polynomial at points formed with\r\n pairs of corresponding values from `x` and `y`.\r\n\r\n See Also\r\n --------\r\n polyval, polygrid2d, polyval3d, polygrid3d\r\n\r\n Notes\r\n -----\r\n\r\n .. 
versionadded:: 1.7.0\r\n\r\n \"\"\"\r\n try:\r\n x, y = np.array((x, y), copy=0)\r\n except:\r\n raise ValueError('x, y are incompatible')\r\n\r\n c = polyval(x, c)\r\n c = polyval(y, c, tensor=False)\r\n return c\r\n\r\n\r\ndef polygrid2d(x, y, c):\r\n \"\"\"\r\n Evaluate a 2-D polynomial on the Cartesian product of x and y.\r\n\r\n This function returns the values:\r\n\r\n .. math:: p(a,b) = \\\\sum_{i,j} c_{i,j} * a^i * b^j\r\n\r\n where the points `(a, b)` consist of all pairs formed by taking\r\n `a` from `x` and `b` from `y`. The resulting points form a grid with\r\n `x` in the first dimension and `y` in the second.\r\n\r\n The parameters `x` and `y` are converted to arrays only if they are\r\n tuples or a lists, otherwise they are treated as a scalars. In either\r\n case, either `x` and `y` or their elements must support multiplication\r\n and addition both with themselves and with the elements of `c`.\r\n\r\n If `c` has fewer than two dimensions, ones are implicitly appended to\r\n its shape to make it 2-D. The shape of the result will be c.shape[2:] +\r\n x.shape + y.shape.\r\n\r\n Parameters\r\n ----------\r\n x, y : array_like, compatible objects\r\n The two dimensional series is evaluated at the points in the\r\n Cartesian product of `x` and `y`. If `x` or `y` is a list or\r\n tuple, it is first converted to an ndarray, otherwise it is left\r\n unchanged and, if it isn't an ndarray, it is treated as a scalar.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficients for terms of\r\n degree i,j are contained in ``c[i,j]``. If `c` has dimension\r\n greater than two the remaining indices enumerate multiple sets of\r\n coefficients.\r\n\r\n Returns\r\n -------\r\n values : ndarray, compatible object\r\n The values of the two dimensional polynomial at points in the Cartesian\r\n product of `x` and `y`.\r\n\r\n See Also\r\n --------\r\n polyval, polyval2d, polyval3d, polygrid3d\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n \"\"\"\r\n c = polyval(x, c)\r\n c = polyval(y, c)\r\n return c\r\n\r\n\r\ndef polyval3d(x, y, z, c):\r\n \"\"\"\r\n Evaluate a 3-D polynomial at points (x, y, z).\r\n\r\n This function returns the values:\r\n\r\n .. math:: p(x,y,z) = \\\\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k\r\n\r\n The parameters `x`, `y`, and `z` are converted to arrays only if\r\n they are tuples or a lists, otherwise they are treated as a scalars and\r\n they must have the same shape after conversion. In either case, either\r\n `x`, `y`, and `z` or their elements must support multiplication and\r\n addition both with themselves and with the elements of `c`.\r\n\r\n If `c` has fewer than 3 dimensions, ones are implicitly appended to its\r\n shape to make it 3-D. The shape of the result will be c.shape[3:] +\r\n x.shape.\r\n\r\n Parameters\r\n ----------\r\n x, y, z : array_like, compatible object\r\n The three dimensional series is evaluated at the points\r\n `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If\r\n any of `x`, `y`, or `z` is a list or tuple, it is first converted\r\n to an ndarray, otherwise it is left unchanged and if it isn't an\r\n ndarray it is treated as a scalar.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficient of the term of\r\n multi-degree i,j,k is contained in ``c[i,j,k]``. 
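An illustrative evaluation (coefficients invented, not from the original file) of the 2-D forms above, at paired points and on a Cartesian grid:

import numpy as np
from numpy.polynomial import polynomial as P

c = np.array([[1., 2.],           # 1 + 2*y
              [3., 4.]])          # + 3*x + 4*x*y
print(P.polyval2d(2., 3., c))     # 1 + 2*3 + 3*2 + 4*2*3 = 37.0
print(P.polygrid2d([0, 1], [0, 1], c))   # [[ 1.  3.] [ 4. 10.]]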
If `c` has dimension\r\n greater than 3 the remaining indices enumerate multiple sets of\r\n coefficients.\r\n\r\n Returns\r\n -------\r\n values : ndarray, compatible object\r\n The values of the multidimensional polynomial on points formed with\r\n triples of corresponding values from `x`, `y`, and `z`.\r\n\r\n See Also\r\n --------\r\n polyval, polyval2d, polygrid2d, polygrid3d\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n \"\"\"\r\n try:\r\n x, y, z = np.array((x, y, z), copy=0)\r\n except:\r\n raise ValueError('x, y, z are incompatible')\r\n\r\n c = polyval(x, c)\r\n c = polyval(y, c, tensor=False)\r\n c = polyval(z, c, tensor=False)\r\n return c\r\n\r\n\r\ndef polygrid3d(x, y, z, c):\r\n \"\"\"\r\n Evaluate a 3-D polynomial on the Cartesian product of x, y and z.\r\n\r\n This function returns the values:\r\n\r\n .. math:: p(a,b,c) = \\\\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k\r\n\r\n where the points `(a, b, c)` consist of all triples formed by taking\r\n `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form\r\n a grid with `x` in the first dimension, `y` in the second, and `z` in\r\n the third.\r\n\r\n The parameters `x`, `y`, and `z` are converted to arrays only if they\r\n are tuples or a lists, otherwise they are treated as a scalars. In\r\n either case, either `x`, `y`, and `z` or their elements must support\r\n multiplication and addition both with themselves and with the elements\r\n of `c`.\r\n\r\n If `c` has fewer than three dimensions, ones are implicitly appended to\r\n its shape to make it 3-D. The shape of the result will be c.shape[3:] +\r\n x.shape + y.shape + z.shape.\r\n\r\n Parameters\r\n ----------\r\n x, y, z : array_like, compatible objects\r\n The three dimensional series is evaluated at the points in the\r\n Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a\r\n list or tuple, it is first converted to an ndarray, otherwise it is\r\n left unchanged and, if it isn't an ndarray, it is treated as a\r\n scalar.\r\n c : array_like\r\n Array of coefficients ordered so that the coefficients for terms of\r\n degree i,j are contained in ``c[i,j]``. If `c` has dimension\r\n greater than two the remaining indices enumerate multiple sets of\r\n coefficients.\r\n\r\n Returns\r\n -------\r\n values : ndarray, compatible object\r\n The values of the two dimensional polynomial at points in the Cartesian\r\n product of `x` and `y`.\r\n\r\n See Also\r\n --------\r\n polyval, polyval2d, polygrid2d, polyval3d\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n \"\"\"\r\n c = polyval(x, c)\r\n c = polyval(y, c)\r\n c = polyval(z, c)\r\n return c\r\n\r\n\r\ndef polyvander(x, deg):\r\n \"\"\"Vandermonde matrix of given degree.\r\n\r\n Returns the Vandermonde matrix of degree `deg` and sample points\r\n `x`. The Vandermonde matrix is defined by\r\n\r\n .. math:: V[..., i] = x^i,\r\n\r\n where `0 <= i <= deg`. The leading indices of `V` index the elements of\r\n `x` and the last index is the power of `x`.\r\n\r\n If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the\r\n matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and\r\n ``polyval(x, c)`` are the same up to roundoff. This equivalence is\r\n useful both for least squares fitting and for the evaluation of a large\r\n number of polynomials of the same degree and sample points.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Array of points. 
The dtype is converted to float64 or complex128\r\n depending on whether any of the elements are complex. If `x` is\r\n scalar it is converted to a 1-D array.\r\n deg : int\r\n Degree of the resulting matrix.\r\n\r\n Returns\r\n -------\r\n vander : ndarray.\r\n The Vandermonde matrix. The shape of the returned matrix is\r\n ``x.shape + (deg + 1,)``, where the last index is the power of `x`.\r\n The dtype will be the same as the converted `x`.\r\n\r\n See Also\r\n --------\r\n polyvander2d, polyvander3d\r\n\r\n \"\"\"\r\n ideg = int(deg)\r\n if ideg != deg:\r\n raise ValueError(\"deg must be integer\")\r\n if ideg < 0:\r\n raise ValueError(\"deg must be non-negative\")\r\n\r\n x = np.array(x, copy=0, ndmin=1) + 0.0\r\n dims = (ideg + 1,) + x.shape\r\n dtyp = x.dtype\r\n v = np.empty(dims, dtype=dtyp)\r\n v[0] = x*0 + 1\r\n if ideg > 0:\r\n v[1] = x\r\n for i in range(2, ideg + 1):\r\n v[i] = v[i-1]*x\r\n return np.rollaxis(v, 0, v.ndim)\r\n\r\n\r\ndef polyvander2d(x, y, deg):\r\n \"\"\"Pseudo-Vandermonde matrix of given degrees.\r\n\r\n Returns the pseudo-Vandermonde matrix of degrees `deg` and sample\r\n points `(x, y)`. The pseudo-Vandermonde matrix is defined by\r\n\r\n .. math:: V[..., deg[1]*i + j] = x^i * y^j,\r\n\r\n where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of\r\n `V` index the points `(x, y)` and the last index encodes the powers of\r\n `x` and `y`.\r\n\r\n If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`\r\n correspond to the elements of a 2-D coefficient array `c` of shape\r\n (xdeg + 1, ydeg + 1) in the order\r\n\r\n .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...\r\n\r\n and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same\r\n up to roundoff. This equivalence is useful both for least squares\r\n fitting and for the evaluation of a large number of 2-D polynomials\r\n of the same degrees and sample points.\r\n\r\n Parameters\r\n ----------\r\n x, y : array_like\r\n Arrays of point coordinates, all of the same shape. The dtypes\r\n will be converted to either float64 or complex128 depending on\r\n whether any of the elements are complex. Scalars are converted to\r\n 1-D arrays.\r\n deg : list of ints\r\n List of maximum degrees of the form [x_deg, y_deg].\r\n\r\n Returns\r\n -------\r\n vander2d : ndarray\r\n The shape of the returned matrix is ``x.shape + (order,)``, where\r\n :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same\r\n as the converted `x` and `y`.\r\n\r\n See Also\r\n --------\r\n polyvander, polyvander3d. polyval2d, polyval3d\r\n\r\n \"\"\"\r\n ideg = [int(d) for d in deg]\r\n is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]\r\n if is_valid != [1, 1]:\r\n raise ValueError(\"degrees must be non-negative integers\")\r\n degx, degy = ideg\r\n x, y = np.array((x, y), copy=0) + 0.0\r\n\r\n vx = polyvander(x, degx)\r\n vy = polyvander(y, degy)\r\n v = vx[..., None]*vy[..., None,:]\r\n # einsum bug\r\n #v = np.einsum(\"...i,...j->...ij\", vx, vy)\r\n return v.reshape(v.shape[:-2] + (-1,))\r\n\r\n\r\ndef polyvander3d(x, y, z, deg):\r\n \"\"\"Pseudo-Vandermonde matrix of given degrees.\r\n\r\n Returns the pseudo-Vandermonde matrix of degrees `deg` and sample\r\n points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,\r\n then The pseudo-Vandermonde matrix is defined by\r\n\r\n .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,\r\n\r\n where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. 
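A quick consistency check (not from the original file) of the identity stated above: np.dot(polyvander(x, n), c) agrees with polyval(x, c) up to roundoff.

import numpy as np
from numpy.polynomial import polynomial as P

x = np.linspace(-1, 1, 5)
c = [1., 0., -2.]                                # 1 - 2*x**2
V = P.polyvander(x, 2)
print(np.allclose(V.dot(c), P.polyval(x, c)))    # True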
The leading\r\n indices of `V` index the points `(x, y, z)` and the last index encodes\r\n the powers of `x`, `y`, and `z`.\r\n\r\n If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns\r\n of `V` correspond to the elements of a 3-D coefficient array `c` of\r\n shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order\r\n\r\n .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...\r\n\r\n and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the\r\n same up to roundoff. This equivalence is useful both for least squares\r\n fitting and for the evaluation of a large number of 3-D polynomials\r\n of the same degrees and sample points.\r\n\r\n Parameters\r\n ----------\r\n x, y, z : array_like\r\n Arrays of point coordinates, all of the same shape. The dtypes will\r\n be converted to either float64 or complex128 depending on whether\r\n any of the elements are complex. Scalars are converted to 1-D\r\n arrays.\r\n deg : list of ints\r\n List of maximum degrees of the form [x_deg, y_deg, z_deg].\r\n\r\n Returns\r\n -------\r\n vander3d : ndarray\r\n The shape of the returned matrix is ``x.shape + (order,)``, where\r\n :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will\r\n be the same as the converted `x`, `y`, and `z`.\r\n\r\n See Also\r\n --------\r\n polyvander, polyvander3d. polyval2d, polyval3d\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n \"\"\"\r\n ideg = [int(d) for d in deg]\r\n is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]\r\n if is_valid != [1, 1, 1]:\r\n raise ValueError(\"degrees must be non-negative integers\")\r\n degx, degy, degz = ideg\r\n x, y, z = np.array((x, y, z), copy=0) + 0.0\r\n\r\n vx = polyvander(x, degx)\r\n vy = polyvander(y, degy)\r\n vz = polyvander(z, degz)\r\n v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]\r\n # einsum bug\r\n #v = np.einsum(\"...i, ...j, ...k->...ijk\", vx, vy, vz)\r\n return v.reshape(v.shape[:-3] + (-1,))\r\n\r\n\r\ndef polyfit(x, y, deg, rcond=None, full=False, w=None):\r\n \"\"\"\r\n Least-squares fit of a polynomial to data.\r\n\r\n Return the coefficients of a polynomial of degree `deg` that is the\r\n least squares fit to the data values `y` given at points `x`. If `y` is\r\n 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple\r\n fits are done, one for each column of `y`, and the resulting\r\n coefficients are stored in the corresponding columns of a 2-D return.\r\n The fitted polynomial(s) are in the form\r\n\r\n .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,\r\n\r\n where `n` is `deg`.\r\n\r\n Parameters\r\n ----------\r\n x : array_like, shape (`M`,)\r\n x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.\r\n y : array_like, shape (`M`,) or (`M`, `K`)\r\n y-coordinates of the sample points. Several sets of sample points\r\n sharing the same x-coordinates can be (independently) fit with one\r\n call to `polyfit` by passing in for `y` a 2-D array that contains\r\n one data set per column.\r\n deg : int or 1-D array_like\r\n Degree(s) of the fitting polynomials. If `deg` is a single integer\r\n all terms up to and including the `deg`'th term are included in the\r\n fit. For Numpy versions >= 1.11 a list of integers specifying the\r\n degrees of the terms to include may be used instead.\r\n rcond : float, optional\r\n Relative condition number of the fit. Singular values smaller\r\n than `rcond`, relative to the largest singular value, will be\r\n ignored. 
The default value is ``len(x)*eps``, where `eps` is the\r\n relative precision of the platform's float type, about 2e-16 in\r\n most cases.\r\n full : bool, optional\r\n Switch determining the nature of the return value. When ``False``\r\n (the default) just the coefficients are returned; when ``True``,\r\n diagnostic information from the singular value decomposition (used\r\n to solve the fit's matrix equation) is also returned.\r\n w : array_like, shape (`M`,), optional\r\n Weights. If not None, the contribution of each point\r\n ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the\r\n weights are chosen so that the errors of the products ``w[i]*y[i]``\r\n all have the same variance. The default value is None.\r\n\r\n .. versionadded:: 1.5.0\r\n\r\n Returns\r\n -------\r\n coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)\r\n Polynomial coefficients ordered from low to high. If `y` was 2-D,\r\n the coefficients in column `k` of `coef` represent the polynomial\r\n fit to the data in `y`'s `k`-th column.\r\n\r\n [residuals, rank, singular_values, rcond] : list\r\n These values are only returned if `full` = True\r\n\r\n resid -- sum of squared residuals of the least squares fit\r\n rank -- the numerical rank of the scaled Vandermonde matrix\r\n sv -- singular values of the scaled Vandermonde matrix\r\n rcond -- value of `rcond`.\r\n\r\n For more details, see `linalg.lstsq`.\r\n\r\n Raises\r\n ------\r\n RankWarning\r\n Raised if the matrix in the least-squares fit is rank deficient.\r\n The warning is only raised if `full` == False. The warnings can\r\n be turned off by:\r\n\r\n >>> import warnings\r\n >>> warnings.simplefilter('ignore', RankWarning)\r\n\r\n See Also\r\n --------\r\n chebfit, legfit, lagfit, hermfit, hermefit\r\n polyval : Evaluates a polynomial.\r\n polyvander : Vandermonde matrix for powers.\r\n linalg.lstsq : Computes a least-squares fit from the matrix.\r\n scipy.interpolate.UnivariateSpline : Computes spline fits.\r\n\r\n Notes\r\n -----\r\n The solution is the coefficients of the polynomial `p` that minimizes\r\n the sum of the weighted squared errors\r\n\r\n .. math :: E = \\\\sum_j w_j^2 * |y_j - p(x_j)|^2,\r\n\r\n where the :math:`w_j` are the weights. This problem is solved by\r\n setting up the (typically) over-determined matrix equation:\r\n\r\n .. math :: V(x) * c = w * y,\r\n\r\n where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the\r\n coefficients to be solved for, `w` are the weights, and `y` are the\r\n observed values. This equation is then solved using the singular value\r\n decomposition of `V`.\r\n\r\n If some of the singular values of `V` are so small that they are\r\n neglected (and `full` == ``False``), a `RankWarning` will be raised.\r\n This means that the coefficient values may be poorly determined.\r\n Fitting to a lower order polynomial will usually get rid of the warning\r\n (but may not be what you want, of course; if you have independent\r\n reason(s) for choosing the degree which isn't working, you may have to:\r\n a) reconsider those reasons, and/or b) reconsider the quality of your\r\n data). The `rcond` parameter can also be set to a value smaller than\r\n its default, but the resulting fit may be spurious and have large\r\n contributions from roundoff error.\r\n\r\n Polynomial fits using double precision tend to \"fail\" at about\r\n (polynomial) degree 20. 
Fits using Chebyshev or Legendre series are\r\n generally better conditioned, but much can still depend on the\r\n distribution of the sample points and the smoothness of the data. If\r\n the quality of the fit is inadequate, splines may be a good\r\n alternative.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polynomial as P\r\n >>> x = np.linspace(-1,1,51) # x \"data\": [-1, -0.96, ..., 0.96, 1]\r\n >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) \"noise\"\r\n >>> c, stats = P.polyfit(x,y,3,full=True)\r\n >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1\r\n array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286])\r\n >>> stats # note the large SSR, explaining the rather poor results\r\n [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316,\r\n 0.28853036]), 1.1324274851176597e-014]\r\n\r\n Same thing without the added noise\r\n\r\n >>> y = x**3 - x\r\n >>> c, stats = P.polyfit(x,y,3,full=True)\r\n >>> c # c[0], c[2] should be \"very close to 0\", c[1] ~= -1, c[3] ~= 1\r\n array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16,\r\n 1.00000000e+00])\r\n >>> stats # note the minuscule SSR\r\n [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158,\r\n 0.50443316, 0.28853036]), 1.1324274851176597e-014]\r\n\r\n \"\"\"\r\n x = np.asarray(x) + 0.0\r\n y = np.asarray(y) + 0.0\r\n deg = np.asarray(deg)\r\n\r\n # check arguments.\r\n if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:\r\n raise TypeError(\"deg must be an int or non-empty 1-D array of int\")\r\n if deg.min() < 0:\r\n raise ValueError(\"expected deg >= 0\")\r\n if x.ndim != 1:\r\n raise TypeError(\"expected 1D vector for x\")\r\n if x.size == 0:\r\n raise TypeError(\"expected non-empty vector for x\")\r\n if y.ndim < 1 or y.ndim > 2:\r\n raise TypeError(\"expected 1D or 2D array for y\")\r\n if len(x) != len(y):\r\n raise TypeError(\"expected x and y to have same length\")\r\n\r\n if deg.ndim == 0:\r\n lmax = deg\r\n order = lmax + 1\r\n van = polyvander(x, lmax)\r\n else:\r\n deg = np.sort(deg)\r\n lmax = deg[-1]\r\n order = len(deg)\r\n van = polyvander(x, lmax)[:, deg]\r\n\r\n # set up the least squares matrices in transposed form\r\n lhs = van.T\r\n rhs = y.T\r\n if w is not None:\r\n w = np.asarray(w) + 0.0\r\n if w.ndim != 1:\r\n raise TypeError(\"expected 1D vector for w\")\r\n if len(x) != len(w):\r\n raise TypeError(\"expected x and w to have same length\")\r\n # apply weights. 
Don't use inplace operations as they\r\n # can cause problems with NA.\r\n lhs = lhs * w\r\n rhs = rhs * w\r\n\r\n # set rcond\r\n if rcond is None:\r\n rcond = len(x)*np.finfo(x.dtype).eps\r\n\r\n # Determine the norms of the design matrix columns.\r\n if issubclass(lhs.dtype.type, np.complexfloating):\r\n scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))\r\n else:\r\n scl = np.sqrt(np.square(lhs).sum(1))\r\n scl[scl == 0] = 1\r\n\r\n # Solve the least squares problem.\r\n c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)\r\n c = (c.T/scl).T\r\n\r\n # Expand c to include non-fitted coefficients which are set to zero\r\n if deg.ndim == 1:\r\n if c.ndim == 2:\r\n cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)\r\n else:\r\n cc = np.zeros(lmax + 1, dtype=c.dtype)\r\n cc[deg] = c\r\n c = cc\r\n\r\n # warn on rank reduction\r\n if rank != order and not full:\r\n msg = \"The fit may be poorly conditioned\"\r\n warnings.warn(msg, pu.RankWarning)\r\n\r\n if full:\r\n return c, [resids, rank, s, rcond]\r\n else:\r\n return c\r\n\r\n\r\ndef polycompanion(c):\r\n \"\"\"\r\n Return the companion matrix of c.\r\n\r\n The companion matrix for power series cannot be made symmetric by\r\n scaling the basis, so this function differs from those for the\r\n orthogonal polynomials.\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n 1-D array of polynomial coefficients ordered from low to high\r\n degree.\r\n\r\n Returns\r\n -------\r\n mat : ndarray\r\n Companion matrix of dimensions (deg, deg).\r\n\r\n Notes\r\n -----\r\n\r\n .. versionadded:: 1.7.0\r\n\r\n \"\"\"\r\n # c is a trimmed copy\r\n [c] = pu.as_series([c])\r\n if len(c) < 2:\r\n raise ValueError('Series must have maximum degree of at least 1.')\r\n if len(c) == 2:\r\n return np.array([[-c[0]/c[1]]])\r\n\r\n n = len(c) - 1\r\n mat = np.zeros((n, n), dtype=c.dtype)\r\n bot = mat.reshape(-1)[n::n+1]\r\n bot[...] = 1\r\n mat[:, -1] -= c[:-1]/c[-1]\r\n return mat\r\n\r\n\r\ndef polyroots(c):\r\n \"\"\"\r\n Compute the roots of a polynomial.\r\n\r\n Return the roots (a.k.a. \"zeros\") of the polynomial\r\n\r\n .. math:: p(x) = \\\\sum_i c[i] * x^i.\r\n\r\n Parameters\r\n ----------\r\n c : 1-D array_like\r\n 1-D array of polynomial coefficients.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n Array of the roots of the polynomial. If all the roots are real,\r\n then `out` is also real, otherwise it is complex.\r\n\r\n See Also\r\n --------\r\n chebroots\r\n\r\n Notes\r\n -----\r\n The root estimates are obtained as the eigenvalues of the companion\r\n matrix, Roots far from the origin of the complex plane may have large\r\n errors due to the numerical instability of the power series for such\r\n values. Roots with multiplicity greater than 1 will also show larger\r\n errors as the value of the series near such points is relatively\r\n insensitive to errors in the roots. 
Isolated roots near the origin can\r\n be improved by a few iterations of Newton's method.\r\n\r\n Examples\r\n --------\r\n >>> import numpy.polynomial.polynomial as poly\r\n >>> poly.polyroots(poly.polyfromroots((-1,0,1)))\r\n array([-1., 0., 1.])\r\n >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype\r\n dtype('float64')\r\n >>> j = complex(0,1)\r\n >>> poly.polyroots(poly.polyfromroots((-j,0,j)))\r\n array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j])\r\n\r\n \"\"\"\r\n # c is a trimmed copy\r\n [c] = pu.as_series([c])\r\n if len(c) < 2:\r\n return np.array([], dtype=c.dtype)\r\n if len(c) == 2:\r\n return np.array([-c[0]/c[1]])\r\n\r\n m = polycompanion(c)\r\n r = la.eigvals(m)\r\n r.sort()\r\n return r\r\n\r\n\r\n#\r\n# polynomial class\r\n#\r\n\r\nclass Polynomial(ABCPolyBase):\r\n \"\"\"A power series class.\r\n\r\n The Polynomial class provides the standard Python numerical methods\r\n '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the\r\n attributes and methods listed in the `ABCPolyBase` documentation.\r\n\r\n Parameters\r\n ----------\r\n coef : array_like\r\n Polynomial coefficients in order of increasing degree, i.e.,\r\n ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``.\r\n domain : (2,) array_like, optional\r\n Domain to use. The interval ``[domain[0], domain[1]]`` is mapped\r\n to the interval ``[window[0], window[1]]`` by shifting and scaling.\r\n The default value is [-1, 1].\r\n window : (2,) array_like, optional\r\n Window, see `domain` for its use. The default value is [-1, 1].\r\n\r\n .. versionadded:: 1.6.0\r\n\r\n \"\"\"\r\n # Virtual Functions\r\n _add = staticmethod(polyadd)\r\n _sub = staticmethod(polysub)\r\n _mul = staticmethod(polymul)\r\n _div = staticmethod(polydiv)\r\n _pow = staticmethod(polypow)\r\n _val = staticmethod(polyval)\r\n _int = staticmethod(polyint)\r\n _der = staticmethod(polyder)\r\n _fit = staticmethod(polyfit)\r\n _line = staticmethod(polyline)\r\n _roots = staticmethod(polyroots)\r\n _fromroots = staticmethod(polyfromroots)\r\n\r\n # Virtual properties\r\n nickname = 'poly'\r\n domain = np.array(polydomain)\r\n window = np.array(polydomain)\r\n", "\"\"\"\r\n==================================\r\nConstants (:mod:`scipy.constants`)\r\n==================================\r\n\r\n.. 
currentmodule:: scipy.constants\r\n\r\nPhysical and mathematical constants and units.\r\n\r\n\r\nMathematical constants\r\n======================\r\n\r\n================ =================================================================\r\n``pi`` Pi\r\n``golden`` Golden ratio\r\n``golden_ratio`` Golden ratio\r\n================ =================================================================\r\n\r\n\r\nPhysical constants\r\n==================\r\n\r\n=========================== =================================================================\r\n``c`` speed of light in vacuum\r\n``speed_of_light`` speed of light in vacuum\r\n``mu_0`` the magnetic constant :math:`\\mu_0`\r\n``epsilon_0`` the electric constant (vacuum permittivity), :math:`\\epsilon_0`\r\n``h`` the Planck constant :math:`h`\r\n``Planck`` the Planck constant :math:`h`\r\n``hbar`` :math:`\\hbar = h/(2\\pi)`\r\n``G`` Newtonian constant of gravitation\r\n``gravitational_constant`` Newtonian constant of gravitation\r\n``g`` standard acceleration of gravity\r\n``e`` elementary charge\r\n``elementary_charge`` elementary charge\r\n``R`` molar gas constant\r\n``gas_constant`` molar gas constant\r\n``alpha`` fine-structure constant\r\n``fine_structure`` fine-structure constant\r\n``N_A`` Avogadro constant\r\n``Avogadro`` Avogadro constant\r\n``k`` Boltzmann constant\r\n``Boltzmann`` Boltzmann constant\r\n``sigma`` Stefan-Boltzmann constant :math:`\\sigma`\r\n``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\\sigma`\r\n``Wien`` Wien displacement law constant\r\n``Rydberg`` Rydberg constant\r\n``m_e`` electron mass\r\n``electron_mass`` electron mass\r\n``m_p`` proton mass\r\n``proton_mass`` proton mass\r\n``m_n`` neutron mass\r\n``neutron_mass`` neutron mass\r\n=========================== =================================================================\r\n\r\n\r\nConstants database\r\n------------------\r\n\r\nIn addition to the above variables, :mod:`scipy.constants` also contains the\r\n2014 CODATA recommended values [CODATA2014]_ database containing more physical\r\nconstants.\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n value -- Value in physical_constants indexed by key\r\n unit -- Unit in physical_constants indexed by key\r\n precision -- Relative precision in physical_constants indexed by key\r\n find -- Return list of physical_constant keys with a given string\r\n ConstantWarning -- Constant sought not in newest CODATA data set\r\n\r\n.. 
data:: physical_constants\r\n\r\n Dictionary of physical constants, of the format\r\n ``physical_constants[name] = (value, unit, uncertainty)``.\r\n\r\nAvailable constants:\r\n\r\n====================================================================== ====\r\n%(constant_names)s\r\n====================================================================== ====\r\n\r\n\r\nUnits\r\n=====\r\n\r\nSI prefixes\r\n-----------\r\n\r\n============ =================================================================\r\n``yotta`` :math:`10^{24}`\r\n``zetta`` :math:`10^{21}`\r\n``exa`` :math:`10^{18}`\r\n``peta`` :math:`10^{15}`\r\n``tera`` :math:`10^{12}`\r\n``giga`` :math:`10^{9}`\r\n``mega`` :math:`10^{6}`\r\n``kilo`` :math:`10^{3}`\r\n``hecto`` :math:`10^{2}`\r\n``deka`` :math:`10^{1}`\r\n``deci`` :math:`10^{-1}`\r\n``centi`` :math:`10^{-2}`\r\n``milli`` :math:`10^{-3}`\r\n``micro`` :math:`10^{-6}`\r\n``nano`` :math:`10^{-9}`\r\n``pico`` :math:`10^{-12}`\r\n``femto`` :math:`10^{-15}`\r\n``atto`` :math:`10^{-18}`\r\n``zepto`` :math:`10^{-21}`\r\n============ =================================================================\r\n\r\nBinary prefixes\r\n---------------\r\n\r\n============ =================================================================\r\n``kibi`` :math:`2^{10}`\r\n``mebi`` :math:`2^{20}`\r\n``gibi`` :math:`2^{30}`\r\n``tebi`` :math:`2^{40}`\r\n``pebi`` :math:`2^{50}`\r\n``exbi`` :math:`2^{60}`\r\n``zebi`` :math:`2^{70}`\r\n``yobi`` :math:`2^{80}`\r\n============ =================================================================\r\n\r\nWeight\r\n------\r\n\r\n================= ============================================================\r\n``gram`` :math:`10^{-3}` kg\r\n``metric_ton`` :math:`10^{3}` kg\r\n``grain`` one grain in kg\r\n``lb`` one pound (avoirdupous) in kg\r\n``pound`` one pound (avoirdupous) in kg\r\n``oz`` one ounce in kg\r\n``ounce`` one ounce in kg\r\n``stone`` one stone in kg\r\n``grain`` one grain in kg\r\n``long_ton`` one long ton in kg\r\n``short_ton`` one short ton in kg\r\n``troy_ounce`` one Troy ounce in kg\r\n``troy_pound`` one Troy pound in kg\r\n``carat`` one carat in kg\r\n``m_u`` atomic mass constant (in kg)\r\n``u`` atomic mass constant (in kg)\r\n``atomic_mass`` atomic mass constant (in kg)\r\n================= ============================================================\r\n\r\nAngle\r\n-----\r\n\r\n================= ============================================================\r\n``degree`` degree in radians\r\n``arcmin`` arc minute in radians\r\n``arcminute`` arc minute in radians\r\n``arcsec`` arc second in radians\r\n``arcsecond`` arc second in radians\r\n================= ============================================================\r\n\r\n\r\nTime\r\n----\r\n\r\n================= ============================================================\r\n``minute`` one minute in seconds\r\n``hour`` one hour in seconds\r\n``day`` one day in seconds\r\n``week`` one week in seconds\r\n``year`` one year (365 days) in seconds\r\n``Julian_year`` one Julian year (365.25 days) in seconds\r\n================= ============================================================\r\n\r\n\r\nLength\r\n------\r\n\r\n===================== ============================================================\r\n``inch`` one inch in meters\r\n``foot`` one foot in meters\r\n``yard`` one yard in meters\r\n``mile`` one mile in meters\r\n``mil`` one mil in meters\r\n``pt`` one point in meters\r\n``point`` one point in meters\r\n``survey_foot`` one survey foot in meters\r\n``survey_mile`` one survey mile in 
meters\r\n``nautical_mile`` one nautical mile in meters\r\n``fermi`` one Fermi in meters\r\n``angstrom`` one Angstrom in meters\r\n``micron`` one micron in meters\r\n``au`` one astronomical unit in meters\r\n``astronomical_unit`` one astronomical unit in meters\r\n``light_year`` one light year in meters\r\n``parsec`` one parsec in meters\r\n===================== ============================================================\r\n\r\nPressure\r\n--------\r\n\r\n================= ============================================================\r\n``atm`` standard atmosphere in pascals\r\n``atmosphere`` standard atmosphere in pascals\r\n``bar`` one bar in pascals\r\n``torr`` one torr (mmHg) in pascals\r\n``mmHg`` one torr (mmHg) in pascals\r\n``psi`` one psi in pascals\r\n================= ============================================================\r\n\r\nArea\r\n----\r\n\r\n================= ============================================================\r\n``hectare`` one hectare in square meters\r\n``acre`` one acre in square meters\r\n================= ============================================================\r\n\r\n\r\nVolume\r\n------\r\n\r\n=================== ========================================================\r\n``liter`` one liter in cubic meters\r\n``litre`` one liter in cubic meters\r\n``gallon`` one gallon (US) in cubic meters\r\n``gallon_US`` one gallon (US) in cubic meters\r\n``gallon_imp`` one gallon (UK) in cubic meters\r\n``fluid_ounce`` one fluid ounce (US) in cubic meters\r\n``fluid_ounce_US`` one fluid ounce (US) in cubic meters\r\n``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters\r\n``bbl`` one barrel in cubic meters\r\n``barrel`` one barrel in cubic meters\r\n=================== ========================================================\r\n\r\nSpeed\r\n-----\r\n\r\n================== ==========================================================\r\n``kmh`` kilometers per hour in meters per second\r\n``mph`` miles per hour in meters per second\r\n``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second\r\n``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second\r\n``knot`` one knot in meters per second\r\n================== ==========================================================\r\n\r\n\r\nTemperature\r\n-----------\r\n\r\n===================== =======================================================\r\n``zero_Celsius`` zero of Celsius scale in Kelvin\r\n``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins\r\n===================== =======================================================\r\n\r\n.. 
autosummary::\r\n :toctree: generated/\r\n\r\n convert_temperature\r\n C2K\r\n K2C\r\n F2C\r\n C2F\r\n F2K\r\n K2F\r\n\r\nEnergy\r\n------\r\n\r\n==================== =======================================================\r\n``eV`` one electron volt in Joules\r\n``electron_volt`` one electron volt in Joules\r\n``calorie`` one calorie (thermochemical) in Joules\r\n``calorie_th`` one calorie (thermochemical) in Joules\r\n``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules\r\n``erg`` one erg in Joules\r\n``Btu`` one British thermal unit (International Steam Table) in Joules\r\n``Btu_IT`` one British thermal unit (International Steam Table) in Joules\r\n``Btu_th`` one British thermal unit (thermochemical) in Joules\r\n``ton_TNT`` one ton of TNT in Joules\r\n==================== =======================================================\r\n\r\nPower\r\n-----\r\n\r\n==================== =======================================================\r\n``hp`` one horsepower in watts\r\n``horsepower`` one horsepower in watts\r\n==================== =======================================================\r\n\r\nForce\r\n-----\r\n\r\n==================== =======================================================\r\n``dyn`` one dyne in newtons\r\n``dyne`` one dyne in newtons\r\n``lbf`` one pound force in newtons\r\n``pound_force`` one pound force in newtons\r\n``kgf`` one kilogram force in newtons\r\n``kilogram_force`` one kilogram force in newtons\r\n==================== =======================================================\r\n\r\nOptics\r\n------\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n lambda2nu\r\n nu2lambda\r\n\r\nReferences\r\n==========\r\n\r\n.. [CODATA2014] CODATA Recommended Values of the Fundamental\r\n Physical Constants 2014.\r\n\r\n http://physics.nist.gov/cuu/Constants/index.html\r\n\r\n\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\n# Modules contributed by BasSw ([email protected])\r\nfrom .codata import *\r\nfrom .constants import *\r\nfrom .codata import _obsolete_constants\r\n\r\n_constant_names = [(_k.lower(), _k, _v)\r\n for _k, _v in physical_constants.items()\r\n if _k not in _obsolete_constants]\r\n_constant_names = \"\\n\".join([\"``%s``%s %s %s\" % (_x[1], \" \"*(66-len(_x[1])),\r\n _x[2][0], _x[2][1])\r\n for _x in sorted(_constant_names)])\r\nif __doc__ is not None:\r\n __doc__ = __doc__ % dict(constant_names=_constant_names)\r\n\r\ndel _constant_names\r\n\r\n__all__ = [s for s in dir() if not s.startswith('_')]\r\nfrom numpy.testing import Tester\r\ntest = Tester().test\r\n", "\"\"\"\r\n\"\"\"\r\nfrom __future__ import absolute_import, print_function\r\n\r\n# C:\\home\\ej\\wrk\\scipy\\weave\\examples>python vq.py\r\n# vq with 1000 observation, 10 features and 30 codes fo 100 iterations\r\n# speed in python: 0.150119999647\r\n# [25 29] [ 2.49147266 3.83021032]\r\n# speed in standard c: 0.00710999965668\r\n# [25 29] [ 2.49147266 3.83021032]\r\n# speed up: 21.11\r\n# speed inline/blitz: 0.0186300003529\r\n# [25 29] [ 2.49147272 3.83021021]\r\n# speed up: 8.06\r\n# speed inline/blitz2: 0.00461000084877\r\n# [25 29] [ 2.49147272 3.83021021]\r\n# speed up: 32.56\r\n\r\nfrom numpy import *\r\nimport sys\r\nsys.path.insert(0,'..')\r\nimport scipy.weave.inline_tools as inline_tools\r\nimport scipy.weave.converters as converters\r\nblitz_type_converters = converters.blitz\r\nimport scipy.weave.c_spec as c_spec\r\n\r\n\r\ndef vq(obs,code_book):\r\n # make sure we're looking at arrays.\r\n obs = asarray(obs)\r\n 
code_book = asarray(code_book)\r\n # check for 2d arrays and compatible sizes.\r\n obs_sh = shape(obs)\r\n code_book_sh = shape(code_book)\r\n assert(len(obs_sh) == 2 and len(code_book_sh) == 2)\r\n assert(obs_sh[1] == code_book_sh[1])\r\n type = c_spec.num_to_c_types[obs.typecode()]\r\n # band aid for now.\r\n ar_type = 'PyArray_FLOAT'\r\n code = \"\"\"\r\n #line 37 \"vq.py\"\r\n // Use tensor notation.\r\n blitz::Array<%(type)s,2> dist_sq(Ncode_book[0],Nobs[0]);\r\n blitz::firstIndex i;\r\n blitz::secondIndex j;\r\n blitz::thirdIndex k;\r\n dist_sq = sum(pow2(obs(j,k) - code_book(i,k)),k);\r\n // Surely there is a better way to do this...\r\n PyArrayObject* py_code = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_LONG);\r\n blitz::Array<int,1> code((int*)(py_code->data),\r\n blitz::shape(Nobs[0]), blitz::neverDeleteData);\r\n code = minIndex(dist_sq(j,i),j);\r\n\r\n PyArrayObject* py_min_dist = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_FLOAT);\r\n blitz::Array<float,1> min_dist((float*)(py_min_dist->data),\r\n blitz::shape(Nobs[0]), blitz::neverDeleteData);\r\n min_dist = sqrt(min(dist_sq(j,i),j));\r\n py::tuple results(2);\r\n results[0] = py_code;\r\n results[1] = py_min_dist;\r\n return_val = results;\r\n \"\"\" % locals()\r\n code, distortion = inline_tools.inline(code,['obs','code_book'],\r\n type_converters=blitz_type_converters,\r\n compiler='gcc',\r\n verbose=1)\r\n return code, distortion\r\n\r\n\r\ndef vq2(obs,code_book):\r\n \"\"\" doesn't use blitz (except in conversion)\r\n ALSO DOES NOT HANDLE STRIDED ARRAYS CORRECTLY\r\n \"\"\"\r\n # make sure we're looking at arrays.\r\n obs = asarray(obs)\r\n code_book = asarray(code_book)\r\n # check for 2d arrays and compatible sizes.\r\n obs_sh = shape(obs)\r\n code_book_sh = shape(code_book)\r\n assert(len(obs_sh) == 2 and len(code_book_sh) == 2)\r\n assert(obs_sh[1] == code_book_sh[1])\r\n assert(obs.typecode() == code_book.typecode())\r\n type = c_spec.num_to_c_types[obs.typecode()]\r\n # band aid for now.\r\n ar_type = 'PyArray_FLOAT'\r\n code = \"\"\"\r\n #line 83 \"vq.py\"\r\n // THIS DOES NOT HANDLE STRIDED ARRAYS CORRECTLY\r\n // Surely there is a better way to do this...\r\n PyArrayObject* py_code = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_LONG);\r\n PyArrayObject* py_min_dist = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_FLOAT);\r\n\r\n int* raw_code = (int*)(py_code->data);\r\n float* raw_min_dist = (float*)(py_min_dist->data);\r\n %(type)s* raw_obs = obs.data();\r\n %(type)s* raw_code_book = code_book.data();\r\n %(type)s* this_obs = NULL;\r\n %(type)s* this_code = NULL;\r\n int Nfeatures = Nobs[1];\r\n float diff,dist;\r\n for(int i=0; i < Nobs[0]; i++)\r\n {\r\n this_obs = &raw_obs[i*Nfeatures];\r\n raw_min_dist[i] = (%(type)s)10000000.; // big number\r\n for(int j=0; j < Ncode_book[0]; j++)\r\n {\r\n this_code = &raw_code_book[j*Nfeatures];\r\n dist = 0;\r\n for(int k=0; k < Nfeatures; k++)\r\n {\r\n diff = this_obs[k] - this_code[k];\r\n dist += diff*diff;\r\n }\r\n dist = dist;\r\n if (dist < raw_min_dist[i])\r\n {\r\n raw_code[i] = j;\r\n raw_min_dist[i] = dist;\r\n }\r\n }\r\n raw_min_dist[i] = sqrt(raw_min_dist[i]);\r\n }\r\n py::tuple results(2);\r\n results[0] = py_code;\r\n results[1] = py_min_dist;\r\n return_val = results;\r\n \"\"\" % locals()\r\n code, distortion = inline_tools.inline(code,['obs','code_book'],\r\n type_converters=blitz_type_converters,\r\n compiler='gcc',\r\n verbose=1)\r\n return code, distortion\r\n\r\n\r\ndef vq3(obs,code_book):\r\n \"\"\" Uses 
standard array conversion completely bi-passing blitz.\r\n THIS DOES NOT HANDLE STRIDED ARRAYS CORRECTLY\r\n \"\"\"\r\n # make sure we're looking at arrays.\r\n obs = asarray(obs)\r\n code_book = asarray(code_book)\r\n # check for 2d arrays and compatible sizes.\r\n obs_sh = shape(obs)\r\n code_book_sh = shape(code_book)\r\n assert(len(obs_sh) == 2 and len(code_book_sh) == 2)\r\n assert(obs_sh[1] == code_book_sh[1])\r\n assert(obs.typecode() == code_book.typecode())\r\n type = c_spec.num_to_c_types[obs.typecode()]\r\n code = \"\"\"\r\n #line 139 \"vq.py\"\r\n // Surely there is a better way to do this...\r\n PyArrayObject* py_code = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_LONG);\r\n PyArrayObject* py_min_dist = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_FLOAT);\r\n\r\n int* code_data = (int*)(py_code->data);\r\n float* min_dist_data = (float*)(py_min_dist->data);\r\n %(type)s* this_obs = NULL;\r\n %(type)s* this_code = NULL;\r\n int Nfeatures = Nobs[1];\r\n float diff,dist;\r\n\r\n for(int i=0; i < Nobs[0]; i++)\r\n {\r\n this_obs = &obs_data[i*Nfeatures];\r\n min_dist_data[i] = (float)10000000.; // big number\r\n for(int j=0; j < Ncode_book[0]; j++)\r\n {\r\n this_code = &code_book_data[j*Nfeatures];\r\n dist = 0;\r\n for(int k=0; k < Nfeatures; k++)\r\n {\r\n diff = this_obs[k] - this_code[k];\r\n dist += diff*diff;\r\n }\r\n if (dist < min_dist_data[i])\r\n {\r\n code_data[i] = j;\r\n min_dist_data[i] = dist;\r\n }\r\n }\r\n min_dist_data[i] = sqrt(min_dist_data[i]);\r\n }\r\n py::tuple results(2);\r\n results[0] = py_code;\r\n results[1] = py_min_dist;\r\n return_val = results;\r\n \"\"\" % locals()\r\n # this is an unpleasant way to specify type factories -- work on it.\r\n import ext_tools\r\n code, distortion = inline_tools.inline(code,['obs','code_book'])\r\n return code, distortion\r\n\r\nimport time\r\nimport RandomArray\r\n\r\n\r\ndef compare(m,Nobs,Ncodes,Nfeatures):\r\n obs = RandomArray.normal(0.,1.,(Nobs,Nfeatures))\r\n codes = RandomArray.normal(0.,1.,(Ncodes,Nfeatures))\r\n import scipy.cluster.vq\r\n scipy.cluster.vq\r\n print('vq with %d observation, %d features and %d codes for %d iterations' %\r\n (Nobs,Nfeatures,Ncodes,m))\r\n t1 = time.time()\r\n for i in range(m):\r\n code,dist = scipy.cluster.vq.py_vq(obs,codes)\r\n t2 = time.time()\r\n py = (t2-t1)\r\n print(' speed in python:', (t2 - t1)/m)\r\n print(code[:2],dist[:2])\r\n\r\n t1 = time.time()\r\n for i in range(m):\r\n code,dist = scipy.cluster.vq.vq(obs,codes)\r\n t2 = time.time()\r\n print(' speed in standard c:', (t2 - t1)/m)\r\n print(code[:2],dist[:2])\r\n print(' speed up: %3.2f' % (py/(t2-t1)))\r\n\r\n # load into cache\r\n b = vq(obs,codes)\r\n t1 = time.time()\r\n for i in range(m):\r\n code,dist = vq(obs,codes)\r\n t2 = time.time()\r\n print(' speed inline/blitz:',(t2 - t1) / m)\r\n print(code[:2],dist[:2])\r\n print(' speed up: %3.2f' % (py/(t2-t1)))\r\n\r\n # load into cache\r\n b = vq2(obs,codes)\r\n t1 = time.time()\r\n for i in range(m):\r\n code,dist = vq2(obs,codes)\r\n t2 = time.time()\r\n print(' speed inline/blitz2:',(t2 - t1) / m)\r\n print(code[:2],dist[:2])\r\n print(' speed up: %3.2f' % (py/(t2-t1)))\r\n\r\n # load into cache\r\n b = vq3(obs,codes)\r\n t1 = time.time()\r\n for i in range(m):\r\n code,dist = vq3(obs,codes)\r\n t2 = time.time()\r\n print(' speed using C arrays:',(t2 - t1) / m)\r\n print(code[:2],dist[:2])\r\n print(' speed up: %3.2f' % (py/(t2-t1)))\r\n\r\nif __name__ == \"__main__\":\r\n compare(100,1000,30,10)\r\n #compare(1,10,2,10)\r\n", "# -*- 
coding: utf-8 -*-\r\n\"\"\"Functions for FIR filter design.\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\nfrom math import ceil, log\r\nimport numpy as np\r\nfrom numpy.fft import irfft\r\nfrom scipy.special import sinc\r\nfrom scipy.linalg import toeplitz, hankel, pinv\r\nfrom . import sigtools\r\n\r\n__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',\r\n 'firwin', 'firwin2', 'remez', 'firls']\r\n\r\n\r\n# Some notes on function parameters:\r\n#\r\n# `cutoff` and `width` are given as a numbers between 0 and 1. These\r\n# are relative frequencies, expressed as a fraction of the Nyquist rate.\r\n# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width\r\n# of 300 Hz.\r\n#\r\n# The `order` of a FIR filter is one less than the number of taps.\r\n# This is a potential source of confusion, so in the following code,\r\n# we will always use the number of taps as the parameterization of\r\n# the 'size' of the filter. The \"number of taps\" means the number\r\n# of coefficients, which is the same as the length of the impulse\r\n# response of the filter.\r\n\r\n\r\ndef kaiser_beta(a):\r\n \"\"\"Compute the Kaiser parameter `beta`, given the attenuation `a`.\r\n\r\n Parameters\r\n ----------\r\n a : float\r\n The desired attenuation in the stopband and maximum ripple in\r\n the passband, in dB. This should be a *positive* number.\r\n\r\n Returns\r\n -------\r\n beta : float\r\n The `beta` parameter to be used in the formula for a Kaiser window.\r\n\r\n References\r\n ----------\r\n Oppenheim, Schafer, \"Discrete-Time Signal Processing\", p.475-476.\r\n \"\"\"\r\n if a > 50:\r\n beta = 0.1102 * (a - 8.7)\r\n elif a > 21:\r\n beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)\r\n else:\r\n beta = 0.0\r\n return beta\r\n\r\n\r\ndef kaiser_atten(numtaps, width):\r\n \"\"\"Compute the attenuation of a Kaiser FIR filter.\r\n\r\n Given the number of taps `N` and the transition width `width`, compute the\r\n attenuation `a` in dB, given by Kaiser's formula:\r\n\r\n a = 2.285 * (N - 1) * pi * width + 7.95\r\n\r\n Parameters\r\n ----------\r\n numtaps : int\r\n The number of taps in the FIR filter.\r\n width : float\r\n The desired width of the transition region between passband and\r\n stopband (or, in general, at any discontinuity) for the filter.\r\n\r\n Returns\r\n -------\r\n a : float\r\n The attenuation of the ripple, in dB.\r\n\r\n See Also\r\n --------\r\n kaiserord, kaiser_beta\r\n \"\"\"\r\n a = 2.285 * (numtaps - 1) * np.pi * width + 7.95\r\n return a\r\n\r\n\r\ndef kaiserord(ripple, width):\r\n \"\"\"\r\n Design a Kaiser window to limit ripple and width of transition region.\r\n\r\n Parameters\r\n ----------\r\n ripple : float\r\n Positive number specifying maximum ripple in passband (dB) and minimum\r\n ripple in stopband.\r\n width : float\r\n Width of transition region (normalized so that 1 corresponds to pi\r\n radians / sample).\r\n\r\n Returns\r\n -------\r\n numtaps : int\r\n The length of the kaiser window.\r\n beta : float\r\n The beta parameter for the kaiser window.\r\n\r\n See Also\r\n --------\r\n kaiser_beta, kaiser_atten\r\n\r\n Notes\r\n -----\r\n There are several ways to obtain the Kaiser window:\r\n\r\n - ``signal.kaiser(numtaps, beta, sym=True)``\r\n - ``signal.get_window(beta, numtaps)``\r\n - ``signal.get_window(('kaiser', beta), numtaps)``\r\n\r\n The empirical equations discovered by Kaiser are used.\r\n\r\n References\r\n ----------\r\n Oppenheim, Schafer, \"Discrete-Time Signal Processing\", p.475-476.\r\n\r\n 
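    Examples
    --------
    A small usage sketch (illustrative only; the numbers follow directly from
    Kaiser's formulas given above): window parameters for a ripple
    specification of 65 dB and a transition width of 0.2, where 1.0
    corresponds to the Nyquist rate.

    >>> from scipy.signal import kaiserord
    >>> numtaps, beta = kaiserord(65, 0.2)
    >>> numtaps
    41
    >>> round(beta, 3)
    6.204
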
\"\"\"\r\n A = abs(ripple) # in case somebody is confused as to what's meant\r\n if A < 8:\r\n # Formula for N is not valid in this range.\r\n raise ValueError(\"Requested maximum ripple attentuation %f is too \"\r\n \"small for the Kaiser formula.\" % A)\r\n beta = kaiser_beta(A)\r\n\r\n # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter\r\n # order, so we have to add 1 to get the number of taps.\r\n numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1\r\n\r\n return int(ceil(numtaps)), beta\r\n\r\n\r\ndef firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,\r\n scale=True, nyq=1.0):\r\n \"\"\"\r\n FIR filter design using the window method.\r\n\r\n This function computes the coefficients of a finite impulse response\r\n filter. The filter will have linear phase; it will be Type I if\r\n `numtaps` is odd and Type II if `numtaps` is even.\r\n\r\n Type II filters always have zero response at the Nyquist rate, so a\r\n ValueError exception is raised if firwin is called with `numtaps` even and\r\n having a passband whose right end is at the Nyquist rate.\r\n\r\n Parameters\r\n ----------\r\n numtaps : int\r\n Length of the filter (number of coefficients, i.e. the filter\r\n order + 1). `numtaps` must be even if a passband includes the\r\n Nyquist frequency.\r\n cutoff : float or 1D array_like\r\n Cutoff frequency of filter (expressed in the same units as `nyq`)\r\n OR an array of cutoff frequencies (that is, band edges). In the\r\n latter case, the frequencies in `cutoff` should be positive and\r\n monotonically increasing between 0 and `nyq`. The values 0 and\r\n `nyq` must not be included in `cutoff`.\r\n width : float or None, optional\r\n If `width` is not None, then assume it is the approximate width\r\n of the transition region (expressed in the same units as `nyq`)\r\n for use in Kaiser FIR filter design. In this case, the `window`\r\n argument is ignored.\r\n window : string or tuple of string and parameter values, optional\r\n Desired window to use. See `scipy.signal.get_window` for a list\r\n of windows and required parameters.\r\n pass_zero : bool, optional\r\n If True, the gain at the frequency 0 (i.e. the \"DC gain\") is 1.\r\n Otherwise the DC gain is 0.\r\n scale : bool, optional\r\n Set to True to scale the coefficients so that the frequency\r\n response is exactly unity at a certain frequency.\r\n That frequency is either:\r\n\r\n - 0 (DC) if the first passband starts at 0 (i.e. pass_zero\r\n is True)\r\n - `nyq` (the Nyquist rate) if the first passband ends at\r\n `nyq` (i.e the filter is a single band highpass filter);\r\n center of first passband otherwise\r\n\r\n nyq : float, optional\r\n Nyquist frequency. 
Each frequency in `cutoff` must be between 0\r\n and `nyq`.\r\n\r\n Returns\r\n -------\r\n h : (numtaps,) ndarray\r\n Coefficients of length `numtaps` FIR filter.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If any value in `cutoff` is less than or equal to 0 or greater\r\n than or equal to `nyq`, if the values in `cutoff` are not strictly\r\n monotonically increasing, or if `numtaps` is even but a passband\r\n includes the Nyquist frequency.\r\n\r\n See also\r\n --------\r\n firwin2\r\n firls\r\n remez\r\n\r\n Examples\r\n --------\r\n Low-pass from 0 to f:\r\n\r\n >>> from scipy import signal\r\n >>> numtaps = 3\r\n >>> f = 0.1\r\n >>> signal.firwin(numtaps, f)\r\n array([ 0.06799017, 0.86401967, 0.06799017])\r\n\r\n Use a specific window function:\r\n\r\n >>> signal.firwin(numtaps, f, window='nuttall')\r\n array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])\r\n\r\n High-pass ('stop' from 0 to f):\r\n\r\n >>> signal.firwin(numtaps, f, pass_zero=False)\r\n array([-0.00859313, 0.98281375, -0.00859313])\r\n\r\n Band-pass:\r\n\r\n >>> f1, f2 = 0.1, 0.2\r\n >>> signal.firwin(numtaps, [f1, f2], pass_zero=False)\r\n array([ 0.06301614, 0.88770441, 0.06301614])\r\n\r\n Band-stop:\r\n\r\n >>> signal.firwin(numtaps, [f1, f2])\r\n array([-0.00801395, 1.0160279 , -0.00801395])\r\n\r\n Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):\r\n\r\n >>> f3, f4 = 0.3, 0.4\r\n >>> signal.firwin(numtaps, [f1, f2, f3, f4])\r\n array([-0.01376344, 1.02752689, -0.01376344])\r\n\r\n Multi-band (passbands are [f1, f2] and [f3,f4]):\r\n\r\n >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)\r\n array([ 0.04890915, 0.91284326, 0.04890915])\r\n\r\n \"\"\"\r\n\r\n # The major enhancements to this function added in November 2010 were\r\n # developed by Tom Krauss (see ticket #902).\r\n\r\n cutoff = np.atleast_1d(cutoff) / float(nyq)\r\n\r\n # Check for invalid input.\r\n if cutoff.ndim > 1:\r\n raise ValueError(\"The cutoff argument must be at most \"\r\n \"one-dimensional.\")\r\n if cutoff.size == 0:\r\n raise ValueError(\"At least one cutoff frequency must be given.\")\r\n if cutoff.min() <= 0 or cutoff.max() >= 1:\r\n raise ValueError(\"Invalid cutoff frequency: frequencies must be \"\r\n \"greater than 0 and less than nyq.\")\r\n if np.any(np.diff(cutoff) <= 0):\r\n raise ValueError(\"Invalid cutoff frequencies: the frequencies \"\r\n \"must be strictly increasing.\")\r\n\r\n if width is not None:\r\n # A width was given. Find the beta parameter of the Kaiser window\r\n # and set `window`. 
This overrides the value of `window` passed in.\r\n atten = kaiser_atten(numtaps, float(width) / nyq)\r\n beta = kaiser_beta(atten)\r\n window = ('kaiser', beta)\r\n\r\n pass_nyquist = bool(cutoff.size & 1) ^ pass_zero\r\n if pass_nyquist and numtaps % 2 == 0:\r\n raise ValueError(\"A filter with an even number of coefficients must \"\r\n \"have zero response at the Nyquist rate.\")\r\n\r\n # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff\r\n # is even, and each pair in cutoff corresponds to passband.\r\n cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))\r\n\r\n # `bands` is a 2D array; each row gives the left and right edges of\r\n # a passband.\r\n bands = cutoff.reshape(-1, 2)\r\n\r\n # Build up the coefficients.\r\n alpha = 0.5 * (numtaps - 1)\r\n m = np.arange(0, numtaps) - alpha\r\n h = 0\r\n for left, right in bands:\r\n h += right * sinc(right * m)\r\n h -= left * sinc(left * m)\r\n\r\n # Get and apply the window function.\r\n from .signaltools import get_window\r\n win = get_window(window, numtaps, fftbins=False)\r\n h *= win\r\n\r\n # Now handle scaling if desired.\r\n if scale:\r\n # Get the first passband.\r\n left, right = bands[0]\r\n if left == 0:\r\n scale_frequency = 0.0\r\n elif right == 1:\r\n scale_frequency = 1.0\r\n else:\r\n scale_frequency = 0.5 * (left + right)\r\n c = np.cos(np.pi * m * scale_frequency)\r\n s = np.sum(h * c)\r\n h /= s\r\n\r\n return h\r\n\r\n\r\n# Original version of firwin2 from scipy ticket #457, submitted by \"tash\".\r\n#\r\n# Rewritten by Warren Weckesser, 2010.\r\n\r\ndef firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0,\r\n antisymmetric=False):\r\n \"\"\"\r\n FIR filter design using the window method.\r\n\r\n From the given frequencies `freq` and corresponding gains `gain`,\r\n this function constructs an FIR filter with linear phase and\r\n (approximately) the given frequency response.\r\n\r\n Parameters\r\n ----------\r\n numtaps : int\r\n The number of taps in the FIR filter. `numtaps` must be less than\r\n `nfreqs`.\r\n freq : array_like, 1D\r\n The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being\r\n Nyquist. The Nyquist frequency can be redefined with the argument\r\n `nyq`.\r\n The values in `freq` must be nondecreasing. A value can be repeated\r\n once to implement a discontinuity. The first value in `freq` must\r\n be 0, and the last value must be `nyq`.\r\n gain : array_like\r\n The filter gains at the frequency sampling points. Certain\r\n constraints to gain values, depending on the filter type, are applied,\r\n see Notes for details.\r\n nfreqs : int, optional\r\n The size of the interpolation mesh used to construct the filter.\r\n For most efficient behavior, this should be a power of 2 plus 1\r\n (e.g, 129, 257, etc). The default is one more than the smallest\r\n power of 2 that is not less than `numtaps`. `nfreqs` must be greater\r\n than `numtaps`.\r\n window : string or (string, float) or float, or None, optional\r\n Window function to use. Default is \"hamming\". See\r\n `scipy.signal.get_window` for the complete list of possible values.\r\n If None, no window function is applied.\r\n nyq : float, optional\r\n Nyquist frequency. 
Each frequency in `freq` must be between 0 and\r\n `nyq` (inclusive).\r\n antisymmetric : bool, optional\r\n Whether resulting impulse response is symmetric/antisymmetric.\r\n See Notes for more details.\r\n\r\n Returns\r\n -------\r\n taps : ndarray\r\n The filter coefficients of the FIR filter, as a 1-D array of length\r\n `numtaps`.\r\n\r\n See also\r\n --------\r\n firls\r\n firwin\r\n remez\r\n\r\n Notes\r\n -----\r\n From the given set of frequencies and gains, the desired response is\r\n constructed in the frequency domain. The inverse FFT is applied to the\r\n desired response to create the associated convolution kernel, and the\r\n first `numtaps` coefficients of this kernel, scaled by `window`, are\r\n returned.\r\n\r\n The FIR filter will have linear phase. The type of filter is determined by\r\n the value of 'numtaps` and `antisymmetric` flag.\r\n There are four possible combinations:\r\n\r\n - odd `numtaps`, `antisymmetric` is False, type I filter is produced\r\n - even `numtaps`, `antisymmetric` is False, type II filter is produced\r\n - odd `numtaps`, `antisymmetric` is True, type III filter is produced\r\n - even `numtaps`, `antisymmetric` is True, type IV filter is produced\r\n\r\n Magnitude response of all but type I filters are subjects to following\r\n constraints:\r\n\r\n - type II -- zero at the Nyquist frequency\r\n - type III -- zero at zero and Nyquist frequencies\r\n - type IV -- zero at zero frequency\r\n\r\n .. versionadded:: 0.9.0\r\n\r\n References\r\n ----------\r\n .. [1] Oppenheim, A. V. and Schafer, R. W., \"Discrete-Time Signal\r\n Processing\", Prentice-Hall, Englewood Cliffs, New Jersey (1989).\r\n (See, for example, Section 7.4.)\r\n\r\n .. [2] Smith, Steven W., \"The Scientist and Engineer's Guide to Digital\r\n Signal Processing\", Ch. 17. 
http://www.dspguide.com/ch17/1.htm\r\n\r\n Examples\r\n --------\r\n A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and\r\n that decreases linearly on [0.5, 1.0] from 1 to 0:\r\n\r\n >>> from scipy import signal\r\n >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])\r\n >>> print(taps[72:78])\r\n [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]\r\n\r\n \"\"\"\r\n\r\n if len(freq) != len(gain):\r\n raise ValueError('freq and gain must be of same length.')\r\n\r\n if nfreqs is not None and numtaps >= nfreqs:\r\n raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '\r\n 'called with ntaps=%d and nfreqs=%s') %\r\n (numtaps, nfreqs))\r\n\r\n if freq[0] != 0 or freq[-1] != nyq:\r\n raise ValueError('freq must start with 0 and end with `nyq`.')\r\n d = np.diff(freq)\r\n if (d < 0).any():\r\n raise ValueError('The values in freq must be nondecreasing.')\r\n d2 = d[:-1] + d[1:]\r\n if (d2 == 0).any():\r\n raise ValueError('A value in freq must not occur more than twice.')\r\n\r\n if antisymmetric:\r\n if numtaps % 2 == 0:\r\n ftype = 4\r\n else:\r\n ftype = 3\r\n else:\r\n if numtaps % 2 == 0:\r\n ftype = 2\r\n else:\r\n ftype = 1\r\n\r\n if ftype == 2 and gain[-1] != 0.0:\r\n raise ValueError(\"A Type II filter must have zero gain at the \"\r\n \"Nyquist rate.\")\r\n elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):\r\n raise ValueError(\"A Type III filter must have zero gain at zero \"\r\n \"and Nyquist rates.\")\r\n elif ftype == 4 and gain[0] != 0.0:\r\n raise ValueError(\"A Type IV filter must have zero gain at zero rate.\")\r\n\r\n if nfreqs is None:\r\n nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))\r\n\r\n # Tweak any repeated values in freq so that interp works.\r\n eps = np.finfo(float).eps\r\n for k in range(len(freq)):\r\n if k < len(freq) - 1 and freq[k] == freq[k + 1]:\r\n freq[k] = freq[k] - eps\r\n freq[k + 1] = freq[k + 1] + eps\r\n\r\n # Linearly interpolate the desired response on a uniform mesh `x`.\r\n x = np.linspace(0.0, nyq, nfreqs)\r\n fx = np.interp(x, freq, gain)\r\n\r\n # Adjust the phases of the coefficients so that the first `ntaps` of the\r\n # inverse FFT are the desired filter coefficients.\r\n shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)\r\n if ftype > 2:\r\n shift *= 1j\r\n\r\n fx2 = fx * shift\r\n\r\n # Use irfft to compute the inverse FFT.\r\n out_full = irfft(fx2)\r\n\r\n if window is not None:\r\n # Create the window to apply to the filter coefficients.\r\n from .signaltools import get_window\r\n wind = get_window(window, numtaps, fftbins=False)\r\n else:\r\n wind = 1\r\n\r\n # Keep only the first `numtaps` coefficients in `out`, and multiply by\r\n # the window.\r\n out = out_full[:numtaps] * wind\r\n\r\n if ftype == 3:\r\n out[out.size // 2] = 0.0\r\n\r\n return out\r\n\r\n\r\ndef remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',\r\n maxiter=25, grid_density=16):\r\n \"\"\"\r\n Calculate the minimax optimal filter using the Remez exchange algorithm.\r\n\r\n Calculate the filter-coefficients for the finite impulse response\r\n (FIR) filter whose transfer function minimizes the maximum error\r\n between the desired gain and the realized gain in the specified\r\n frequency bands using the Remez exchange algorithm.\r\n\r\n Parameters\r\n ----------\r\n numtaps : int\r\n The desired number of taps in the filter. 
The number of taps is\r\n the number of terms in the filter, or the filter order plus one.\r\n bands : array_like\r\n A monotonic sequence containing the band edges in Hz.\r\n All elements must be non-negative and less than half the sampling\r\n frequency as given by `Hz`.\r\n desired : array_like\r\n A sequence half the size of bands containing the desired gain\r\n in each of the specified bands.\r\n weight : array_like, optional\r\n A relative weighting to give to each band region. The length of\r\n `weight` has to be half the length of `bands`.\r\n Hz : scalar, optional\r\n The sampling frequency in Hz. Default is 1.\r\n type : {'bandpass', 'differentiator', 'hilbert'}, optional\r\n The type of filter:\r\n\r\n * 'bandpass' : flat response in bands. This is the default.\r\n\r\n * 'differentiator' : frequency proportional response in bands.\r\n\r\n * 'hilbert' : filter with odd symmetry, that is, type III\r\n (for even order) or type IV (for odd order)\r\n linear phase filters.\r\n\r\n maxiter : int, optional\r\n Maximum number of iterations of the algorithm. Default is 25.\r\n grid_density : int, optional\r\n Grid density. The dense grid used in `remez` is of size\r\n ``(numtaps + 1) * grid_density``. Default is 16.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n A rank-1 array containing the coefficients of the optimal\r\n (in a minimax sense) filter.\r\n\r\n See Also\r\n --------\r\n freqz\r\n firls\r\n firwin\r\n firwin2\r\n\r\n References\r\n ----------\r\n .. [1] J. H. McClellan and T. W. Parks, \"A unified approach to the\r\n design of optimum FIR linear phase digital filters\",\r\n IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.\r\n .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, \"A Computer\r\n Program for Designing Optimum FIR Linear Phase Digital\r\n Filters\", IEEE Trans. Audio Electroacoust., vol. AU-21,\r\n pp. 506-525, 1973.\r\n\r\n Examples\r\n --------\r\n We want to construct a filter with a passband at 0.2-0.4 Hz, and\r\n stop bands at 0-0.1 Hz and 0.45-0.5 Hz. 
Note that this means that the\r\n behavior in the frequency ranges between those bands is unspecified and\r\n may overshoot.\r\n\r\n >>> from scipy import signal\r\n >>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])\r\n >>> freq, response = signal.freqz(bpass)\r\n >>> ampl = np.abs(response)\r\n\r\n >>> import matplotlib.pyplot as plt\r\n >>> fig = plt.figure()\r\n >>> ax1 = fig.add_subplot(111)\r\n >>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz\r\n >>> plt.show()\r\n\r\n \"\"\"\r\n # Convert type\r\n try:\r\n tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]\r\n except KeyError:\r\n raise ValueError(\"Type must be 'bandpass', 'differentiator', \"\r\n \"or 'hilbert'\")\r\n\r\n # Convert weight\r\n if weight is None:\r\n weight = [1] * len(desired)\r\n\r\n bands = np.asarray(bands).copy()\r\n return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,\r\n maxiter, grid_density)\r\n\r\n\r\ndef firls(numtaps, bands, desired, weight=None, nyq=1.):\r\n \"\"\"\r\n FIR filter design using least-squares error minimization.\r\n\r\n Calculate the filter coefficients for the linear-phase finite\r\n impulse response (FIR) filter which has the best approximation\r\n to the desired frequency response described by `bands` and\r\n `desired` in the least squares sense (i.e., the integral of the\r\n weighted mean-squared error within the specified bands is\r\n minimized).\r\n\r\n Parameters\r\n ----------\r\n numtaps : int\r\n The number of taps in the FIR filter. `numtaps` must be odd.\r\n bands : array_like\r\n A monotonic nondecreasing sequence containing the band edges in\r\n Hz. All elements must be non-negative and less than or equal to\r\n the Nyquist frequency given by `nyq`.\r\n desired : array_like\r\n A sequence the same size as `bands` containing the desired gain\r\n at the start and end point of each band.\r\n weight : array_like, optional\r\n A relative weighting to give to each band region when solving\r\n the least squares problem. `weight` has to be half the size of\r\n `bands`.\r\n nyq : float, optional\r\n Nyquist frequency. Each frequency in `bands` must be between 0\r\n and `nyq` (inclusive).\r\n\r\n Returns\r\n -------\r\n coeffs : ndarray\r\n Coefficients of the optimal (in a least squares sense) FIR filter.\r\n\r\n See also\r\n --------\r\n firwin\r\n firwin2\r\n\r\n Notes\r\n -----\r\n This implementation follows the algorithm given in [1]_.\r\n As noted there, least squares design has multiple advantages:\r\n\r\n 1. Optimal in a least-squares sense.\r\n 2. Simple, non-iterative method.\r\n 3. The general solution can obtained by solving a linear\r\n system of equations.\r\n 4. Allows the use of a frequency dependent weighting function.\r\n\r\n This function constructs a Type I linear phase FIR filter, which\r\n contains an odd number of `coeffs` satisfying for :math:`n < numtaps`:\r\n\r\n .. math:: coeffs(n) = coeffs(numtaps - 1 - n)\r\n\r\n The odd number of coefficients and filter symmetry avoid boundary\r\n conditions that could otherwise occur at the Nyquist and 0 frequencies\r\n (e.g., for Type II, III, or IV variants).\r\n\r\n .. versionadded:: 0.18\r\n\r\n References\r\n ----------\r\n .. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares.\r\n OpenStax CNX. Aug 9, 2005.\r\n http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7\r\n\r\n Examples\r\n --------\r\n We want to construct a band-pass filter. 
Note that the behavior in the\r\n frequency ranges between our stop bands and pass bands is unspecified,\r\n and thus may overshoot depending on the parameters of our filter:\r\n\r\n >>> from scipy import signal\r\n >>> import matplotlib.pyplot as plt\r\n >>> fig, axs = plt.subplots(2)\r\n >>> nyq = 5. # Hz\r\n >>> desired = (0, 0, 1, 1, 0, 0)\r\n >>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))):\r\n ... fir_firls = signal.firls(73, bands, desired, nyq=nyq)\r\n ... fir_remez = signal.remez(73, bands, desired[::2], Hz=2 * nyq)\r\n ... fir_firwin2 = signal.firwin2(73, bands, desired, nyq=nyq)\r\n ... hs = list()\r\n ... ax = axs[bi]\r\n ... for fir in (fir_firls, fir_remez, fir_firwin2):\r\n ... freq, response = signal.freqz(fir)\r\n ... hs.append(ax.semilogy(nyq*freq/(np.pi), np.abs(response))[0])\r\n ... for band, gains in zip(zip(bands[::2], bands[1::2]), zip(desired[::2], desired[1::2])):\r\n ... ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2)\r\n ... if bi == 0:\r\n ... ax.legend(hs, ('firls', 'remez', 'firwin2'), loc='lower center', frameon=False)\r\n ... else:\r\n ... ax.set_xlabel('Frequency (Hz)')\r\n ... ax.grid(True)\r\n ... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude')\r\n ...\r\n >>> fig.tight_layout()\r\n >>> plt.show()\r\n\r\n \"\"\" # noqa\r\n numtaps = int(numtaps)\r\n if numtaps % 2 == 0 or numtaps < 1:\r\n raise ValueError(\"numtaps must be odd and >= 1\")\r\n M = (numtaps-1) // 2\r\n\r\n # normalize bands 0->1 and make it 2 columns\r\n nyq = float(nyq)\r\n if nyq <= 0:\r\n raise ValueError('nyq must be positive, got %s <= 0.' % nyq)\r\n bands = np.asarray(bands).flatten() / nyq\r\n if len(bands) % 2 != 0:\r\n raise ValueError(\"bands must contain frequency pairs.\")\r\n bands.shape = (-1, 2)\r\n\r\n # check remaining params\r\n desired = np.asarray(desired).flatten()\r\n if bands.size != desired.size:\r\n raise ValueError(\"desired must have one entry per frequency, got %s \"\r\n \"gains for %s frequencies.\"\r\n % (desired.size, bands.size))\r\n desired.shape = (-1, 2)\r\n if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():\r\n raise ValueError(\"bands must be monotonically nondecreasing and have \"\r\n \"width > 0.\")\r\n if (bands[:-1, 1] > bands[1:, 0]).any():\r\n raise ValueError(\"bands must not overlap.\")\r\n if (desired < 0).any():\r\n raise ValueError(\"desired must be non-negative.\")\r\n if weight is None:\r\n weight = np.ones(len(desired))\r\n weight = np.asarray(weight).flatten()\r\n if len(weight) != len(desired):\r\n raise ValueError(\"weight must be the same size as the number of \"\r\n \"band pairs (%s).\" % (len(bands),))\r\n if (weight < 0).any():\r\n raise ValueError(\"weight must be non-negative.\")\r\n\r\n # Set up the linear matrix equation to be solved, Qa = b\r\n\r\n # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)\r\n # where Q1(k,n)=q(k−n) and Q2(k,n)=q(k+n), i.e. 
a Toeplitz plus Hankel.\r\n\r\n # We omit the factor of 0.5 above, instead adding it during coefficient\r\n # calculation.\r\n\r\n # We also omit the 1/π from both Q and b equations, as they cancel\r\n # during solving.\r\n\r\n # We have that:\r\n # q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)\r\n # Using our nomalization ω=πf and with a constant weight W over each\r\n # interval f1->f2 we get:\r\n # q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf\r\n # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).\r\n n = np.arange(numtaps)[:, np.newaxis, np.newaxis]\r\n q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight)\r\n\r\n # Now we assemble our sum of Toeplitz and Hankel\r\n Q1 = toeplitz(q[:M+1])\r\n Q2 = hankel(q[:M+1], q[M:])\r\n Q = Q1 + Q2\r\n\r\n # Now for b(n) we have that:\r\n # b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π)\r\n # Using our nomalization ω=πf and with a constant weight W over each\r\n # interval and a linear term for D(ω) we get (over each f1->f2 interval):\r\n # b(n) = W ∫ (mf+c)cos(πnf)df\r\n # = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2\r\n # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).\r\n n = n[:M + 1] # only need this many coefficients here\r\n # Choose m and c such that we are at the start and end weights\r\n m = (np.diff(desired, axis=1) / np.diff(bands, axis=1))\r\n c = desired[:, [0]] - bands[:, [0]] * m\r\n b = bands * (m*bands + c) * np.sinc(bands * n)\r\n # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0\r\n b[0] -= m * bands * bands / 2.\r\n b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2\r\n b = np.dot(np.diff(b, axis=2)[:, :, 0], weight)\r\n\r\n # Now we can solve the equation (use pinv because Q can be rank deficient)\r\n a = np.dot(pinv(Q), b)\r\n\r\n # make coefficients symmetric (linear phase)\r\n coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:]))\r\n return coeffs\r\n", "import numpy as np\r\nfrom scipy.special._testutils import FuncData\r\nfrom scipy.special import gamma, gammaln, loggamma\r\n\r\n\r\ndef test_identities1():\r\n # test the identity exp(loggamma(z)) = gamma(z)\r\n x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])\r\n y = x.copy()\r\n x, y = np.meshgrid(x, y)\r\n z = (x + 1J*y).flatten()\r\n dataset = np.vstack((z, gamma(z))).T\r\n\r\n def f(z):\r\n return np.exp(loggamma(z))\r\n\r\n FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()\r\n\r\n\r\ndef test_identities2():\r\n # test the identity loggamma(z + 1) = log(z) + loggamma(z)\r\n x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])\r\n y = x.copy()\r\n x, y = np.meshgrid(x, y)\r\n z = (x + 1J*y).flatten()\r\n dataset = np.vstack((z, np.log(z) + loggamma(z))).T\r\n\r\n def f(z):\r\n return loggamma(z + 1)\r\n\r\n FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()\r\n\r\n\r\ndef test_realpart():\r\n # Test that the real parts of loggamma and gammaln agree on the\r\n # real axis.\r\n x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5\r\n dataset = np.vstack((x, gammaln(x))).T\r\n\r\n def f(z):\r\n return loggamma(z).real\r\n \r\n FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()\r\n", "\"\"\"\r\nSparse Eigenvalue Solvers\r\n-------------------------\r\n\r\nThe submodules of sparse.linalg.eigen:\r\n 1. 
lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method\r\n\r\n\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\nfrom .arpack import *\r\nfrom .lobpcg import *\r\n\r\n__all__ = [s for s in dir() if not s.startswith('_')]\r\nfrom numpy.testing import Tester\r\ntest = Tester().test\r\nbench = Tester().bench\r\n", "from __future__ import division, print_function, absolute_import\r\n\r\nfrom scipy import stats\r\nimport numpy as np\r\nfrom numpy.testing import assert_almost_equal, assert_, assert_raises, \\\r\n assert_array_almost_equal, assert_array_almost_equal_nulp, run_module_suite\r\n\r\n\r\ndef test_kde_1d():\r\n #some basic tests comparing to normal distribution\r\n np.random.seed(8765678)\r\n n_basesample = 500\r\n xn = np.random.randn(n_basesample)\r\n xnmean = xn.mean()\r\n xnstd = xn.std(ddof=1)\r\n\r\n # get kde for original sample\r\n gkde = stats.gaussian_kde(xn)\r\n\r\n # evaluate the density function for the kde for some points\r\n xs = np.linspace(-7,7,501)\r\n kdepdf = gkde.evaluate(xs)\r\n normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)\r\n intervall = xs[1] - xs[0]\r\n\r\n assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)\r\n prob1 = gkde.integrate_box_1d(xnmean, np.inf)\r\n prob2 = gkde.integrate_box_1d(-np.inf, xnmean)\r\n assert_almost_equal(prob1, 0.5, decimal=1)\r\n assert_almost_equal(prob2, 0.5, decimal=1)\r\n assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)\r\n assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)\r\n\r\n assert_almost_equal(gkde.integrate_kde(gkde),\r\n (kdepdf**2).sum()*intervall, decimal=2)\r\n assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),\r\n (kdepdf*normpdf).sum()*intervall, decimal=2)\r\n\r\n\r\ndef test_kde_2d():\r\n #some basic tests comparing to normal distribution\r\n np.random.seed(8765678)\r\n n_basesample = 500\r\n\r\n mean = np.array([1.0, 3.0])\r\n covariance = np.array([[1.0, 2.0], [2.0, 6.0]])\r\n\r\n # Need transpose (shape (2, 500)) for kde\r\n xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T\r\n\r\n # get kde for original sample\r\n gkde = stats.gaussian_kde(xn)\r\n\r\n # evaluate the density function for the kde for some points\r\n x, y = np.mgrid[-7:7:500j, -7:7:500j]\r\n grid_coords = np.vstack([x.ravel(), y.ravel()])\r\n kdepdf = gkde.evaluate(grid_coords)\r\n kdepdf = kdepdf.reshape(500, 500)\r\n\r\n normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)\r\n intervall = y.ravel()[1] - y.ravel()[0]\r\n\r\n assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)\r\n\r\n small = -1e100\r\n large = 1e100\r\n prob1 = gkde.integrate_box([small, mean[1]], [large, large])\r\n prob2 = gkde.integrate_box([small, small], [large, mean[1]])\r\n\r\n assert_almost_equal(prob1, 0.5, decimal=1)\r\n assert_almost_equal(prob2, 0.5, decimal=1)\r\n assert_almost_equal(gkde.integrate_kde(gkde),\r\n (kdepdf**2).sum()*(intervall**2), decimal=2)\r\n assert_almost_equal(gkde.integrate_gaussian(mean, covariance),\r\n (kdepdf*normpdf).sum()*(intervall**2), decimal=2)\r\n\r\n\r\ndef test_kde_bandwidth_method():\r\n def scotts_factor(kde_obj):\r\n \"\"\"Same as default, just check that it works.\"\"\"\r\n return np.power(kde_obj.n, -1./(kde_obj.d+4))\r\n\r\n np.random.seed(8765678)\r\n n_basesample = 50\r\n xn = np.random.randn(n_basesample)\r\n\r\n # Default\r\n gkde = stats.gaussian_kde(xn)\r\n # Supply a callable\r\n gkde2 = stats.gaussian_kde(xn, 
bw_method=scotts_factor)\r\n # Supply a scalar\r\n gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)\r\n\r\n xs = np.linspace(-7,7,51)\r\n kdepdf = gkde.evaluate(xs)\r\n kdepdf2 = gkde2.evaluate(xs)\r\n assert_almost_equal(kdepdf, kdepdf2)\r\n kdepdf3 = gkde3.evaluate(xs)\r\n assert_almost_equal(kdepdf, kdepdf3)\r\n\r\n assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')\r\n\r\n\r\n# Subclasses that should stay working (extracted from various sources).\r\n# Unfortunately the earlier design of gaussian_kde made it necessary for users\r\n# to create these kinds of subclasses, or call _compute_covariance() directly.\r\n\r\nclass _kde_subclass1(stats.gaussian_kde):\r\n def __init__(self, dataset):\r\n self.dataset = np.atleast_2d(dataset)\r\n self.d, self.n = self.dataset.shape\r\n self.covariance_factor = self.scotts_factor\r\n self._compute_covariance()\r\n\r\n\r\nclass _kde_subclass2(stats.gaussian_kde):\r\n def __init__(self, dataset):\r\n self.covariance_factor = self.scotts_factor\r\n super(_kde_subclass2, self).__init__(dataset)\r\n\r\n\r\nclass _kde_subclass3(stats.gaussian_kde):\r\n def __init__(self, dataset, covariance):\r\n self.covariance = covariance\r\n stats.gaussian_kde.__init__(self, dataset)\r\n\r\n def _compute_covariance(self):\r\n self.inv_cov = np.linalg.inv(self.covariance)\r\n self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \\\r\n * self.n\r\n\r\n\r\nclass _kde_subclass4(stats.gaussian_kde):\r\n def covariance_factor(self):\r\n return 0.5 * self.silverman_factor()\r\n\r\n\r\ndef test_gaussian_kde_subclassing():\r\n x1 = np.array([-7, -5, 1, 4, 5], dtype=float)\r\n xs = np.linspace(-10, 10, num=50)\r\n\r\n # gaussian_kde itself\r\n kde = stats.gaussian_kde(x1)\r\n ys = kde(xs)\r\n\r\n # subclass 1\r\n kde1 = _kde_subclass1(x1)\r\n y1 = kde1(xs)\r\n assert_array_almost_equal_nulp(ys, y1, nulp=10)\r\n\r\n # subclass 2\r\n kde2 = _kde_subclass2(x1)\r\n y2 = kde2(xs)\r\n assert_array_almost_equal_nulp(ys, y2, nulp=10)\r\n\r\n # subclass 3\r\n kde3 = _kde_subclass3(x1, kde.covariance)\r\n y3 = kde3(xs)\r\n assert_array_almost_equal_nulp(ys, y3, nulp=10)\r\n\r\n # subclass 4\r\n kde4 = _kde_subclass4(x1)\r\n y4 = kde4(x1)\r\n y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]\r\n\r\n assert_array_almost_equal(y_expected, y4, decimal=6)\r\n\r\n # Not a subclass, but check for use of _compute_covariance()\r\n kde5 = kde\r\n kde5.covariance_factor = lambda: kde.factor\r\n kde5._compute_covariance()\r\n y5 = kde5(xs)\r\n assert_array_almost_equal_nulp(ys, y5, nulp=10)\r\n\r\n\r\ndef test_gaussian_kde_covariance_caching():\r\n x1 = np.array([-7, -5, 1, 4, 5], dtype=float)\r\n xs = np.linspace(-10, 10, num=5)\r\n # These expected values are from scipy 0.10, before some changes to\r\n # gaussian_kde. They were not compared with any external reference.\r\n y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]\r\n\r\n # Set the bandwidth, then reset it to the default.\r\n kde = stats.gaussian_kde(x1)\r\n kde.set_bandwidth(bw_method=0.5)\r\n kde.set_bandwidth(bw_method='scott')\r\n y2 = kde(xs)\r\n\r\n assert_array_almost_equal(y_expected, y2, decimal=7)\r\n\r\n\r\ndef test_gaussian_kde_monkeypatch():\r\n \"\"\"Ugly, but people may rely on this. 
See scipy pull request 123,\r\n specifically the linked ML thread \"Width of the Gaussian in stats.kde\".\r\n If it is necessary to break this later on, that is to be discussed on ML.\r\n \"\"\"\r\n x1 = np.array([-7, -5, 1, 4, 5], dtype=float)\r\n xs = np.linspace(-10, 10, num=50)\r\n\r\n # The old monkeypatched version to get at Silverman's Rule.\r\n kde = stats.gaussian_kde(x1)\r\n kde.covariance_factor = kde.silverman_factor\r\n kde._compute_covariance()\r\n y1 = kde(xs)\r\n\r\n # The new saner version.\r\n kde2 = stats.gaussian_kde(x1, bw_method='silverman')\r\n y2 = kde2(xs)\r\n\r\n assert_array_almost_equal_nulp(y1, y2, nulp=10)\r\n\r\n\r\ndef test_kde_integer_input():\r\n \"\"\"Regression test for #1181.\"\"\"\r\n x1 = np.arange(5)\r\n kde = stats.gaussian_kde(x1)\r\n y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]\r\n assert_array_almost_equal(kde(x1), y_expected, decimal=6)\r\n\r\n\r\ndef test_pdf_logpdf():\r\n np.random.seed(1)\r\n n_basesample = 50\r\n xn = np.random.randn(n_basesample)\r\n\r\n # Default\r\n gkde = stats.gaussian_kde(xn)\r\n\r\n xs = np.linspace(-15, 12, 25)\r\n pdf = gkde.evaluate(xs)\r\n pdf2 = gkde.pdf(xs)\r\n assert_almost_equal(pdf, pdf2, decimal=12)\r\n\r\n logpdf = np.log(pdf)\r\n logpdf2 = gkde.logpdf(xs)\r\n assert_almost_equal(logpdf, logpdf2, decimal=12)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n", "\"\"\"\r\nSet operations for 1D numeric arrays based on sorting.\r\n\r\n:Contains:\r\n ediff1d,\r\n unique,\r\n intersect1d,\r\n setxor1d,\r\n in1d,\r\n union1d,\r\n setdiff1d\r\n\r\n:Notes:\r\n\r\nFor floating point arrays, inaccurate results may appear due to usual round-off\r\nand floating point comparison issues.\r\n\r\nSpeed could be gained in some operations by an implementation of\r\nsort(), that can provide directly the permutation vectors, avoiding\r\nthus calls to argsort().\r\n\r\nTo do: Optionally return indices analogously to unique for all functions.\r\n\r\n:Author: Robert Cimrman\r\n\r\n\"\"\"\r\nfrom __future__ import division, absolute_import, print_function\r\n\r\nimport numpy as np\r\n\r\n\r\n__all__ = [\r\n 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',\r\n 'in1d'\r\n ]\r\n\r\n\r\ndef ediff1d(ary, to_end=None, to_begin=None):\r\n \"\"\"\r\n The differences between consecutive elements of an array.\r\n\r\n Parameters\r\n ----------\r\n ary : array_like\r\n If necessary, will be flattened before the differences are taken.\r\n to_end : array_like, optional\r\n Number(s) to append at the end of the returned differences.\r\n to_begin : array_like, optional\r\n Number(s) to prepend at the beginning of the returned differences.\r\n\r\n Returns\r\n -------\r\n ediff1d : ndarray\r\n The differences. 
Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.\r\n\r\n See Also\r\n --------\r\n diff, gradient\r\n\r\n Notes\r\n -----\r\n When applied to masked arrays, this function drops the mask information\r\n if the `to_begin` and/or `to_end` parameters are used.\r\n\r\n Examples\r\n --------\r\n >>> x = np.array([1, 2, 4, 7, 0])\r\n >>> np.ediff1d(x)\r\n array([ 1, 2, 3, -7])\r\n\r\n >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))\r\n array([-99, 1, 2, 3, -7, 88, 99])\r\n\r\n The returned array is always 1D.\r\n\r\n >>> y = [[1, 2, 4], [1, 6, 24]]\r\n >>> np.ediff1d(y)\r\n array([ 1, 2, -3, 5, 18])\r\n\r\n \"\"\"\r\n ary = np.asanyarray(ary).flat\r\n ed = ary[1:] - ary[:-1]\r\n arrays = [ed]\r\n if to_begin is not None:\r\n arrays.insert(0, to_begin)\r\n if to_end is not None:\r\n arrays.append(to_end)\r\n\r\n if len(arrays) != 1:\r\n # We'll save ourselves a copy of a potentially large array in\r\n # the common case where neither to_begin or to_end was given.\r\n ed = np.hstack(arrays)\r\n\r\n return ed\r\n\r\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False):\r\n \"\"\"\r\n Find the unique elements of an array.\r\n\r\n Returns the sorted unique elements of an array. There are three optional\r\n outputs in addition to the unique elements: the indices of the input array\r\n that give the unique values, the indices of the unique array that\r\n reconstruct the input array, and the number of times each unique value\r\n comes up in the input array.\r\n\r\n Parameters\r\n ----------\r\n ar : array_like\r\n Input array. This will be flattened if it is not already 1-D.\r\n return_index : bool, optional\r\n If True, also return the indices of `ar` that result in the unique\r\n array.\r\n return_inverse : bool, optional\r\n If True, also return the indices of the unique array that can be used\r\n to reconstruct `ar`.\r\n return_counts : bool, optional\r\n If True, also return the number of times each unique value comes up\r\n in `ar`.\r\n\r\n .. versionadded:: 1.9.0\r\n\r\n Returns\r\n -------\r\n unique : ndarray\r\n The sorted unique values.\r\n unique_indices : ndarray, optional\r\n The indices of the first occurrences of the unique values in the\r\n (flattened) original array. Only provided if `return_index` is True.\r\n unique_inverse : ndarray, optional\r\n The indices to reconstruct the (flattened) original array from the\r\n unique array. Only provided if `return_inverse` is True.\r\n unique_counts : ndarray, optional\r\n The number of times each of the unique values comes up in the\r\n original array. Only provided if `return_counts` is True.\r\n\r\n .. 
versionadded:: 1.9.0\r\n\r\n See Also\r\n --------\r\n numpy.lib.arraysetops : Module with a number of other functions for\r\n performing set operations on arrays.\r\n\r\n Examples\r\n --------\r\n >>> np.unique([1, 1, 2, 2, 3, 3])\r\n array([1, 2, 3])\r\n >>> a = np.array([[1, 1], [2, 3]])\r\n >>> np.unique(a)\r\n array([1, 2, 3])\r\n\r\n Return the indices of the original array that give the unique values:\r\n\r\n >>> a = np.array(['a', 'b', 'b', 'c', 'a'])\r\n >>> u, indices = np.unique(a, return_index=True)\r\n >>> u\r\n array(['a', 'b', 'c'],\r\n dtype='|S1')\r\n >>> indices\r\n array([0, 1, 3])\r\n >>> a[indices]\r\n array(['a', 'b', 'c'],\r\n dtype='|S1')\r\n\r\n Reconstruct the input array from the unique values:\r\n\r\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\r\n >>> u, indices = np.unique(a, return_inverse=True)\r\n >>> u\r\n array([1, 2, 3, 4, 6])\r\n >>> indices\r\n array([0, 1, 4, 3, 1, 2, 1])\r\n >>> u[indices]\r\n array([1, 2, 6, 4, 2, 3, 2])\r\n\r\n \"\"\"\r\n ar = np.asanyarray(ar).flatten()\r\n\r\n optional_indices = return_index or return_inverse\r\n optional_returns = optional_indices or return_counts\r\n\r\n if ar.size == 0:\r\n if not optional_returns:\r\n ret = ar\r\n else:\r\n ret = (ar,)\r\n if return_index:\r\n ret += (np.empty(0, np.bool),)\r\n if return_inverse:\r\n ret += (np.empty(0, np.bool),)\r\n if return_counts:\r\n ret += (np.empty(0, np.intp),)\r\n return ret\r\n\r\n if optional_indices:\r\n perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')\r\n aux = ar[perm]\r\n else:\r\n ar.sort()\r\n aux = ar\r\n flag = np.concatenate(([True], aux[1:] != aux[:-1]))\r\n\r\n if not optional_returns:\r\n ret = aux[flag]\r\n else:\r\n ret = (aux[flag],)\r\n if return_index:\r\n ret += (perm[flag],)\r\n if return_inverse:\r\n iflag = np.cumsum(flag) - 1\r\n inv_idx = np.empty(ar.shape, dtype=np.intp)\r\n inv_idx[perm] = iflag\r\n ret += (inv_idx,)\r\n if return_counts:\r\n idx = np.concatenate(np.nonzero(flag) + ([ar.size],))\r\n ret += (np.diff(idx),)\r\n return ret\r\n\r\ndef intersect1d(ar1, ar2, assume_unique=False):\r\n \"\"\"\r\n Find the intersection of two arrays.\r\n\r\n Return the sorted, unique values that are in both of the input arrays.\r\n\r\n Parameters\r\n ----------\r\n ar1, ar2 : array_like\r\n Input arrays.\r\n assume_unique : bool\r\n If True, the input arrays are both assumed to be unique, which\r\n can speed up the calculation. 
Default is False.\r\n\r\n Returns\r\n -------\r\n intersect1d : ndarray\r\n Sorted 1D array of common and unique elements.\r\n\r\n See Also\r\n --------\r\n numpy.lib.arraysetops : Module with a number of other functions for\r\n performing set operations on arrays.\r\n\r\n Examples\r\n --------\r\n >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])\r\n array([1, 3])\r\n\r\n To intersect more than two arrays, use functools.reduce:\r\n\r\n >>> from functools import reduce\r\n >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))\r\n array([3])\r\n \"\"\"\r\n if not assume_unique:\r\n # Might be faster than unique( intersect1d( ar1, ar2 ) )?\r\n ar1 = unique(ar1)\r\n ar2 = unique(ar2)\r\n aux = np.concatenate((ar1, ar2))\r\n aux.sort()\r\n return aux[:-1][aux[1:] == aux[:-1]]\r\n\r\ndef setxor1d(ar1, ar2, assume_unique=False):\r\n \"\"\"\r\n Find the set exclusive-or of two arrays.\r\n\r\n Return the sorted, unique values that are in only one (not both) of the\r\n input arrays.\r\n\r\n Parameters\r\n ----------\r\n ar1, ar2 : array_like\r\n Input arrays.\r\n assume_unique : bool\r\n If True, the input arrays are both assumed to be unique, which\r\n can speed up the calculation. Default is False.\r\n\r\n Returns\r\n -------\r\n setxor1d : ndarray\r\n Sorted 1D array of unique values that are in only one of the input\r\n arrays.\r\n\r\n Examples\r\n --------\r\n >>> a = np.array([1, 2, 3, 2, 4])\r\n >>> b = np.array([2, 3, 5, 7, 5])\r\n >>> np.setxor1d(a,b)\r\n array([1, 4, 5, 7])\r\n\r\n \"\"\"\r\n if not assume_unique:\r\n ar1 = unique(ar1)\r\n ar2 = unique(ar2)\r\n\r\n aux = np.concatenate((ar1, ar2))\r\n if aux.size == 0:\r\n return aux\r\n\r\n aux.sort()\r\n# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0\r\n flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))\r\n# flag2 = ediff1d( flag ) == 0\r\n flag2 = flag[1:] == flag[:-1]\r\n return aux[flag2]\r\n\r\ndef in1d(ar1, ar2, assume_unique=False, invert=False):\r\n \"\"\"\r\n Test whether each element of a 1-D array is also present in a second array.\r\n\r\n Returns a boolean array the same length as `ar1` that is True\r\n where an element of `ar1` is in `ar2` and False otherwise.\r\n\r\n Parameters\r\n ----------\r\n ar1 : (M,) array_like\r\n Input array.\r\n ar2 : array_like\r\n The values against which to test each value of `ar1`.\r\n assume_unique : bool, optional\r\n If True, the input arrays are both assumed to be unique, which\r\n can speed up the calculation. Default is False.\r\n invert : bool, optional\r\n If True, the values in the returned array are inverted (that is,\r\n False where an element of `ar1` is in `ar2` and True otherwise).\r\n Default is False. ``np.in1d(a, b, invert=True)`` is equivalent\r\n to (but is faster than) ``np.invert(in1d(a, b))``.\r\n\r\n .. versionadded:: 1.8.0\r\n\r\n Returns\r\n -------\r\n in1d : (M,) ndarray, bool\r\n The values `ar1[in1d]` are in `ar2`.\r\n\r\n See Also\r\n --------\r\n numpy.lib.arraysetops : Module with a number of other functions for\r\n performing set operations on arrays.\r\n\r\n Notes\r\n -----\r\n `in1d` can be considered as an element-wise function version of the\r\n python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly\r\n equivalent to ``np.array([item in b for item in a])``.\r\n However, this idea fails if `ar2` is a set, or similar (non-sequence)\r\n container: As ``ar2`` is converted to an array, in those cases\r\n ``asarray(ar2)`` is an object array rather than the expected array of\r\n contained values.\r\n\r\n .. 
versionadded:: 1.4.0\r\n\r\n Examples\r\n --------\r\n >>> test = np.array([0, 1, 2, 5, 0])\r\n >>> states = [0, 2]\r\n >>> mask = np.in1d(test, states)\r\n >>> mask\r\n array([ True, False, True, False, True], dtype=bool)\r\n >>> test[mask]\r\n array([0, 2, 0])\r\n >>> mask = np.in1d(test, states, invert=True)\r\n >>> mask\r\n array([False, True, False, True, False], dtype=bool)\r\n >>> test[mask]\r\n array([1, 5])\r\n \"\"\"\r\n # Ravel both arrays, behavior for the first array could be different\r\n ar1 = np.asarray(ar1).ravel()\r\n ar2 = np.asarray(ar2).ravel()\r\n\r\n # This code is significantly faster when the condition is satisfied.\r\n if len(ar2) < 10 * len(ar1) ** 0.145:\r\n if invert:\r\n mask = np.ones(len(ar1), dtype=np.bool)\r\n for a in ar2:\r\n mask &= (ar1 != a)\r\n else:\r\n mask = np.zeros(len(ar1), dtype=np.bool)\r\n for a in ar2:\r\n mask |= (ar1 == a)\r\n return mask\r\n\r\n # Otherwise use sorting\r\n if not assume_unique:\r\n ar1, rev_idx = np.unique(ar1, return_inverse=True)\r\n ar2 = np.unique(ar2)\r\n\r\n ar = np.concatenate((ar1, ar2))\r\n # We need this to be a stable sort, so always use 'mergesort'\r\n # here. The values from the first array should always come before\r\n # the values from the second array.\r\n order = ar.argsort(kind='mergesort')\r\n sar = ar[order]\r\n if invert:\r\n bool_ar = (sar[1:] != sar[:-1])\r\n else:\r\n bool_ar = (sar[1:] == sar[:-1])\r\n flag = np.concatenate((bool_ar, [invert]))\r\n ret = np.empty(ar.shape, dtype=bool)\r\n ret[order] = flag\r\n\r\n if assume_unique:\r\n return ret[:len(ar1)]\r\n else:\r\n return ret[rev_idx]\r\n\r\ndef union1d(ar1, ar2):\r\n \"\"\"\r\n Find the union of two arrays.\r\n\r\n Return the unique, sorted array of values that are in either of the two\r\n input arrays.\r\n\r\n Parameters\r\n ----------\r\n ar1, ar2 : array_like\r\n Input arrays. They are flattened if they are not already 1D.\r\n\r\n Returns\r\n -------\r\n union1d : ndarray\r\n Unique, sorted union of the input arrays.\r\n\r\n See Also\r\n --------\r\n numpy.lib.arraysetops : Module with a number of other functions for\r\n performing set operations on arrays.\r\n\r\n Examples\r\n --------\r\n >>> np.union1d([-1, 0, 1], [-2, 0, 2])\r\n array([-2, -1, 0, 1, 2])\r\n\r\n To find the union of more than two arrays, use functools.reduce:\r\n\r\n >>> from functools import reduce\r\n >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))\r\n array([1, 2, 3, 4, 6])\r\n \"\"\"\r\n return unique(np.concatenate((ar1, ar2)))\r\n\r\ndef setdiff1d(ar1, ar2, assume_unique=False):\r\n \"\"\"\r\n Find the set difference of two arrays.\r\n\r\n Return the sorted, unique values in `ar1` that are not in `ar2`.\r\n\r\n Parameters\r\n ----------\r\n ar1 : array_like\r\n Input array.\r\n ar2 : array_like\r\n Input comparison array.\r\n assume_unique : bool\r\n If True, the input arrays are both assumed to be unique, which\r\n can speed up the calculation. 
Default is False.\r\n\r\n Returns\r\n -------\r\n setdiff1d : ndarray\r\n Sorted 1D array of values in `ar1` that are not in `ar2`.\r\n\r\n See Also\r\n --------\r\n numpy.lib.arraysetops : Module with a number of other functions for\r\n performing set operations on arrays.\r\n\r\n Examples\r\n --------\r\n >>> a = np.array([1, 2, 3, 2, 4, 1])\r\n >>> b = np.array([3, 4, 5, 6])\r\n >>> np.setdiff1d(a, b)\r\n array([1, 2])\r\n\r\n \"\"\"\r\n if assume_unique:\r\n ar1 = np.asarray(ar1).ravel()\r\n else:\r\n ar1 = unique(ar1)\r\n ar2 = unique(ar2)\r\n return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]\r\n", "from numpy.testing import TestCase, run_module_suite, assert_allclose\r\nfrom scipy.linalg import cython_lapack as cython_lapack\r\nfrom scipy.linalg import lapack\r\n\r\n\r\nclass test_lamch(TestCase):\r\n\r\n def test_slamch(self):\r\n for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']:\r\n assert_allclose(cython_lapack._test_slamch(c),\r\n lapack.slamch(c))\r\n\r\n def test_dlamch(self):\r\n for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']:\r\n assert_allclose(cython_lapack._test_dlamch(c),\r\n lapack.dlamch(c))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n", "\"\"\"\r\nImplementation of Harwell-Boeing read/write.\r\n\r\nAt the moment not the full Harwell-Boeing format is supported. Supported\r\nfeatures are:\r\n\r\n - assembled, non-symmetric, real matrices\r\n - integer for pointer/indices\r\n - exponential format for float values, and int format\r\n\r\n\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\n# TODO:\r\n# - Add more support (symmetric/complex matrices, non-assembled matrices ?)\r\n\r\n# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but\r\n# takes a lot of memory. Being faster would require compiled code.\r\n# write is not efficient. 
Although not a terribly exciting task,\r\n# having reusable facilities to efficiently read/write fortran-formatted files\r\n# would be useful outside this module.\r\n\r\nimport warnings\r\n\r\nimport numpy as np\r\nfrom scipy.sparse import csc_matrix\r\nfrom scipy.io.harwell_boeing._fortran_format_parser import \\\r\n FortranFormatParser, IntFormat, ExpFormat\r\n\r\nfrom scipy._lib.six import string_types\r\n\r\n__all__ = [\"MalformedHeader\", \"hb_read\", \"hb_write\", \"HBInfo\", \"HBFile\",\r\n \"HBMatrixType\"]\r\n\r\n\r\nclass MalformedHeader(Exception):\r\n pass\r\n\r\n\r\nclass LineOverflow(Warning):\r\n pass\r\n\r\n\r\ndef _nbytes_full(fmt, nlines):\r\n \"\"\"Return the number of bytes to read to get every full lines for the\r\n given parsed fortran format.\"\"\"\r\n return (fmt.repeat * fmt.width + 1) * (nlines - 1)\r\n\r\n\r\nclass HBInfo(object):\r\n @classmethod\r\n def from_data(cls, m, title=\"Default title\", key=\"0\", mxtype=None, fmt=None):\r\n \"\"\"Create a HBInfo instance from an existing sparse matrix.\r\n\r\n Parameters\r\n ----------\r\n m : sparse matrix\r\n the HBInfo instance will derive its parameters from m\r\n title : str\r\n Title to put in the HB header\r\n key : str\r\n Key\r\n mxtype : HBMatrixType\r\n type of the input matrix\r\n fmt : dict\r\n not implemented\r\n\r\n Returns\r\n -------\r\n hb_info : HBInfo instance\r\n \"\"\"\r\n pointer = m.indptr\r\n indices = m.indices\r\n values = m.data\r\n\r\n nrows, ncols = m.shape\r\n nnon_zeros = m.nnz\r\n\r\n if fmt is None:\r\n # +1 because HB use one-based indexing (Fortran), and we will write\r\n # the indices /pointer as such\r\n pointer_fmt = IntFormat.from_number(np.max(pointer+1))\r\n indices_fmt = IntFormat.from_number(np.max(indices+1))\r\n\r\n if values.dtype.kind in np.typecodes[\"AllFloat\"]:\r\n values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))\r\n elif values.dtype.kind in np.typecodes[\"AllInteger\"]:\r\n values_fmt = IntFormat.from_number(-np.max(np.abs(values)))\r\n else:\r\n raise NotImplementedError(\"type %s not implemented yet\" % values.dtype.kind)\r\n else:\r\n raise NotImplementedError(\"fmt argument not supported yet.\")\r\n\r\n if mxtype is None:\r\n if not np.isrealobj(values):\r\n raise ValueError(\"Complex values not supported yet\")\r\n if values.dtype.kind in np.typecodes[\"AllInteger\"]:\r\n tp = \"integer\"\r\n elif values.dtype.kind in np.typecodes[\"AllFloat\"]:\r\n tp = \"real\"\r\n else:\r\n raise NotImplementedError(\"type %s for values not implemented\"\r\n % values.dtype)\r\n mxtype = HBMatrixType(tp, \"unsymmetric\", \"assembled\")\r\n else:\r\n raise ValueError(\"mxtype argument not handled yet.\")\r\n\r\n def _nlines(fmt, size):\r\n nlines = size // fmt.repeat\r\n if nlines * fmt.repeat != size:\r\n nlines += 1\r\n return nlines\r\n\r\n pointer_nlines = _nlines(pointer_fmt, pointer.size)\r\n indices_nlines = _nlines(indices_fmt, indices.size)\r\n values_nlines = _nlines(values_fmt, values.size)\r\n\r\n total_nlines = pointer_nlines + indices_nlines + values_nlines\r\n\r\n return cls(title, key,\r\n total_nlines, pointer_nlines, indices_nlines, values_nlines,\r\n mxtype, nrows, ncols, nnon_zeros,\r\n pointer_fmt.fortran_format, indices_fmt.fortran_format,\r\n values_fmt.fortran_format)\r\n\r\n @classmethod\r\n def from_file(cls, fid):\r\n \"\"\"Create a HBInfo instance from a file object containg a matrix in the\r\n HB format.\r\n\r\n Parameters\r\n ----------\r\n fid : file-like matrix\r\n File or file-like object containing a matrix in the HB 
format.\r\n\r\n Returns\r\n -------\r\n hb_info : HBInfo instance\r\n \"\"\"\r\n # First line\r\n line = fid.readline().strip(\"\\n\")\r\n if not len(line) > 72:\r\n raise ValueError(\"Expected at least 72 characters for first line, \"\r\n \"got: \\n%s\" % line)\r\n title = line[:72]\r\n key = line[72:]\r\n\r\n # Second line\r\n line = fid.readline().strip(\"\\n\")\r\n if not len(line.rstrip()) >= 56:\r\n raise ValueError(\"Expected at least 56 characters for second line, \"\r\n \"got: \\n%s\" % line)\r\n total_nlines = _expect_int(line[:14])\r\n pointer_nlines = _expect_int(line[14:28])\r\n indices_nlines = _expect_int(line[28:42])\r\n values_nlines = _expect_int(line[42:56])\r\n\r\n rhs_nlines = line[56:72].strip()\r\n if rhs_nlines == '':\r\n rhs_nlines = 0\r\n else:\r\n rhs_nlines = _expect_int(rhs_nlines)\r\n if not rhs_nlines == 0:\r\n raise ValueError(\"Only files without right hand side supported for \"\r\n \"now.\")\r\n\r\n # Third line\r\n line = fid.readline().strip(\"\\n\")\r\n if not len(line) >= 70:\r\n raise ValueError(\"Expected at least 72 character for third line, got:\\n\"\r\n \"%s\" % line)\r\n\r\n mxtype_s = line[:3].upper()\r\n if not len(mxtype_s) == 3:\r\n raise ValueError(\"mxtype expected to be 3 characters long\")\r\n\r\n mxtype = HBMatrixType.from_fortran(mxtype_s)\r\n if mxtype.value_type not in [\"real\", \"integer\"]:\r\n raise ValueError(\"Only real or integer matrices supported for \"\r\n \"now (detected %s)\" % mxtype)\r\n if not mxtype.structure == \"unsymmetric\":\r\n raise ValueError(\"Only unsymmetric matrices supported for \"\r\n \"now (detected %s)\" % mxtype)\r\n if not mxtype.storage == \"assembled\":\r\n raise ValueError(\"Only assembled matrices supported for now\")\r\n\r\n if not line[3:14] == \" \" * 11:\r\n raise ValueError(\"Malformed data for third line: %s\" % line)\r\n\r\n nrows = _expect_int(line[14:28])\r\n ncols = _expect_int(line[28:42])\r\n nnon_zeros = _expect_int(line[42:56])\r\n nelementals = _expect_int(line[56:70])\r\n if not nelementals == 0:\r\n raise ValueError(\"Unexpected value %d for nltvl (last entry of line 3)\"\r\n % nelementals)\r\n\r\n # Fourth line\r\n line = fid.readline().strip(\"\\n\")\r\n\r\n ct = line.split()\r\n if not len(ct) == 3:\r\n raise ValueError(\"Expected 3 formats, got %s\" % ct)\r\n\r\n return cls(title, key,\r\n total_nlines, pointer_nlines, indices_nlines, values_nlines,\r\n mxtype, nrows, ncols, nnon_zeros,\r\n ct[0], ct[1], ct[2],\r\n rhs_nlines, nelementals)\r\n\r\n def __init__(self, title, key,\r\n total_nlines, pointer_nlines, indices_nlines, values_nlines,\r\n mxtype, nrows, ncols, nnon_zeros,\r\n pointer_format_str, indices_format_str, values_format_str,\r\n right_hand_sides_nlines=0, nelementals=0):\r\n \"\"\"Do not use this directly, but the class ctrs (from_* functions).\"\"\"\r\n self.title = title\r\n self.key = key\r\n if title is None:\r\n title = \"No Title\"\r\n if len(title) > 72:\r\n raise ValueError(\"title cannot be > 72 characters\")\r\n\r\n if key is None:\r\n key = \"|No Key\"\r\n if len(key) > 8:\r\n warnings.warn(\"key is > 8 characters (key is %s)\" % key, LineOverflow)\r\n\r\n self.total_nlines = total_nlines\r\n self.pointer_nlines = pointer_nlines\r\n self.indices_nlines = indices_nlines\r\n self.values_nlines = values_nlines\r\n\r\n parser = FortranFormatParser()\r\n pointer_format = parser.parse(pointer_format_str)\r\n if not isinstance(pointer_format, IntFormat):\r\n raise ValueError(\"Expected int format for pointer format, got %s\"\r\n % pointer_format)\r\n\r\n 
indices_format = parser.parse(indices_format_str)\r\n if not isinstance(indices_format, IntFormat):\r\n raise ValueError(\"Expected int format for indices format, got %s\" %\r\n indices_format)\r\n\r\n values_format = parser.parse(values_format_str)\r\n if isinstance(values_format, ExpFormat):\r\n if mxtype.value_type not in [\"real\", \"complex\"]:\r\n raise ValueError(\"Inconsistency between matrix type %s and \"\r\n \"value type %s\" % (mxtype, values_format))\r\n values_dtype = np.float64\r\n elif isinstance(values_format, IntFormat):\r\n if mxtype.value_type not in [\"integer\"]:\r\n raise ValueError(\"Inconsistency between matrix type %s and \"\r\n \"value type %s\" % (mxtype, values_format))\r\n # XXX: fortran int -> dtype association ?\r\n values_dtype = int\r\n else:\r\n raise ValueError(\"Unsupported format for values %r\" % (values_format,))\r\n\r\n self.pointer_format = pointer_format\r\n self.indices_format = indices_format\r\n self.values_format = values_format\r\n\r\n self.pointer_dtype = np.int32\r\n self.indices_dtype = np.int32\r\n self.values_dtype = values_dtype\r\n\r\n self.pointer_nlines = pointer_nlines\r\n self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)\r\n\r\n self.indices_nlines = indices_nlines\r\n self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)\r\n\r\n self.values_nlines = values_nlines\r\n self.values_nbytes_full = _nbytes_full(values_format, values_nlines)\r\n\r\n self.nrows = nrows\r\n self.ncols = ncols\r\n self.nnon_zeros = nnon_zeros\r\n self.nelementals = nelementals\r\n self.mxtype = mxtype\r\n\r\n def dump(self):\r\n \"\"\"Gives the header corresponding to this instance as a string.\"\"\"\r\n header = [self.title.ljust(72) + self.key.ljust(8)]\r\n\r\n header.append(\"%14d%14d%14d%14d\" %\r\n (self.total_nlines, self.pointer_nlines,\r\n self.indices_nlines, self.values_nlines))\r\n header.append(\"%14s%14d%14d%14d%14d\" %\r\n (self.mxtype.fortran_format.ljust(14), self.nrows,\r\n self.ncols, self.nnon_zeros, 0))\r\n\r\n pffmt = self.pointer_format.fortran_format\r\n iffmt = self.indices_format.fortran_format\r\n vffmt = self.values_format.fortran_format\r\n header.append(\"%16s%16s%20s\" %\r\n (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))\r\n return \"\\n\".join(header)\r\n\r\n\r\ndef _expect_int(value, msg=None):\r\n try:\r\n return int(value)\r\n except ValueError:\r\n if msg is None:\r\n msg = \"Expected an int, got %s\"\r\n raise ValueError(msg % value)\r\n\r\n\r\ndef _read_hb_data(content, header):\r\n # XXX: look at a way to reduce memory here (big string creation)\r\n ptr_string = \"\".join([content.read(header.pointer_nbytes_full),\r\n content.readline()])\r\n ptr = np.fromstring(ptr_string,\r\n dtype=int, sep=' ')\r\n\r\n ind_string = \"\".join([content.read(header.indices_nbytes_full),\r\n content.readline()])\r\n ind = np.fromstring(ind_string,\r\n dtype=int, sep=' ')\r\n\r\n val_string = \"\".join([content.read(header.values_nbytes_full),\r\n content.readline()])\r\n val = np.fromstring(val_string,\r\n dtype=header.values_dtype, sep=' ')\r\n\r\n try:\r\n return csc_matrix((val, ind-1, ptr-1),\r\n shape=(header.nrows, header.ncols))\r\n except ValueError as e:\r\n raise e\r\n\r\n\r\ndef _write_data(m, fid, header):\r\n def write_array(f, ar, nlines, fmt):\r\n # ar_nlines is the number of full lines, n is the number of items per\r\n # line, ffmt the fortran format\r\n pyfmt = fmt.python_format\r\n pyfmt_full = pyfmt * fmt.repeat\r\n\r\n # for each array to write, we first write the full 
lines, and special\r\n # case for partial line\r\n full = ar[:(nlines - 1) * fmt.repeat]\r\n for row in full.reshape((nlines-1, fmt.repeat)):\r\n f.write(pyfmt_full % tuple(row) + \"\\n\")\r\n nremain = ar.size - full.size\r\n if nremain > 0:\r\n f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + \"\\n\")\r\n\r\n fid.write(header.dump())\r\n fid.write(\"\\n\")\r\n # +1 is for fortran one-based indexing\r\n write_array(fid, m.indptr+1, header.pointer_nlines,\r\n header.pointer_format)\r\n write_array(fid, m.indices+1, header.indices_nlines,\r\n header.indices_format)\r\n write_array(fid, m.data, header.values_nlines,\r\n header.values_format)\r\n\r\n\r\nclass HBMatrixType(object):\r\n \"\"\"Class to hold the matrix type.\"\"\"\r\n # q2f* translates qualified names to fortran character\r\n _q2f_type = {\r\n \"real\": \"R\",\r\n \"complex\": \"C\",\r\n \"pattern\": \"P\",\r\n \"integer\": \"I\",\r\n }\r\n _q2f_structure = {\r\n \"symmetric\": \"S\",\r\n \"unsymmetric\": \"U\",\r\n \"hermitian\": \"H\",\r\n \"skewsymmetric\": \"Z\",\r\n \"rectangular\": \"R\"\r\n }\r\n _q2f_storage = {\r\n \"assembled\": \"A\",\r\n \"elemental\": \"E\",\r\n }\r\n\r\n _f2q_type = dict([(j, i) for i, j in _q2f_type.items()])\r\n _f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()])\r\n _f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()])\r\n\r\n @classmethod\r\n def from_fortran(cls, fmt):\r\n if not len(fmt) == 3:\r\n raise ValueError(\"Fortran format for matrix type should be 3 \"\r\n \"characters long\")\r\n try:\r\n value_type = cls._f2q_type[fmt[0]]\r\n structure = cls._f2q_structure[fmt[1]]\r\n storage = cls._f2q_storage[fmt[2]]\r\n return cls(value_type, structure, storage)\r\n except KeyError:\r\n raise ValueError(\"Unrecognized format %s\" % fmt)\r\n\r\n def __init__(self, value_type, structure, storage=\"assembled\"):\r\n self.value_type = value_type\r\n self.structure = structure\r\n self.storage = storage\r\n\r\n if value_type not in self._q2f_type:\r\n raise ValueError(\"Unrecognized type %s\" % value_type)\r\n if structure not in self._q2f_structure:\r\n raise ValueError(\"Unrecognized structure %s\" % structure)\r\n if storage not in self._q2f_storage:\r\n raise ValueError(\"Unrecognized storage %s\" % storage)\r\n\r\n @property\r\n def fortran_format(self):\r\n return self._q2f_type[self.value_type] + \\\r\n self._q2f_structure[self.structure] + \\\r\n self._q2f_storage[self.storage]\r\n\r\n def __repr__(self):\r\n return \"HBMatrixType(%s, %s, %s)\" % \\\r\n (self.value_type, self.structure, self.storage)\r\n\r\n\r\nclass HBFile(object):\r\n def __init__(self, file, hb_info=None):\r\n \"\"\"Create a HBFile instance.\r\n\r\n Parameters\r\n ----------\r\n file : file-object\r\n StringIO work as well\r\n hb_info : HBInfo, optional\r\n Should be given as an argument for writing, in which case the file\r\n should be writable.\r\n \"\"\"\r\n self._fid = file\r\n if hb_info is None:\r\n self._hb_info = HBInfo.from_file(file)\r\n else:\r\n #raise IOError(\"file %s is not writable, and hb_info \"\r\n # \"was given.\" % file)\r\n self._hb_info = hb_info\r\n\r\n @property\r\n def title(self):\r\n return self._hb_info.title\r\n\r\n @property\r\n def key(self):\r\n return self._hb_info.key\r\n\r\n @property\r\n def type(self):\r\n return self._hb_info.mxtype.value_type\r\n\r\n @property\r\n def structure(self):\r\n return self._hb_info.mxtype.structure\r\n\r\n @property\r\n def storage(self):\r\n return self._hb_info.mxtype.storage\r\n\r\n def read_matrix(self):\r\n return 
_read_hb_data(self._fid, self._hb_info)\r\n\r\n def write_matrix(self, m):\r\n return _write_data(m, self._fid, self._hb_info)\r\n\r\n\r\ndef hb_read(file):\r\n \"\"\"Read HB-format file.\r\n\r\n Parameters\r\n ----------\r\n file : str-like or file-like\r\n If a string-like object, file is the name of the file to read. If a\r\n file-like object, the data are read from it.\r\n\r\n Returns\r\n -------\r\n data : scipy.sparse.csc_matrix instance\r\n The data read from the HB file as a sparse matrix.\r\n\r\n Notes\r\n -----\r\n At the moment not the full Harwell-Boeing format is supported. Supported\r\n features are:\r\n\r\n - assembled, non-symmetric, real matrices\r\n - integer for pointer/indices\r\n - exponential format for float values, and int format\r\n\r\n \"\"\"\r\n def _get_matrix(fid):\r\n hb = HBFile(fid)\r\n return hb.read_matrix()\r\n\r\n if isinstance(file, string_types):\r\n fid = open(file)\r\n try:\r\n return _get_matrix(fid)\r\n finally:\r\n fid.close()\r\n else:\r\n return _get_matrix(file)\r\n\r\n\r\ndef hb_write(file, m, hb_info=None):\r\n \"\"\"Write HB-format file.\r\n\r\n Parameters\r\n ----------\r\n file : str-like or file-like\r\n if a string-like object, file is the name of the file to read. If a\r\n file-like object, the data are read from it.\r\n m : sparse-matrix\r\n the sparse matrix to write\r\n hb_info : HBInfo\r\n contains the meta-data for write\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n Notes\r\n -----\r\n At the moment not the full Harwell-Boeing format is supported. Supported\r\n features are:\r\n\r\n - assembled, non-symmetric, real matrices\r\n - integer for pointer/indices\r\n - exponential format for float values, and int format\r\n\r\n \"\"\"\r\n if hb_info is None:\r\n hb_info = HBInfo.from_data(m)\r\n\r\n def _set_matrix(fid):\r\n hb = HBFile(fid, hb_info)\r\n return hb.write_matrix(m)\r\n\r\n if isinstance(file, string_types):\r\n fid = open(file, \"w\")\r\n try:\r\n return _set_matrix(fid)\r\n finally:\r\n fid.close()\r\n else:\r\n return _set_matrix(file)\r\n", "from __future__ import division, absolute_import, print_function\r\n\r\nimport os\r\nfrom distutils.core import Command\r\nfrom distutils.ccompiler import new_compiler\r\nfrom numpy.distutils.misc_util import get_cmd\r\n\r\nclass install_clib(Command):\r\n description = \"Command to install installable C libraries\"\r\n\r\n user_options = []\r\n\r\n def initialize_options(self):\r\n self.install_dir = None\r\n self.outfiles = []\r\n\r\n def finalize_options(self):\r\n self.set_undefined_options('install', ('install_lib', 'install_dir'))\r\n\r\n def run (self):\r\n build_clib_cmd = get_cmd(\"build_clib\")\r\n build_dir = build_clib_cmd.build_clib\r\n\r\n # We need the compiler to get the library name -> filename association\r\n if not build_clib_cmd.compiler:\r\n compiler = new_compiler(compiler=None)\r\n compiler.customize(self.distribution)\r\n else:\r\n compiler = build_clib_cmd.compiler\r\n\r\n for l in self.distribution.installed_libraries:\r\n target_dir = os.path.join(self.install_dir, l.target_dir)\r\n name = compiler.library_filename(l.name)\r\n source = os.path.join(build_dir, name)\r\n self.mkpath(target_dir)\r\n self.outfiles.append(self.copy_file(source, target_dir)[0])\r\n\r\n def get_outputs(self):\r\n return self.outfiles\r\n", "import numpy as np\r\nfrom numpy.linalg import lstsq\r\nfrom numpy.testing import (assert_allclose, assert_equal, assert_,\r\n run_module_suite, assert_raises)\r\nfrom scipy.sparse import rand\r\nfrom scipy.sparse.linalg import 
aslinearoperator\r\nfrom scipy.optimize import lsq_linear\r\n\r\n\r\nA = np.array([\r\n [0.171, -0.057],\r\n [-0.049, -0.248],\r\n [-0.166, 0.054],\r\n])\r\nb = np.array([0.074, 1.014, -0.383])\r\n\r\n\r\nclass BaseMixin(object):\r\n def __init__(self):\r\n self.rnd = np.random.RandomState(0)\r\n\r\n def test_dense_no_bounds(self):\r\n for lsq_solver in self.lsq_solvers:\r\n res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)\r\n assert_allclose(res.x, lstsq(A, b)[0])\r\n\r\n def test_dense_bounds(self):\r\n # Solutions for comparison are taken from MATLAB.\r\n lb = np.array([-1, -10])\r\n ub = np.array([1, 0])\r\n for lsq_solver in self.lsq_solvers:\r\n res = lsq_linear(A, b, (lb, ub), method=self.method,\r\n lsq_solver=lsq_solver)\r\n assert_allclose(res.x, lstsq(A, b)[0])\r\n\r\n lb = np.array([0.0, -np.inf])\r\n for lsq_solver in self.lsq_solvers:\r\n res = lsq_linear(A, b, (lb, np.inf), method=self.method,\r\n lsq_solver=lsq_solver)\r\n assert_allclose(res.x, np.array([0.0, -4.084174437334673]),\r\n atol=1e-6)\r\n\r\n lb = np.array([-1, 0])\r\n for lsq_solver in self.lsq_solvers:\r\n res = lsq_linear(A, b, (lb, np.inf), method=self.method,\r\n lsq_solver=lsq_solver)\r\n assert_allclose(res.x, np.array([0.448427311733504, 0]),\r\n atol=1e-15)\r\n\r\n ub = np.array([np.inf, -5])\r\n for lsq_solver in self.lsq_solvers:\r\n res = lsq_linear(A, b, (-np.inf, ub), method=self.method,\r\n lsq_solver=lsq_solver)\r\n assert_allclose(res.x, np.array([-0.105560998682388, -5]))\r\n\r\n ub = np.array([-1, np.inf])\r\n for lsq_solver in self.lsq_solvers:\r\n res = lsq_linear(A, b, (-np.inf, ub), method=self.method,\r\n lsq_solver=lsq_solver)\r\n assert_allclose(res.x, np.array([-1, -4.181102129483254]))\r\n\r\n lb = np.array([0, -4])\r\n ub = np.array([1, 0])\r\n for lsq_solver in self.lsq_solvers:\r\n res = lsq_linear(A, b, (lb, ub), method=self.method,\r\n lsq_solver=lsq_solver)\r\n assert_allclose(res.x, np.array([0.005236663400791, -4]))\r\n\r\n def test_dense_rank_deficient(self):\r\n A = np.array([[-0.307, -0.184]])\r\n b = np.array([0.773])\r\n lb = [-0.1, -0.1]\r\n ub = [0.1, 0.1]\r\n for lsq_solver in self.lsq_solvers:\r\n res = lsq_linear(A, b, (lb, ub), method=self.method,\r\n lsq_solver=lsq_solver)\r\n assert_allclose(res.x, [-0.1, -0.1])\r\n\r\n A = np.array([\r\n [0.334, 0.668],\r\n [-0.516, -1.032],\r\n [0.192, 0.384],\r\n ])\r\n b = np.array([-1.436, 0.135, 0.909])\r\n lb = [0, -1]\r\n ub = [1, -0.5]\r\n for lsq_solver in self.lsq_solvers:\r\n res = lsq_linear(A, b, (lb, ub), method=self.method,\r\n lsq_solver=lsq_solver)\r\n assert_allclose(res.optimality, 0, atol=1e-11)\r\n\r\n def test_full_result(self):\r\n lb = np.array([0, -4])\r\n ub = np.array([1, 0])\r\n res = lsq_linear(A, b, (lb, ub), method=self.method)\r\n\r\n assert_allclose(res.x, [0.005236663400791, -4])\r\n\r\n r = A.dot(res.x) - b\r\n assert_allclose(res.cost, 0.5 * np.dot(r, r))\r\n assert_allclose(res.fun, r)\r\n\r\n assert_allclose(res.optimality, 0.0, atol=1e-12)\r\n assert_equal(res.active_mask, [0, -1])\r\n assert_(res.nit < 15)\r\n assert_(res.status == 1 or res.status == 3)\r\n assert_(isinstance(res.message, str))\r\n assert_(res.success)\r\n\r\n\r\nclass SparseMixin(object):\r\n def test_sparse_and_LinearOperator(self):\r\n m = 5000\r\n n = 1000\r\n A = rand(m, n, random_state=0)\r\n b = self.rnd.randn(m)\r\n res = lsq_linear(A, b)\r\n assert_allclose(res.optimality, 0, atol=1e-6)\r\n\r\n A = aslinearoperator(A)\r\n res = lsq_linear(A, b)\r\n assert_allclose(res.optimality, 0, 
atol=1e-6)\r\n\r\n def test_sparse_bounds(self):\r\n m = 5000\r\n n = 1000\r\n A = rand(m, n, random_state=0)\r\n b = self.rnd.randn(m)\r\n lb = self.rnd.randn(n)\r\n ub = lb + 1\r\n res = lsq_linear(A, b, (lb, ub))\r\n assert_allclose(res.optimality, 0.0, atol=1e-8)\r\n\r\n res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13)\r\n assert_allclose(res.optimality, 0.0, atol=1e-8)\r\n\r\n res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')\r\n assert_allclose(res.optimality, 0.0, atol=1e-8)\r\n\r\n\r\nclass TestTRF(BaseMixin, SparseMixin):\r\n method = 'trf'\r\n lsq_solvers = ['exact', 'lsmr']\r\n\r\n\r\nclass TestBVLS(BaseMixin):\r\n method = 'bvls'\r\n lsq_solvers = ['exact']\r\n\r\n\r\nif __name__ == '__main__':\r\n run_module_suite()\r\n", "# -*- coding: utf-8 -*-\n\"\"\"\nThis example demonstrates writing a custom Node subclass for use with flowcharts.\n\nWe implement a couple of simple image processing nodes.\n\"\"\"\nimport initExample ## Add path to library (just for examples; you do not need this)\n\nfrom pyqtgraph.flowchart import Flowchart, Node\nimport pyqtgraph.flowchart.library as fclib\nfrom pyqtgraph.flowchart.library.common import CtrlNode\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pyqtgraph as pg\nimport numpy as np\n\napp = QtGui.QApplication([])\n\n## Create main window with a grid layout inside\nwin = QtGui.QMainWindow()\nwin.setWindowTitle('pyqtgraph example: FlowchartCustomNode')\ncw = QtGui.QWidget()\nwin.setCentralWidget(cw)\nlayout = QtGui.QGridLayout()\ncw.setLayout(layout)\n\n## Create an empty flowchart with a single input and output\nfc = Flowchart(terminals={\n 'dataIn': {'io': 'in'},\n 'dataOut': {'io': 'out'} \n})\nw = fc.widget()\n\nlayout.addWidget(fc.widget(), 0, 0, 2, 1)\n\n## Create two ImageView widgets to display the raw and processed data with contrast\n## and color control.\nv1 = pg.ImageView()\nv2 = pg.ImageView()\nlayout.addWidget(v1, 0, 1)\nlayout.addWidget(v2, 1, 1)\n\nwin.show()\n\n## generate random input data\ndata = np.random.normal(size=(100,100))\ndata = 25 * pg.gaussianFilter(data, (5,5))\ndata += np.random.normal(size=(100,100))\ndata[40:60, 40:60] += 15.0\ndata[30:50, 30:50] += 15.0\n#data += np.sin(np.linspace(0, 100, 1000))\n#data = metaarray.MetaArray(data, info=[{'name': 'Time', 'values': np.linspace(0, 1.0, len(data))}, {}])\n\n## Set the raw data as the input value to the flowchart\nfc.setInput(dataIn=data)\n\n\n## At this point, we need some custom Node classes since those provided in the library\n## are not sufficient. 
Each node will define a set of input/output terminals, a \n## processing function, and optionally a control widget (to be displayed in the \n## flowchart control panel)\n\nclass ImageViewNode(Node):\n \"\"\"Node that displays image data in an ImageView widget\"\"\"\n nodeName = 'ImageView'\n \n def __init__(self, name):\n self.view = None\n ## Initialize node with only a single input terminal\n Node.__init__(self, name, terminals={'data': {'io':'in'}})\n \n def setView(self, view): ## setView must be called by the program\n self.view = view\n \n def process(self, data, display=True):\n ## if process is called with display=False, then the flowchart is being operated\n ## in batch processing mode, so we should skip displaying to improve performance.\n \n if display and self.view is not None:\n ## the 'data' argument is the value given to the 'data' terminal\n if data is None:\n self.view.setImage(np.zeros((1,1))) # give a blank array to clear the view\n else:\n self.view.setImage(data)\n\n\n\n \n## We will define an unsharp masking filter node as a subclass of CtrlNode.\n## CtrlNode is just a convenience class that automatically creates its\n## control widget based on a simple data structure.\nclass UnsharpMaskNode(CtrlNode):\n \"\"\"Return the input data passed through pg.gaussianFilter.\"\"\"\n nodeName = \"UnsharpMask\"\n uiTemplate = [\n ('sigma', 'spin', {'value': 1.0, 'step': 1.0, 'range': [0.0, None]}),\n ('strength', 'spin', {'value': 1.0, 'dec': True, 'step': 0.5, 'minStep': 0.01, 'range': [0.0, None]}),\n ]\n def __init__(self, name):\n ## Define the input / output terminals available on this node\n terminals = {\n 'dataIn': dict(io='in'), # each terminal needs at least a name and\n 'dataOut': dict(io='out'), # to specify whether it is input or output\n } # other more advanced options are available\n # as well..\n \n CtrlNode.__init__(self, name, terminals=terminals)\n \n def process(self, dataIn, display=True):\n # CtrlNode has created self.ctrls, which is a dict containing {ctrlName: widget}\n sigma = self.ctrls['sigma'].value()\n strength = self.ctrls['strength'].value()\n output = dataIn - (strength * pg.gaussianFilter(dataIn, (sigma,sigma)))\n return {'dataOut': output}\n\n\n## To make our custom node classes available in the flowchart context menu,\n## we can either register them with the default node library or make a\n## new library.\n\n \n## Method 1: Register to global default library:\n#fclib.registerNodeType(ImageViewNode, [('Display',)])\n#fclib.registerNodeType(UnsharpMaskNode, [('Image',)])\n\n## Method 2: If we want to make our custom node available only to this flowchart,\n## then instead of registering the node type globally, we can create a new \n## NodeLibrary:\nlibrary = fclib.LIBRARY.copy() # start with the default node set\nlibrary.addNodeType(ImageViewNode, [('Display',)])\n# Add the unsharp mask node to two locations in the menu to demonstrate\n# that we can create arbitrary menu structures\nlibrary.addNodeType(UnsharpMaskNode, [('Image',), \n ('Submenu_test','submenu2','submenu3')])\nfc.setLibrary(library)\n\n\n## Now we will programmatically add nodes to define the function of the flowchart.\n## Normally, the user will do this manually or by loading a pre-generated\n## flowchart file.\n\nv1Node = fc.createNode('ImageView', pos=(0, -150))\nv1Node.setView(v1)\n\nv2Node = fc.createNode('ImageView', pos=(150, -150))\nv2Node.setView(v2)\n\nfNode = fc.createNode('UnsharpMask', pos=(0, 0))\nfc.connectTerminals(fc['dataIn'], 
fNode['dataIn'])\nfc.connectTerminals(fc['dataIn'], v1Node['data'])\nfc.connectTerminals(fNode['dataOut'], v2Node['data'])\nfc.connectTerminals(fNode['dataOut'], fc['dataOut'])\n\n\n\n## Start Qt event loop unless running in interactive mode or using pyside.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n" ]
[ [ "numpy.setbufsize", "numpy.all", "numpy.binary_repr", "numpy.where", "numpy.unique", "numpy.zeros", "numpy.testing.assert_raises", "numpy.chararray", "numpy.compat.sixu", "numpy.find_common_type", "numpy.array", "numpy.fromfile", "numpy.indices", "numpy.random.shuffle", "numpy.testing.assert_array_equal", "numpy.float_", "numpy.add", "numpy.object_", "numpy.asarray", "numpy.ndarray", "numpy.concatenate", "numpy.iinfo", "numpy.copyto", "numpy.compat.asbytes_nested", "numpy.reshape", "numpy.frompyfunc", "numpy.argmax", "numpy.float32", "numpy.rec.array", "numpy.random.rand", "numpy.testing.assert_", "numpy.divide.reduce", "numpy.errstate", "numpy.testing.assert_warns", "numpy.add.accumulate", "numpy.testing.run_module_suite", "numpy.maximum", "numpy.rec.fromarrays", "numpy.ones", "numpy.recarray", "numpy.empty", "numpy.take", "numpy.linspace", "numpy.divide.accumulate", "numpy.longdouble", "numpy.typeDict.values", "numpy.zeros_like", "numpy.bool_", "numpy.double", "numpy.testing.assert_equal", "numpy.hstack", "numpy.add.reduce", "numpy.lexsort", "numpy.array_str", "numpy.count_nonzero", "numpy.unicode_", "numpy.testing.assert_array_almost_equal", "numpy.format_parser", "numpy.nonzero", "numpy.char.array", "numpy.transpose", "numpy.single", "numpy.uint64", "numpy.dot", "numpy.exp2", "numpy.string_", "numpy.subtract.accumulate", "numpy.dtype", "numpy.lib.stride_tricks.as_strided", "numpy.random.randn", "numpy.any", "numpy.arange", "numpy.empty_like", "numpy.finfo", "numpy.testing.assert_almost_equal", "numpy.load", "numpy.int_", "numpy.subtract.reduce", "numpy.abs", "numpy.random.seed", "numpy.intp", "numpy.add.outer", "numpy.int32", "numpy.sort", "numpy.compat.asbytes", "numpy.sign", "numpy.random.normal", "numpy.fromstring", "numpy.float64", "numpy.testing.utils._assert_valid_refcount" ], [ "scipy.linalg.svd", "scipy.linalg.lapack.dsbevd", "numpy.sqrt", "scipy.linalg.schur", "numpy.all", "scipy.linalg.decomp._datacopied", "scipy.linalg.lapack.zgbtrs", "numpy.frombuffer", "scipy.linalg.diagsvd", "numpy.outer", "numpy.zeros", "numpy.testing.assert_raises", "numpy.array", "scipy.linalg.qr", "scipy.linalg.lu", "scipy.linalg.lapack.zhbevd", "numpy.testing.assert_array_equal", "numpy.shape", "scipy.linalg.lu_solve", "numpy.asarray", "numpy.seterr", "scipy.linalg.lapack.zgbtrf", "numpy.triu", "scipy.linalg.solve", "numpy.atleast_2d", "scipy.linalg.hessenberg", "numpy.random.rand", "numpy.testing.assert_", "scipy.linalg.svdvals", "scipy.linalg.eig", "numpy.testing.run_module_suite", "numpy.linalg.solve", "numpy.testing.dec.knownfailureif", "numpy.ones", "numpy.bmat", "scipy.linalg.rsf2csf", "scipy.linalg.rq", "numpy.empty", "numpy.diag", "numpy.matrix", "scipy.linalg.eigvals_banded", "numpy.conjugate", "scipy.linalg.lapack.dsbevx", "numpy.testing.assert_equal", "numpy.eye", "scipy.linalg.lapack.zhbevx", "scipy.linalg.eigh", "numpy.testing.assert_array_almost_equal", "scipy.linalg.qz", "scipy.linalg.orth", "scipy.linalg.eigvals", "numpy.transpose", "numpy.argsort", "numpy.conj", "scipy.linalg.eig_banded", "numpy.dot", "scipy.linalg.lapack.dgbtrf", "scipy.linalg.misc.norm", "numpy.testing.TestCase.__init__", "numpy.dtype", "scipy.linalg._testutils.assert_no_overwrite", "scipy.linalg.qr_multiply", "numpy.tril", "scipy.linalg.lapack.dsbev", "numpy.arange", "numpy.linalg.eig", "numpy.testing.assert_almost_equal", "scipy.linalg.ordqz", "scipy._lib.six.xrange", "numpy.identity", "numpy.iscomplex", "scipy.linalg.lu_factor", "scipy.linalg.lapack.dgbtrs", "numpy.random.random", "numpy.abs", 
"numpy.random.seed", "numpy.isfinite", "numpy.sort", "numpy.sign", "numpy.random.normal" ], [ "numpy.can_cast", "numpy.asarray", "numpy.issubdtype", "numpy.dtype", "numpy.ones", "numpy.asmatrix", "numpy.asanyarray", "scipy._lib.six.xrange", "numpy.ravel", "numpy.array", "numpy.zeros" ], [ "numpy.rollaxis", "numpy.dot", "numpy.diag", "numpy.sqrt", "numpy.asarray", "numpy.max", "numpy.any", "numpy.linalg.LinAlgError", "numpy.square", "numpy.tril_indices", "numpy.linalg.slogdet", "numpy.eye", "scipy.linalg.blas.drot", "scipy.special.multigammaln", "numpy.finfo", "scipy._lib._util.check_random_state", "numpy.copy", "scipy.linalg.lapack.get_lapack_funcs", "numpy.outer", "numpy.zeros", "numpy.log", "numpy.min", "numpy.append", "numpy.identity", "scipy.special.gammaln", "scipy.linalg.misc.LinAlgError", "numpy.sum", "scipy.misc.doccer.docformat", "numpy.asarray_chkfinite", "scipy.special.psi", "numpy.triu_indices", "numpy.ones", "numpy.sign", "numpy.isscalar", "numpy.diag_indices", "numpy.ndindex", "numpy.vstack" ], [ "numpy.testing.Tester" ], [ "numpy.testing.assert_equal", "numpy.testing.run_module_suite", "scipy.optimize.differential_evolution", "scipy.optimize._differentialevolution._make_random_gen", "numpy.arange", "numpy.testing.assert_string_equal", "scipy.optimize._differentialevolution.DifferentialEvolutionSolver", "numpy.all", "numpy.seterr", "numpy.testing.assert_almost_equal", "numpy.size", "numpy.testing.assert_raises", "numpy.argmin", "numpy.testing.assert_", "numpy.testing.assert_allclose", "numpy.array", "numpy.isinf" ], [ "numpy.ma.testutils.assert_", "numpy.sqrt", "scipy.stats.mstats.scoreatpercentile", "scipy.stats.zscore", "scipy.stats.mstats.tsem", "scipy.stats.mstats.ttest_1samp", "numpy.ma.masked_where", "scipy.stats.mstats.obrientransform", "scipy.stats.ttest_rel", "scipy.stats.kurtosistest", "scipy.stats.mstats.hmean", "scipy.stats.obrientransform", "scipy.stats.signaltonoise", "numpy.power", "scipy.stats.tvar", "scipy.stats.pearsonr", "scipy.stats.tmax", "scipy.stats.mstats.gmean", "scipy.stats.mstats.tmin", "scipy.stats.mstats.trimmed_stde", "scipy.stats.kurtosis", "numpy.array", "scipy.stats.mstats.zscore", "scipy.stats.mstats.find_repeats", "scipy.stats.normaltest", "scipy.stats.mstats.trimmed_mean", "scipy.stats.mstats.pearsonr", "scipy.stats.kendalltau", "scipy.stats.mstats.theilslopes", "scipy.stats.mstats.skewtest", "scipy.stats.ttest_ind", "scipy.stats.mstats.f_oneway", "numpy.vstack", "scipy.stats.mstats.pointbiserialr", "numpy.ma.testutils.assert_equal", "numpy.asarray", "scipy.stats.mstats.spearmanr", "numpy.ma.testutils.assert_allclose", "numpy.seterr", "scipy.stats.tmin", "scipy.stats.mstats.mode", "scipy.stats.mstats.winsorize", "scipy.stats.mstats.variation", "scipy.stats.mstats.signaltonoise", "numpy.reshape", "scipy.stats.trimboth", "numpy.ma.arange", "scipy.stats.mstats.kurtosistest", "scipy.stats.skew", "scipy.stats.mstats.kendalltau_seasonal", "scipy.stats.linregress", "numpy.random.rand", "numpy.ma.fix_invalid", "numpy.testing.run_module_suite", "scipy.stats.mstats.ks_twosamp", "numpy.ones", "scipy.stats.mstats.trim", "scipy.stats.sem", "numpy.linspace", "scipy.stats.zmap", "numpy.ma.testutils.assert_raises", "scipy.stats.mstats.kendalltau", "scipy.stats.tmean", "numpy.ma.testutils.assert_almost_equal", "scipy.stats.mstats.describe", "scipy.stats.describe", "scipy.stats.mstats.kurtosis", "scipy.stats.moment", "scipy.stats.tsem", "numpy.isnan", "scipy.stats.skewtest", "numpy.ma.testutils.assert_array_almost_equal", "scipy.stats.mstats.tvar", 
"scipy.stats.mstats.betai", "scipy.stats.mstats.zmap", "scipy.stats.mstats.sem", "scipy.stats.mstats.friedmanchisquare", "scipy.stats.mstats.moment", "scipy.stats.ttest_1samp", "scipy.stats.mstats.trimtail", "scipy.stats.mstats.kruskal", "scipy.stats.mstats.ttest_ind", "scipy.stats.mstats.tmax", "scipy.stats.mstats.mquantiles", "scipy.stats.mstats.tmean", "scipy.stats.mstats.linregress", "numpy.testing.TestCase.__init__", "scipy.stats.mstats.ttest_rel", "numpy.random.randn", "numpy.ma.array", "scipy.stats.spearmanr", "scipy.stats.find_repeats", "scipy.stats.mstats.mannwhitneyu", "numpy.arange", "scipy.stats.betai", "scipy.stats.mstats.rankdata", "scipy.stats.mstats.skew", "scipy.stats.mstats.normaltest", "scipy.stats.mstats.trimboth", "numpy.random.random", "numpy.abs", "numpy.random.seed", "scipy.stats.rankdata", "scipy.stats.variation", "numpy.sort" ], [ "numpy.testing.Tester" ], [ "numpy.arange", "numpy.eye", "scipy.ndimage.measurements.label", "numpy.identity", "scipy.ndimage.measurements.find_objects", "numpy.array", "numpy.zeros" ], [ "numpy.asarray", "scipy.sparse.linalg.interface.aslinearoperator", "scipy.sparse.linalg.interface.LinearOperator", "numpy.asmatrix", "numpy.asanyarray", "numpy.array", "numpy.zeros", "scipy.sparse.linalg.interface.IdentityOperator" ], [ "numpy.array", "scipy.optimize.root", "numpy.testing.assert_" ], [ "scipy.special.ivp", "scipy.special._ufuncs.i1", "scipy.special.k1e", "scipy.special._ufuncs.ndtri", "scipy.special.hankel1", "numpy.sin", "scipy.special.berp", "scipy.special.it2j0y0", "scipy.special._testutils.assert_tol_equal", "scipy.special._ufuncs.gammasgn", "scipy.special._ufuncs.dawsn", "scipy.special._ufuncs.chndtrinc", "scipy.special.pbdv", "scipy.special._ufuncs.pdtri", "scipy.special.polygamma", "scipy.special.sph_jn", "scipy.special.ellipe", "scipy.special.kvp", "scipy.special._ufuncs.nbdtri", "scipy.special.kerp_zeros", "scipy.special.agm", "scipy.special.genlaguerre", "scipy.special.fresnelc_zeros", "numpy.iinfo", "scipy.special._ufuncs.yve", "scipy.special._ufuncs.shichi", "scipy.special.y1p_zeros", "scipy.special.riccati_jn", "scipy.special.hankel2", "scipy.special._ufuncs.cbrt", "scipy.special._ufuncs.itairy", "scipy.special._ufuncs.rgamma", "scipy.special._ufuncs.kelvin", "scipy.special._ufuncs.pdtrik", "scipy.special._ufuncs.bdtrin", "scipy.special._ufuncs.jv", "scipy.special._ufuncs.obl_cv", "scipy.special._ufuncs.chndtrix", "numpy.tan", "scipy.special.kelvin", "numpy.random.rand", "scipy.special.xlogy", "scipy.special._ufuncs.hyp2f0", "scipy.special._ufuncs.pbdv", "scipy.special.sinc", "scipy.special._ufuncs.modstruve", "scipy.special._ufuncs.round", "scipy.special.ellipeinc", "scipy.special._ufuncs.chndtr", "numpy.sinh", "scipy.special.comb", "numpy.vectorize", "scipy.special._ufuncs.exp2", "scipy.special._ufuncs.nrdtrisd", "scipy.special.sph_in", "scipy.special.pdtrc", "scipy.special.jve", "scipy.special.nbdtrc", "scipy.special._ufuncs.pro_ang1", "scipy.special._ufuncs.chdtri", "scipy.special.ker", "scipy.special.gamma", "scipy.special._ufuncs.chdtrc", "scipy.special.ellipkinc", "numpy.isnan", "scipy.special.chdtri", "scipy.special._ufuncs.k1", "scipy.special._ufuncs.mathieu_modcem2", "scipy.special._ufuncs.kv", "scipy.special._ufuncs.btdtrib", "scipy.special.struve", "scipy.special.y0", "scipy.special.cosm1", "scipy.special.hyp2f1", "scipy.special.lpmv", "scipy.special.expm1", "numpy.random.pareto", "scipy.special.i1", "scipy.special._ufuncs.cotdg", "scipy.special._ufuncs.kn", "scipy.special._ufuncs.sici", 
"scipy.special._ufuncs.kolmogi", "numpy.testing.assert_almost_equal", "scipy.special._ufuncs.btdtria", "scipy.special._ufuncs.erf", "scipy.special.ellipj", "scipy.special._ufuncs.bei", "scipy.special.sindg", "numpy.testing.assert_array_almost_equal_nulp", "scipy.special._ufuncs.kei", "scipy.special._ufuncs.obl_rad1_cv", "scipy.special._ufuncs.itmodstruve0", "scipy.special._ufuncs.yn", "numpy.isfinite", "scipy.special.chdtr", "scipy.special._ufuncs.betaincinv", "numpy.row_stack", "scipy.special._ufuncs.spence", "scipy.special.ber_zeros", "scipy.special.berp_zeros", "scipy.special.rgamma", "scipy.special.pdtr", "numpy.testing.assert_approx_equal", "scipy.special._ufuncs.binom", "scipy.special.k0e", "scipy.special._ufuncs.kve", "scipy.special._ufuncs.sindg", "numpy.square", "scipy.special._ufuncs.erfi", "scipy.special.nbdtr", "scipy.special._ufuncs.fresnel", "scipy.special.dawsn", "scipy.special._ufuncs.airye", "scipy.special.airy", "numpy.zeros", "numpy.log", "scipy.special.exp10", "scipy.special._ufuncs.iv", "scipy.special._ufuncs.chdtr", "scipy.special._ufuncs.hyperu", "scipy.special._ufuncs.obl_rad2", "scipy.special.yn_zeros", "scipy.special._ufuncs.it2j0y0", "scipy.special.smirnov", "numpy.nan_to_num", "numpy.seterr", "scipy.special.h1vp", "scipy.special.kn", "scipy.special._ufuncs.beta", "scipy.special._ufuncs.k0e", "scipy.special.yn", "scipy.special._ufuncs.gdtrc", "scipy.special.erfcx", "scipy.special.erfc", "scipy.special.jvp", "scipy.special.jnp_zeros", "scipy.special.exp2", "scipy.special.log1p", "scipy.special.jnjnp_zeros", "numpy.testing.run_module_suite", "numpy.testing.dec.knownfailureif", "numpy.isposinf", "scipy.special.legendre", "scipy.special._ufuncs.nbdtrik", "scipy.special._ufuncs.nctdtridf", "scipy.special.lmbda", "scipy.special._ufuncs.tklmbda", "numpy.linspace", "scipy.special.gammainc", "scipy.special._ufuncs.modfresnelm", "scipy.special._ufuncs.i1e", "scipy.special._ufuncs.exp1", "scipy.special._ufuncs.ellipeinc", "numpy.testing.assert_equal", "scipy.special._ufuncs.hyp3f0", "scipy.special.lqmn", "scipy.special._ufuncs.chdtriv", "scipy.special._ufuncs.yv", "scipy.special.clpmn", "scipy.special.obl_cv_seq", "scipy.special.itj0y0", "scipy.special.fresnel", "scipy.special._ufuncs.nbdtrin", "scipy.special.perm", "scipy.special.it2i0k0", "numpy.cos", "scipy.special.tandg", "scipy.special.bdtr", "scipy.special._ufuncs.bdtrc", "scipy.special.keip", "scipy.special._ufuncs.gammaincc", "scipy.special._ufuncs.kolmogorov", "scipy.special.hyp2f0", "numpy.random.randn", "scipy.special.smirnovi", "scipy.special.lpn", "scipy.special._ufuncs.zetac", "numpy.finfo", "scipy.special.euler", "scipy.special.erf_zeros", "scipy.special.hankel1e", "scipy.special._ufuncs.pdtrc", "scipy.special.yve", "scipy.special.erf", "scipy.special.factorial2", "scipy.special.hyp1f1", "scipy.special._ufuncs.hankel2e", "scipy.special._ufuncs.stdtrit", "scipy.special._ufuncs.nctdtrinc", "scipy.special.betaln", "scipy.special.bdtri", "scipy.special.lqn", "scipy.special._ufuncs.exp10", "scipy.special._ufuncs.itj0y0", "numpy.random.random", "scipy.special._ufuncs.nctdtrit", "scipy.special.kelvin_zeros", "scipy.special._ufuncs.betaln", "scipy.special.expn", "scipy.special._ufuncs.y0", "scipy.special.j1", "scipy.special.pdtri", "numpy.sqrt", "scipy.special.airye", "scipy.special.erfinv", "scipy.special.hyp0f1", "scipy.special.chdtrc", "scipy.special._ufuncs.jve", "scipy.special.yv", "scipy.special._ufuncs.btdtri", "scipy.special.beip", "scipy.special._ufuncs.y1", "numpy.log1p", "scipy.special._ufuncs.obl_rad1", 
"scipy.special._ufuncs.beip", "scipy.special._ufuncs.pro_cv", "scipy.special._ufuncs.stdtridf", "scipy.special.gammaln", "scipy.special._ufuncs.fdtrc", "scipy.special._ufuncs.ncfdtri", "scipy.special.psi", "scipy.special._ufuncs.keip", "numpy.testing.assert_array_equal", "scipy.special.ai_zeros", "scipy.special._ufuncs.mathieu_a", "scipy.special._ufuncs.mathieu_modcem1", "scipy.special.ive", "scipy.special._ufuncs.gdtrib", "scipy.special._ufuncs.ber", "scipy.special._ufuncs.gdtrix", "numpy.arctan", "numpy.nextafter", "scipy.special._ufuncs.pro_ang1_cv", "scipy.special.sph_inkn", "scipy.special._ufuncs.nrdtrimn", "scipy.special._ufuncs.expm1", "numpy.argmax", "scipy.special._ufuncs.radian", "scipy.special._ufuncs.stdtr", "scipy.special._ufuncs.smirnovi", "scipy.special.pbdn_seq", "scipy.special.cotdg", "scipy.special.betainc", "scipy.special.diric", "scipy.special.laguerre", "numpy.cosh", "scipy.special.riccati_yn", "numpy.int64", "scipy.special._ufuncs.it2i0k0", "scipy.special.k0", "scipy.special.ker_zeros", "scipy.special.jn", "scipy.special._ufuncs.hankel2", "scipy.special._ufuncs.k1e", "scipy.special._ufuncs.ncfdtr", "scipy.special._ufuncs.pro_rad2_cv", "scipy.special._ufuncs.mathieu_b", "scipy.special._ufuncs.it2struve0", "scipy.special.yvp", "scipy.special.ellipk", "scipy.special._ufuncs.ndtr", "scipy.special.bei", "scipy.special.k1", "scipy.special._ufuncs.pbwa", "scipy.special._ufuncs.obl_ang1_cv", "scipy.special.sph_kn", "scipy.special.gammainccinv", "numpy.testing.assert_array_almost_equal", "scipy.special.fresnels_zeros", "numpy.logspace", "scipy.special.i0", "scipy.special._ufuncs._gammaln", "scipy.special._ufuncs.fdtr", "numpy.testing.assert_allclose", "numpy.broadcast_arrays", "scipy.special.ber", "scipy.special._ufuncs.itstruve0", "scipy.special._ufuncs.bdtri", "scipy.special.sph_yn", "scipy.special.round", "scipy.special.kei_zeros", "scipy.special.erfi", "scipy.special.kv", "scipy.special.zeta", "scipy.special._ufuncs.iti0k0", "scipy.special._ufuncs.cosdg", "scipy.special.cosdg", "numpy.dtype", "scipy.special.hankel2e", "scipy.special._ufuncs.airy", "scipy.special._ufuncs.kerp", "scipy.special._ufuncs.hyp1f2", "scipy.special.pbvv", "scipy.special.keip_zeros", "scipy.special.betaincinv", "scipy.special._ufuncs.ive", "scipy.special.h2vp", "scipy.special.pbdv_seq", "scipy.special._ufuncs.j0", "scipy.special._ufuncs.pbvv", "scipy.special._ufuncs.pdtr", "scipy.special._ufuncs.besselpoly", "scipy.special._ufuncs.mathieu_modsem2", "scipy.special._ufuncs.modfresnelp", "scipy.special._ufuncs.pro_rad2", "scipy.special._ufuncs.fdtri", "scipy.special._ufuncs.bdtr", "scipy.special.huber", "scipy.special._ufuncs.hankel1e", "scipy.special.bi_zeros", "scipy.special._ufuncs.berp", "scipy.special._testutils.assert_func_equal", "scipy.special.assoc_laguerre", "numpy.exp", "scipy.special.i0e", "scipy.special._ufuncs.betainc", "scipy.special.sph_harm", "scipy.special.kei", "scipy.special.fresnel_zeros", "numpy.real", "scipy.special.iv", "scipy.special.i1e", "scipy.special.iti0k0", "scipy.special._ufuncs.k0", "scipy.special.factorialk", "numpy.testing.assert_raises", "numpy.array", "scipy.special._ufuncs.gdtr", "scipy.special._ufuncs.gammainc", "numpy.isinf", "scipy.special.gammaincinv", "numpy.asarray", "scipy.special._ufuncs.gammainccinv", "scipy.special._ufuncs.smirnov", "scipy.special.y0_zeros", "scipy.special.jacobi", "scipy.special._ufuncs.obl_rad2_cv", "scipy.special.gammaincc", "scipy.special._ufuncs.mathieu_sem", "scipy.special.y1", "scipy.special.beip_zeros", "scipy.special.factorial", 
"scipy.special._ufuncs.erfc", "scipy.special._ufuncs.obl_ang1", "scipy.special._ufuncs.mathieu_cem", "scipy.special._ufuncs.mathieu_modsem1", "scipy.special.nbdtri", "scipy.special._ufuncs.ker", "scipy.special._ufuncs.ncfdtridfn", "scipy.special._ufuncs.ellipj", "scipy.special._ufuncs.gamma", "numpy.testing.assert_", "scipy.special._ufuncs.pro_rad1_cv", "scipy.special.sph_jnyn", "scipy.special.bernoulli", "scipy.special._ufuncs.gdtria", "scipy.special._ufuncs.i0e", "scipy.special._ufuncs.j1", "scipy.special.cbrt", "scipy.special._ufuncs.jn", "scipy.special.kve", "numpy.random.randint", "scipy.special.y1_zeros", "scipy.special._ufuncs.ellipkinc", "scipy.special.lpmn", "scipy.special._ufuncs.psi", "scipy.special.kerp", "scipy.special._ufuncs.nbdtr", "scipy.special.jv", "scipy.special._ufuncs.fdtridfd", "scipy.special.ellipkm1", "scipy.special.errprint", "scipy.special.bei_zeros", "scipy.special._ufuncs.chndtridf", "scipy.special.beta", "scipy.special._ufuncs.ncfdtridfd", "scipy.special._ufuncs.hankel1", "scipy.special._ufuncs.hyp1f1", "scipy.special._ufuncs.nctdtr", "scipy.special._ufuncs.i0", "scipy.special.jn_zeros", "scipy.special.mathieu_even_coef", "scipy.special._ufuncs.tandg", "scipy.special._ufuncs.ncfdtrinc", "scipy.special.ynp_zeros", "scipy.special._ufuncs.ellipe", "scipy._lib._version.NumpyVersion", "numpy.ones_like", "numpy.arange", "scipy.special._ufuncs.pro_rad1", "scipy.special._ufuncs.expi", "scipy.special._ufuncs.expn", "scipy.special._ufuncs.nbdtrc", "scipy.special._ufuncs.cosm1", "scipy.special._ufuncs.bdtrik", "scipy.special.pro_cv_seq", "scipy.special.hyperu", "scipy.special._ufuncs.btdtr", "scipy.special.radian", "scipy.special.j0", "scipy.special._ufuncs.struve", "scipy.special.bdtrc", "numpy.abs", "numpy.random.seed", "scipy.special._ufuncs.hyp2f1", "scipy.special._ufuncs.lpmv", "scipy.special.wofz", "scipy.special.erfcinv", "scipy.special.jnyn_zeros" ], [ "numpy.convolve", "numpy.rollaxis", "numpy.square", "numpy.linalg.eigvals", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.arange", "numpy.cos", "numpy.sort", "numpy.ones", "numpy.all", "numpy.linalg.lstsq", "numpy.finfo", "numpy.iterable", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.issubdtype", "scipy.fftpack.fft", "numpy.random.randn", "scipy.fftpack.fftfreq", "numpy.exp", "scipy.fftpack.ihilbert", "numpy.testing.assert_equal", "scipy.fftpack.diff", "numpy.arange", "numpy.sin", "numpy.testing.assert_array_almost_equal", "scipy.fftpack.ifft", "scipy.fftpack.tilbert", "numpy.tanh", "numpy.sum", "numpy.testing.run_module_suite", "scipy.fftpack.hilbert", "numpy.random.random", "numpy.random.seed", "scipy.fftpack.itilbert", "numpy.cos", "numpy.sign" ], [ "numpy.dot", "scipy.linalg.svd", "numpy.asarray", "numpy.issubdtype", "scipy._lib._util._asarray_validated", "numpy.dtype", "numpy.any", "numpy.square", "numpy.ones_like", "numpy.greater", "numpy.eye", "numpy.finfo", "numpy.atleast_1d", "scipy._lib._util.getargspec_no_self", "numpy.zeros", "scipy._lib._util._lazywhere", "numpy.transpose", "numpy.sum", "numpy.asarray_chkfinite", "numpy.abs", "numpy.isfinite", "numpy.shape" ], [ "numpy.sqrt", "scipy.linalg.invhilbert", "scipy.linalg.hilbert", "scipy.fftpack.fft", "numpy.testing.assert_equal", "scipy.linalg.hadamard", "scipy.linalg.dft", "numpy.arange", "numpy.eye", "numpy.linalg.cond", "numpy.copy", "scipy.linalg.tri", "scipy.linalg.hankel", "scipy.linalg.leslie", "scipy.linalg.companion", "scipy.linalg.kron", "scipy.linalg.triu", "scipy._lib.six.xrange", "numpy.testing.assert_raises", 
"numpy.array", "scipy.linalg.tril", "numpy.testing.run_module_suite", "scipy.linalg.toeplitz", "scipy.linalg.block_diag", "numpy.add.outer", "numpy.ones", "numpy.testing.assert_array_equal", "scipy.linalg.helmert", "scipy.special.comb", "scipy.linalg.circulant", "scipy.linalg.invpascal", "scipy.linalg.pascal" ], [ "numpy.dot", "numpy.hstack", "numpy.reshape", "numpy.linalg.inv", "numpy.eye", "numpy.asarray", "numpy.linalg.LinAlgError" ], [ "numpy.resize", "numpy.recarray.__new__", "numpy.asarray", "numpy.ndarray.__new__", "numpy.ma.getdata", "numpy.ma.default_fill_value", "numpy.dtype", "numpy.ma.make_mask_descr", "numpy.core.records.fromarrays", "numpy.ma.array", "numpy.core.records.fromrecords", "numpy.ma.getmaskarray", "numpy.ma.MaskedArray.__setitem__", "numpy.reshape", "numpy.size", "numpy.ndarray.view", "numpy.ma.make_mask_none", "numpy.array", "numpy.recarray", "numpy.ndarray.__getattribute__", "numpy.ma.filled", "numpy.ma.MAError", "numpy.ma.masked_array", "numpy.ndarray.__setstate__", "numpy.empty" ], [ "scipy.spatial.distance.is_valid_y", "numpy.asarray", "numpy.arange", "numpy.unique", "scipy._lib.six.xrange", "scipy.spatial.distance.pdist", "numpy.random.rand", "numpy.searchsorted", "numpy.floor", "scipy.spatial.distance.num_obs_y", "numpy.array", "numpy.issubsctype", "numpy.zeros", "numpy.where" ], [ "numpy.convolve", "numpy.rollaxis", "numpy.square", "numpy.linalg.eigvals", "numpy.asarray", "numpy.sort", "numpy.ones", "numpy.all", "numpy.linalg.lstsq", "numpy.finfo", "numpy.iterable", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.testing.Tester" ], [ "scipy.weave.inline_tools.inline" ], [ "scipy.special.sinc", "numpy.hstack", "scipy.linalg.pinv", "numpy.fft.irfft", "scipy.linalg.toeplitz", "numpy.linspace", "numpy.sinc", "numpy.asarray", "numpy.arange", "numpy.cos", "numpy.finfo", "numpy.atleast_1d", "numpy.diff", "numpy.interp", "scipy.linalg.hankel", "numpy.exp", "numpy.sum" ], [ "numpy.log", "scipy.special.gamma", "numpy.meshgrid", "numpy.logspace", "scipy.special.loggamma", "scipy.special._testutils.FuncData", "scipy.special.gammaln", "numpy.array" ], [ "numpy.testing.Tester" ], [ "numpy.linspace", "numpy.random.multivariate_normal", "scipy.stats.gaussian_kde", "numpy.random.randn", "scipy.stats.gaussian_kde.__init__", "numpy.arange", "numpy.linalg.det", "numpy.testing.assert_almost_equal", "numpy.testing.assert_array_almost_equal", "numpy.log", "numpy.power", "numpy.testing.assert_array_almost_equal_nulp", "numpy.linalg.inv", "numpy.atleast_2d", "numpy.testing.assert_raises", "numpy.array", "numpy.sum", "numpy.testing.run_module_suite", "scipy.stats.norm.pdf", "numpy.random.seed", "numpy.dstack" ], [ "numpy.hstack", "numpy.nonzero", "numpy.unique", "numpy.asarray", "numpy.cumsum", "numpy.concatenate", "numpy.asanyarray", "numpy.diff", "numpy.empty" ], [ "numpy.testing.run_module_suite", "scipy.linalg.cython_lapack._test_dlamch", "scipy.linalg.lapack.slamch", "scipy.linalg.lapack.dlamch", "scipy.linalg.cython_lapack._test_slamch" ], [ "scipy.sparse.csc_matrix", "scipy.io.harwell_boeing._fortran_format_parser.FortranFormatParser", "numpy.abs", "numpy.max", "numpy.fromstring", "numpy.isrealobj" ], [ "numpy.distutils.misc_util.get_cmd" ], [ "numpy.testing.assert_equal", "numpy.dot", "numpy.testing.run_module_suite", "scipy.sparse.rand", "numpy.linalg.lstsq", "scipy.sparse.linalg.aslinearoperator", "numpy.testing.assert_", "numpy.testing.assert_allclose", "numpy.array", "scipy.optimize.lsq_linear", "numpy.random.RandomState" ], [ "numpy.random.normal", "numpy.zeros" 
] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.18", "0.17", "0.19" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.10", "1.3", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.10", "1.3", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.4", "1.3", "0.19", "0.18", "1.2", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.12" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.18", "0.19" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], 
"numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.7", "1.0", "0.17", "1.2", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NunoEdgarGFlowHub/torchbearer
[ "940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0", "940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0", "940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0" ]
[ "torchbearer/callbacks/weight_decay.py", "torchbearer/callbacks/between_class.py", "tests/metrics/test_aggregators.py" ]
[ "import torchbearer\n\nfrom torchbearer.callbacks import Callback\n\nimport torch\n\n\nclass WeightDecay(Callback):\n \"\"\"Create a WeightDecay callback which uses the given norm on the given parameters and with the given decay rate.\n If params is None (default) then the parameters will be retrieved from the model.\n\n Example: ::\n\n >>> from torchbearer import Trial\n >>> from torchbearer.callbacks import WeightDecay\n\n # Example Trial which runs a trial with weight decay on the model\n >>> decay = WeightDecay()\n >>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)\n\n Args:\n rate (float): The decay rate or lambda\n p (int): The norm level\n params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a\n single Tensor that will have gradients normalized, otherwise this is retrieved from state\n\n State Requirements:\n - :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method\n - :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented\n \"\"\"\n def __init__(self, rate=5e-4, p=2, params=None):\n super(WeightDecay, self).__init__()\n\n self.p = p\n self.params = params\n self.rate = rate\n\n def on_start(self, state):\n \"\"\"Retrieve params from state['model'] if required.\n\n Args:\n state (dict): The :class:`.Trial` state\n \"\"\"\n if self.params is None:\n self.params = state[torchbearer.MODEL].parameters()\n\n def on_criterion(self, state):\n \"\"\"Calculate the decay term and add to state['loss'].\n\n Args:\n state (dict): The :class:`.Trial` state\n \"\"\"\n for param in self.params:\n state[torchbearer.LOSS] += self.rate * torch.norm(param, self.p)\n\n\nclass L1WeightDecay(WeightDecay):\n \"\"\"WeightDecay callback which uses an L1 norm with the given rate and parameters. If params is None (default) then\n the parameters will be retrieved from the model.\n\n Example: ::\n\n >>> from torchbearer import Trial\n >>> from torchbearer.callbacks import L1WeightDecay\n\n # Example Trial which runs a trial with weight decay on the model using an L1 norm\n >>> decay = L1WeightDecay()\n >>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)\n\n Args:\n rate (float): The decay rate or lambda\n params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a\n single Tensor that will have gradients normalized, otherwise this is retrieved from state\n\n State Requirements:\n - :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method\n - :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented\n \"\"\"\n def __init__(self, rate=5e-4, params=None):\n super(L1WeightDecay, self).__init__(rate=rate, p=1, params=params)\n\n\nclass L2WeightDecay(WeightDecay):\n \"\"\"WeightDecay callback which uses an L2 norm with the given rate and parameters. 
If params is None (default) then\n the parameters will be retrieved from the model.\n\n Example: ::\n\n >>> from torchbearer import Trial\n >>> from torchbearer.callbacks import L2WeightDecay\n\n # Example Trial which runs a trial with weight decay on the model using an L2 norm\n >>> decay = L2WeightDecay()\n >>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)\n\n Args:\n rate (float): The decay rate or lambda\n params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a\n single Tensor that will have gradients normalized, otherwise this is retrieved from state\n\n State Requirements:\n - :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method\n - :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented\n \"\"\"\n def __init__(self, rate=5e-4, params=None):\n super(L2WeightDecay, self).__init__(rate=rate, p=2, params=params)\n", "import torchbearer\nfrom torchbearer import Callback\nimport torch\nimport torch.nn.functional as F\nfrom torch.distributions import Beta\n\nfrom torchbearer.bases import cite\n\nbc = \"\"\"\n@inproceedings{tokozume2018between,\n title={Between-class learning for image classification},\n author={Tokozume, Yuji and Ushiku, Yoshitaka and Harada, Tatsuya},\n booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},\n pages={5486--5494},\n year={2018}\n}\n\"\"\"\n\n\n@cite(bc)\nclass BCPlus(Callback):\n \"\"\"BC+ callback which mixes images by treating them as waveforms. For standard BC, see :class:`.Mixup`.\n This callback can optionally convert labels to one hot before combining them according to the lambda parameters,\n sampled from a beta distribution, use alpha=1 to replicate the paper. Use with :meth:`BCPlus.bc_loss` or set\n `mixup_loss = True` and use :meth:`.Mixup.mixup_loss`.\n\n .. note::\n\n This callback first sets all images to have zero mean. Consider adding an offset (e.g. 0.5) back before\n visualising.\n\n Example: ::\n\n >>> from torchbearer import Trial\n >>> from torchbearer.callbacks import BCPlus\n\n # Example Trial which does BCPlus regularisation\n >>> bcplus = BCPlus(classes=10)\n >>> trial = Trial(None, criterion=BCPlus.bc_loss, callbacks=[bcplus], metrics=['acc'])\n\n Args:\n mixup_loss (bool): If True, the lambda and targets will be stored for use with the mixup loss function.\n alpha (float): The alpha value for the beta distribution.\n classes (int): The number of classes for conversion to one hot.\n\n State Requirements:\n - :attr:`torchbearer.state.X`: State should have the current data stored and correctly normalised\n - :attr:`torchbearer.state.Y_TRUE`: State should have the current data stored\n \"\"\"\n\n def __init__(self, mixup_loss=False, alpha=1, classes=-1):\n super(BCPlus, self).__init__()\n self.mixup_loss = mixup_loss\n self.classes = classes\n self.dist = Beta(torch.tensor([float(alpha)]), torch.tensor([float(alpha)]))\n\n @staticmethod\n def bc_loss(state):\n \"\"\"The KL divergence between the outputs of the model and the ratio labels. 
Model ouputs should be un-normalised\n logits as this function performs a log_softmax.\n\n Args:\n state: The current :class:`Trial` state.\n \"\"\"\n prediction, target = state[torchbearer.Y_PRED], state[torchbearer.Y_TRUE]\n\n entropy = - (target[target.nonzero().split(1, dim=1)] * target[target.nonzero().split(1, dim=1)].log()).sum()\n cross = - (target * F.log_softmax(prediction, dim=1)).sum()\n\n return (cross - entropy) / prediction.size(0)\n\n def _to_one_hot(self, target):\n if target.dim() == 1:\n target = target.unsqueeze(1)\n one_hot = torch.zeros_like(target).repeat(1, self.classes)\n one_hot.scatter_(1, target, 1)\n return one_hot\n return target.float()\n\n def on_sample(self, state):\n super(BCPlus, self).on_sample(state)\n\n lam = self.dist.sample().to(state[torchbearer.DEVICE])\n\n permutation = torch.randperm(state[torchbearer.X].size(0))\n\n batch1 = state[torchbearer.X]\n batch1 = batch1 - batch1.view(batch1.size(0), -1).mean(1, keepdim=True).view(*tuple([batch1.size(0)] + [1] * (batch1.dim() - 1)))\n g1 = batch1.view(batch1.size(0), -1).std(1, keepdim=True).view(*tuple([batch1.size(0)] + [1] * (batch1.dim() - 1)))\n\n batch2 = batch1[permutation]\n g2 = g1[permutation]\n\n p = 1. / (1 + ((g1 / g2) * ((1 - lam) / lam)))\n\n state[torchbearer.X] = (batch1 * p + batch2 * (1 - p)) / (p.pow(2) + (1 - p).pow(2)).sqrt()\n\n if not self.mixup_loss:\n target = self._to_one_hot(state[torchbearer.TARGET]).float()\n state[torchbearer.Y_TRUE] = lam * target + (1 - lam) * target[permutation]\n else:\n state[torchbearer.MIXUP_LAMBDA] = lam\n state[torchbearer.MIXUP_PERMUTATION] = permutation\n state[torchbearer.Y_TRUE] = (state[torchbearer.Y_TRUE], state[torchbearer.Y_TRUE][state[torchbearer.MIXUP_PERMUTATION]])\n\n def on_sample_validation(self, state):\n super(BCPlus, self).on_sample_validation(state)\n if not self.mixup_loss:\n state[torchbearer.TARGET] = self._to_one_hot(state[torchbearer.TARGET]).float()\n", "import unittest\n\nfrom mock import Mock, call\n\nfrom torchbearer.metrics import RunningMean, Metric, RunningMetric, Mean, Std, Var\n\nimport torch\n\n\nclass TestVar(unittest.TestCase):\n def test_variance_dim(self):\n var = Var('test', dim=0)\n var.process(torch.Tensor([[1., 2.], [3., 4.]]))\n var.process(torch.Tensor([[4., 3.], [2., 1.]]))\n var.process(torch.Tensor([[1., 1.], [1., 1.]]))\n\n res = var.process_final()\n self.assertTrue(len(res) == 2)\n for m in res:\n self.assertTrue(abs(m - 1.6000) < 0.0001)\n\n\nclass TestStd(unittest.TestCase):\n def setUp(self):\n self._metric = Metric('test')\n self._metric.process = Mock()\n self._metric.process.side_effect = [torch.zeros(torch.Size([])),\n torch.FloatTensor([0.1, 0.2, 0.3]),\n torch.FloatTensor([0.4, 0.5, 0.6]),\n torch.FloatTensor([0.7, 0.8, 0.9]),\n torch.ones(torch.Size([]))]\n\n self._std = Std('test', unbiased=False)\n self._std.reset({})\n self._target = 0.31622776601684\n\n def test_train(self):\n self.setUp()\n self._std.train()\n for i in range(5):\n self._std.process(self._metric.process())\n result = self._std.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_validate(self):\n self.setUp()\n self._std.eval()\n for i in range(5):\n self._std.process(self._metric.process())\n result = self._std.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_precision_error(self):\n self.setUp()\n self._std.train()\n val = torch.tensor([0.55])\n for i in range(2):\n self._std.process(val)\n\n result = self._std.process_final({})\n 
self.assertEqual(0, result)\n\n def setUpMoreDims(self):\n self._metric = Metric('test')\n self._metric.process = Mock()\n self._metric.process.side_effect = [torch.zeros(torch.Size([])),\n torch.FloatTensor([[0.1, 0.2, 0.3], [1.1, 1.2, 1.3]]),\n torch.FloatTensor([[0.4, 0.5, 0.6], [1.4, 1.5, 1.6]]),\n torch.FloatTensor([[0.7, 0.8, 0.9], [1.7, 1.8, 1.9]]),\n torch.ones(torch.Size([]))]\n self._std = Std('test', unbiased=False)\n self._std.reset({})\n self._target = 0.57662804083742\n\n def test_more_dims(self):\n self.setUpMoreDims()\n for i in range(5):\n self._std.process(self._metric.process())\n result = self._std.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_std_dim(self):\n std = Std('test', dim=0)\n std.process(torch.Tensor([[1., 2.], [3., 4.]]))\n std.process(torch.Tensor([[4., 3.], [2., 1.]]))\n std.process(torch.Tensor([[1., 1.], [1., 1.]]))\n\n res = std.process_final()\n self.assertTrue(len(res) == 2)\n for m in res:\n self.assertTrue(abs(m - 1.2649) < 0.0001)\n\n\nclass TestMean(unittest.TestCase):\n def setUp(self):\n self._metric = Metric('test')\n self._metric.process = Mock()\n self._metric.process.side_effect = [torch.zeros(torch.Size([])),\n torch.FloatTensor([0.1, 0.2, 0.3]),\n torch.FloatTensor([0.4, 0.5, 0.6]),\n torch.FloatTensor([0.7, 0.8, 0.9]),\n torch.ones(torch.Size([]))]\n\n self._mean = Mean('test')\n self._mean.reset({})\n self._target = 0.5\n\n def test_train_dict(self):\n self.setUp()\n self._mean.train()\n for i in range(5):\n self._mean.process(self._metric.process())\n result = self._mean.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_validate_dict(self):\n self.setUp()\n self._mean.eval()\n for i in range(5):\n self._mean.process(self._metric.process())\n result = self._mean.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def setUpMoreDims(self):\n self._metric = Metric('test')\n self._metric.process = Mock()\n self._metric.process.side_effect = [torch.zeros(torch.Size([])),\n torch.FloatTensor([[0.1, 0.2, 0.3], [1.1, 1.2, 1.3]]),\n torch.FloatTensor([[0.4, 0.5, 0.6], [1.4, 1.5, 1.6]]),\n torch.FloatTensor([[0.7, 0.8, 0.9], [1.7, 1.8, 1.9]]),\n torch.ones(torch.Size([]))]\n self._mean = Mean('test')\n self._mean.reset({})\n self._target = 0.95\n\n def test_more_dims(self):\n self.setUpMoreDims()\n for i in range(5):\n self._mean.process(self._metric.process())\n result = self._mean.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_mean_dim(self):\n mean = Mean('test', dim=0)\n mean.process(torch.Tensor([[1., 2.], [3., 4.]]))\n mean.process(torch.Tensor([[4., 3.], [2., 1.]]))\n mean.process(torch.Tensor([[1., 1.], [1., 1.]]))\n\n res = mean.process_final()\n self.assertTrue(len(res) == 2)\n for m in res:\n self.assertTrue(abs(m - 2.0) < 0.0001)\n\n\nclass TestRunningMetric(unittest.TestCase):\n def setUp(self):\n self._metric = RunningMetric('test', batch_size=5, step_size=5)\n self._metric.reset({})\n self._metric._process_train = Mock(return_value=3)\n self._metric._step = Mock(return_value='output')\n\n def test_train_called_with_state(self):\n self._metric.train()\n self._metric.process({'test': -1})\n self._metric._process_train.assert_called_with({'test': -1})\n\n def test_cache_one_step(self):\n self._metric.train()\n for i in range(6):\n self._metric.process({})\n self._metric._step.assert_has_calls([call([3]), call([3, 3, 3, 3, 3])])\n\n def test_empty_methods(self):\n metric = 
RunningMetric('test')\n self.assertRaises(NotImplementedError, lambda: metric._step(['test']) is None)\n self.assertRaises(NotImplementedError, lambda: metric._process_train(['test']) is None)\n\n\nclass TestRunningMean(unittest.TestCase):\n def setUp(self):\n self._metric = Metric('test')\n self._mean = RunningMean('test')\n self._cache = [torch.Tensor([1.0]), torch.Tensor([1.5]), torch.Tensor([2.0])]\n self._target = 1.5\n\n def test_train(self):\n result = self._mean._process_train(torch.FloatTensor([1.0, 1.5, 2.0]))\n self.assertAlmostEqual(self._target, result, 3, 0.002)\n\n def test_step(self):\n result = self._mean._step(self._cache)\n self.assertEqual(self._target, result)\n\n def test_dims(self):\n mean = RunningMean('test', dim=0)\n cache = [mean._process_train(torch.Tensor([[1., 2.], [3., 4.]])),\n mean._process_train(torch.Tensor([[4., 3.], [2., 1.]])),\n mean._process_train(torch.Tensor([[1., 1.], [1., 1.]]))]\n\n res = mean._step(cache)\n self.assertTrue(len(res) == 2)\n for m in res:\n self.assertTrue(abs(m - 2.0) < 0.0001)\n" ]
[ [ "torch.norm" ], [ "torch.zeros_like", "torch.nn.functional.log_softmax" ], [ "torch.Size", "torch.FloatTensor", "torch.Tensor", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
datalayer-externals/papermill-scrapbook
[ "911220a26c7f6606f6370a75a4cdac4284675bdc" ]
[ "scrapbook/models.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nmodels.py\n\nProvides the various model wrapper objects for scrapbook\n\"\"\"\nfrom __future__ import unicode_literals\nimport os\nimport copy\nimport nbformat\nimport collections\nimport pandas as pd\n\nfrom six import string_types\nfrom collections import OrderedDict\nfrom IPython.display import display as ip_display, Markdown\n\n# We lean on papermill's readers to connect to remote stores\nfrom papermill.iorw import papermill_io\n\nfrom .scraps import Scrap, Scraps, payload_to_scrap, scrap_to_payload\nfrom .schemas import GLUE_PAYLOAD_PREFIX, RECORD_PAYLOAD_PREFIX\nfrom .encoders import registry as encoder_registry\nfrom .exceptions import ScrapbookException\nfrom .utils import kernel_required, deprecated\n\ntry:\n from urllib.parse import urlparse # Py3\nexcept ImportError:\n from urlparse import urlparse # Py2\n\n\ndef merge_dicts(dicts):\n iterdicts = iter(dicts)\n outcome = next(iterdicts).copy()\n for d in iterdicts:\n outcome.update(d)\n return outcome\n\n\nclass Notebook(object):\n \"\"\"\n Representation of a notebook. This model is quasi-compatible with the\n nbformat NotebookNode object in that it support access to the v4\n required fields from nbformat's json schema. For complete access to\n normal nbformat operations, use the `node` attribute of this model.\n\n Parameters\n ----------\n node_or_path : `nbformat.NotebookNode`, str\n a notebook object, or a path to a notebook object\n \"\"\"\n\n def __init__(self, node_or_path):\n if isinstance(node_or_path, string_types):\n path = urlparse(node_or_path).path\n if not os.path.splitext(path)[-1].endswith('ipynb'):\n raise Warning(\n \"Requires an '.ipynb' file extension. Provided path: '{}'\".format(\n node_or_path\n )\n )\n self.path = node_or_path\n self.node = nbformat.reads(papermill_io.read(node_or_path), as_version=4)\n else:\n self.path = \"\"\n self.node = node_or_path\n\n # Memoized traits\n self._scraps = None\n self._outputs = None\n\n def copy(self):\n cp = Notebook(self.node.copy())\n cp.path = self.path\n return cp\n\n # nbformat mirroring properties\n @property\n def metadata(self):\n return self.node.metadata\n\n @property\n def nbformat_minor(self):\n return self.node.nbformat_minor\n\n @property\n def nbformat(self):\n return self.node.nbformat\n\n @property\n def cells(self):\n return self.node.cells\n\n @property\n def filename(self):\n \"\"\"str: filename found a the specified path\"\"\"\n return os.path.basename(self.path)\n\n @property\n def directory(self):\n \"\"\"str: directory name found for a notebook (nb)\"\"\"\n return os.path.dirname(self.path)\n\n @property\n def parameters(self):\n \"\"\"dict: parameters stored in the notebook metadata\"\"\"\n return self.metadata.get(\"papermill\", {}).get(\"parameters\", {})\n\n def _extract_papermill_output_data(self, sig, payload):\n if sig.startswith(RECORD_PAYLOAD_PREFIX):\n # Fetch '+json' and strip the leading '+'\n encoder = sig.split(RECORD_PAYLOAD_PREFIX, 1)[1][1:]\n # First key is the only named payload\n for name, data in payload.items():\n return encoder_registry.decode(Scrap(name, data, encoder))\n\n def _extract_output_data_scraps(self, output):\n output_scraps = Scraps()\n for sig, payload in output.get(\"data\", {}).items():\n # Backwards compatibility for papermill\n scrap = self._extract_papermill_output_data(sig, payload)\n if scrap is None and sig.startswith(GLUE_PAYLOAD_PREFIX):\n scrap = encoder_registry.decode(payload_to_scrap(payload))\n if scrap:\n output_scraps[scrap.name] = scrap\n\n return 
output_scraps\n\n def _extract_output_displays(self, output):\n output_displays = OrderedDict()\n # Backwards compatibility for papermill\n metadata = output.get(\"metadata\", {})\n if \"papermill\" in metadata:\n output_name = output.metadata[\"papermill\"].get(\"name\")\n if output_name:\n output_displays[output_name] = output\n # Only grab outputs that are displays\n elif metadata.get(\"scrapbook\", {}).get(\"display\"):\n output_name = output.metadata[\"scrapbook\"].get(\"name\")\n if output_name:\n output_displays[output_name] = output\n\n return output_displays\n\n def _fetch_scraps(self):\n \"\"\"Returns a dictionary of the data recorded in a notebook.\"\"\"\n scraps = Scraps()\n\n for cell in self.cells:\n for output in cell.get(\"outputs\", []):\n output_data_scraps = self._extract_output_data_scraps(output)\n output_displays = self._extract_output_displays(output)\n\n # Combine displays with data while trying to preserve ordering\n output_scraps = Scraps(\n [\n # Hydrate with output_displays\n (\n scrap.name,\n Scrap(\n scrap.name,\n scrap.data,\n scrap.encoder,\n output_displays.get(scrap.name),\n ),\n )\n for scrap in output_data_scraps.values()\n ]\n )\n for name, display in output_displays.items():\n if name not in output_scraps:\n output_scraps[name] = Scrap(name, None, \"display\", display)\n scraps.update(output_scraps)\n\n return scraps\n\n @property\n def scraps(self):\n \"\"\"dict: a dictionary of data found in the notebook\"\"\"\n if self._scraps is None:\n self._scraps = self._fetch_scraps()\n return self._scraps\n\n @property\n def cell_timing(self):\n \"\"\"list: a list of cell execution timings in cell order\"\"\"\n return [\n # TODO: Other timing conventions?\n cell.metadata.get(\"papermill\", {}).get(\"duration\", 0.0)\n if cell.get(\"execution_count\")\n else None\n for cell in self.cells\n ]\n\n @property\n def execution_counts(self):\n \"\"\"list: a list of cell execution counts in cell order\"\"\"\n return [cell.get(\"execution_count\") for cell in self.cells]\n\n @property\n @deprecated('0.4.0', '`metrics`')\n def papermill_metrics(self):\n return self.metrics\n\n @property\n def metrics(self):\n \"\"\"pandas dataframe: dataframe of cell execution counts and times\"\"\"\n df = pd.DataFrame(columns=[\"filename\", \"cell\", \"value\", \"type\"])\n\n for i, cell in enumerate(self.cells):\n execution_count = cell.get(\"execution_count\")\n if not execution_count:\n continue\n name = \"Out [{}]\".format(str(execution_count))\n value = cell.metadata.get(\"papermill\", {}).get(\"duration\", 0.0)\n df.loc[i] = self.filename, name, value, \"time (s)\"\n return df\n\n @property\n def parameter_dataframe(self):\n \"\"\"pandas dataframe: dataframe of notebook parameters\"\"\"\n # Meant for backwards compatibility to papermill's dataframe method\n return pd.DataFrame(\n [\n [name, self.parameters[name], \"parameter\", self.filename]\n for name in sorted(self.parameters.keys())\n ],\n columns=[\"name\", \"value\", \"type\", \"filename\"],\n )\n\n @property\n def scrap_dataframe(self):\n \"\"\"pandas dataframe: dataframe of cell scraps\"\"\"\n df = self.scraps.dataframe\n df[\"filename\"] = self.filename\n return df\n\n @property\n @deprecated('1.0.0')\n def papermill_record_dataframe(self):\n \"\"\"pandas dataframe: dataframe of cell scraps\"\"\"\n # Meant for backwards compatibility to papermill's dataframe method\n return pd.DataFrame(\n [\n [name, self.scraps[name].data, \"record\", self.filename]\n for name in sorted(self.scraps.keys())\n if self.scraps[name].data is 
not None\n ],\n columns=[\"name\", \"value\", \"type\", \"filename\"],\n )\n\n @property\n @deprecated('1.0.0')\n def papermill_dataframe(self):\n \"\"\"pandas dataframe: dataframe of notebook parameters and cell scraps\"\"\"\n # Meant for backwards compatibility to papermill's dataframe method\n return self.parameter_dataframe.append(\n self.papermill_record_dataframe, ignore_index=True\n )\n\n def _strip_scrapbook_metadata(self, metadata):\n copied = copy.copy(metadata)\n # Strip old metadata name\n copied.pop(\"papermill\", None)\n copied.pop(\"scrapbook\", None)\n return copied\n\n @kernel_required\n def reglue(self, name, new_name=None, raise_on_missing=True, unattached=False):\n \"\"\"\n Display output from a named source of the notebook.\n\n Parameters\n ----------\n name : str\n name of scrap object\n new_name : str\n replacement name for scrap\n raise_error : bool\n indicator for if the resketch should print a message or error on missing snaps\n unattached : bool\n indicator for rendering without making the display recallable as scrapbook data\n \"\"\"\n # Avoid circular imports\n from .api import _prepare_ipy_data_format, _prepare_ipy_display_format\n\n if name not in self.scraps:\n if raise_on_missing:\n raise ScrapbookException(\n \"Scrap '{}' is not available in this notebook.\".format(name)\n )\n else:\n ip_display(\n \"No scrap found with name '{}' in this notebook\".format(name)\n )\n else:\n scrap = self.scraps[name]\n if new_name:\n scrap = scrap._replace(name=new_name)\n if scrap.data is not None:\n data, metadata = _prepare_ipy_data_format(\n scrap.name, scrap_to_payload(scrap), scrap.encoder\n )\n # Skip saving data for later regluing and remove 'scrapbook'\n # from keys, when unattached\n if unattached:\n metadata = self._strip_scrapbook_metadata(metadata)\n ip_display(data, metadata=metadata, raw=True)\n if scrap.display is not None:\n scrap_data = scrap.display.get(\"data\", {})\n scrap_metadata = self._strip_scrapbook_metadata(\n scrap.display.get(\"metadata\", {})\n )\n data, metadata = _prepare_ipy_display_format(\n scrap.name, scrap_data, scrap_metadata\n )\n if unattached:\n # Remove 'scrapbook' from keys if we want it unassociated\n metadata = self._strip_scrapbook_metadata(metadata)\n ip_display(data, metadata=metadata, raw=True)\n\n\nclass Scrapbook(collections.MutableMapping):\n \"\"\"\n A collection of notebooks represented as a dictionary of notebooks\n \"\"\"\n\n def __init__(self):\n self._notebooks = OrderedDict()\n\n def __setitem__(self, key, value):\n # If notebook is a path str then load the notebook.\n if isinstance(value, string_types):\n value = Notebook(value)\n self._notebooks.__setitem__(key, value)\n\n def __getitem__(self, key):\n return self._notebooks.__getitem__(key)\n\n def __delitem__(self, key):\n return self._notebooks.__delitem__(key)\n\n def __iter__(self):\n return self._notebooks.__iter__()\n\n def __len__(self):\n return self._notebooks.__len__()\n\n @property\n @deprecated('1.0.0')\n def papermill_dataframe(self):\n \"\"\"list: a list of data names from a collection of notebooks\"\"\"\n\n # Backwards compatible dataframe interface\n\n df_list = []\n for key in self._notebooks:\n nb = self._notebooks[key]\n df = nb.papermill_dataframe\n df[\"key\"] = key\n df_list.append(df)\n return pd.concat(df_list).reset_index(drop=True)\n\n @property\n @deprecated('0.4.0', 'metrics')\n def papermill_metrics(self):\n return self.metrics\n\n @property\n def metrics(self):\n \"\"\"list: a list of metrics from a collection of notebooks\"\"\"\n 
df_list = []\n for key in self._notebooks:\n nb = self._notebooks[key]\n df = nb.metrics\n df[\"key\"] = key\n df_list.append(df)\n return pd.concat(df_list).reset_index(drop=True)\n\n @property\n def notebooks(self):\n \"\"\"list: a sorted list of associated notebooks.\"\"\"\n return self.values()\n\n @property\n def notebook_scraps(self):\n \"\"\"dict: a dictionary of the notebook scraps by key.\"\"\"\n return OrderedDict([(key, nb.scraps) for key, nb in self._notebooks.items()])\n\n @property\n def scraps(self):\n \"\"\"dict: a dictionary of the merged notebook scraps.\"\"\"\n return Scraps(merge_dicts(nb.scraps for nb in self.notebooks))\n\n def scraps_report(\n self, scrap_names=None, notebook_names=None, include_data=False, headers=True\n ):\n \"\"\"\n Display scraps as markdown structed outputs.\n\n Parameters\n ----------\n scrap_names : str or iterable[str] (optional)\n the scraps to display as reported outputs\n notebook_names : str or iterable[str] (optional)\n notebook names to use in filtering on scraps to report\n include_data : bool (default: False)\n indicator that data-only scraps should be reported\n header : bool (default: True)\n indicator for if the scraps should render with a header\n \"\"\"\n\n def trim_repr(data):\n # Generate a small data representation for display purposes\n if not isinstance(data, string_types):\n data_str = repr(data)\n if len(data_str) > 102:\n data_str = data_str[:100] + \"...\"\n return data_str\n\n if isinstance(scrap_names, string_types):\n scrap_names = [scrap_names]\n scrap_names = set(scrap_names or [])\n\n if notebook_names is None:\n notebook_names = self._notebooks.keys()\n elif isinstance(notebook_names, string_types):\n notebook_names = [notebook_names]\n\n for i, nb_name in enumerate(notebook_names):\n notebook = self[nb_name]\n if headers:\n if i > 0:\n ip_display(Markdown(\"<hr>\")) # tag between outputs\n ip_display(Markdown(\"### {}\".format(nb_name)))\n\n for name in scrap_names or notebook.scraps.display_scraps.keys():\n if headers:\n ip_display(Markdown(\"#### {}\".format(name)))\n notebook.reglue(name, raise_on_missing=False, unattached=True)\n\n if include_data:\n for name, scrap in scrap_names or notebook.scraps.data_scraps.items():\n if scrap.display is None and scrap.data is not None:\n if headers:\n ip_display(Markdown(\"#### {}\".format(name)))\n ip_display(trim_repr(scrap.data))\n else:\n ip_display(\n \"{}: {}\".format(scrap.name, trim_repr(scrap.data))\n )\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
PacktPublishing/Computer-Vision-YOLO-Custom-Object-Detection-with-Colab-GPU
[ "f90db3c5f3326d89282f249ede92234812c824a5" ]
[ "pretrained_yolo_video_nms.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: abhilash\n\"\"\"\n\nimport numpy as np\nimport cv2\n\n#get the webcam video stream\nfile_video_stream = cv2.VideoCapture('images/testing/video_sample2.mp4')\n\n#create a while loop \nwhile (file_video_stream.isOpened):\n #get the current frame from video stream\n ret,current_frame = file_video_stream.read()\n #use the video current frame instead of image\n img_to_detect = current_frame\n\n img_height = img_to_detect.shape[0]\n img_width = img_to_detect.shape[1]\n \n # convert to blob to pass into model\n img_blob = cv2.dnn.blobFromImage(img_to_detect, 0.003922, (416, 416), swapRB=True, crop=False)\n #recommended by yolo authors, scale factor is 0.003922=1/255, width,height of blob is 320,320\n #accepted sizes are 320×320,416×416,609×609. More size means more accuracy but less speed\n \n # set of 80 class labels \n class_labels = [\"person\",\"bicycle\",\"car\",\"motorcycle\",\"airplane\",\"bus\",\"train\",\"truck\",\"boat\",\n \"trafficlight\",\"firehydrant\",\"stopsign\",\"parkingmeter\",\"bench\",\"bird\",\"cat\",\n \"dog\",\"horse\",\"sheep\",\"cow\",\"elephant\",\"bear\",\"zebra\",\"giraffe\",\"backpack\",\n \"umbrella\",\"handbag\",\"tie\",\"suitcase\",\"frisbee\",\"skis\",\"snowboard\",\"sportsball\",\n \"kite\",\"baseballbat\",\"baseballglove\",\"skateboard\",\"surfboard\",\"tennisracket\",\n \"bottle\",\"wineglass\",\"cup\",\"fork\",\"knife\",\"spoon\",\"bowl\",\"banana\",\"apple\",\n \"sandwich\",\"orange\",\"broccoli\",\"carrot\",\"hotdog\",\"pizza\",\"donut\",\"cake\",\"chair\",\n \"sofa\",\"pottedplant\",\"bed\",\"diningtable\",\"toilet\",\"tvmonitor\",\"laptop\",\"mouse\",\n \"remote\",\"keyboard\",\"cellphone\",\"microwave\",\"oven\",\"toaster\",\"sink\",\"refrigerator\",\n \"book\",\"clock\",\"vase\",\"scissors\",\"teddybear\",\"hairdrier\",\"toothbrush\"]\n \n #Declare List of colors as an array\n #Green, Blue, Red, cyan, yellow, purple\n #Split based on ',' and for every split, change type to int\n #convert that to a numpy array to apply color mask to the image numpy array\n class_colors = [\"0,255,0\",\"0,0,255\",\"255,0,0\",\"255,255,0\",\"0,255,255\"]\n class_colors = [np.array(every_color.split(\",\")).astype(\"int\") for every_color in class_colors]\n class_colors = np.array(class_colors)\n class_colors = np.tile(class_colors,(16,1))\n \n # Loading pretrained model \n # input preprocessed blob into model and pass through the model\n # obtain the detection predictions by the model using forward() method\n yolo_model = cv2.dnn.readNetFromDarknet('model/yolov3.cfg','model/yolov3.weights')\n \n # Get all layers from the yolo network\n # Loop and find the last layer (output layer) of the yolo network \n yolo_layers = yolo_model.getLayerNames()\n yolo_output_layer = [yolo_layers[yolo_layer[0] - 1] for yolo_layer in yolo_model.getUnconnectedOutLayers()]\n \n # input preprocessed blob into model and pass through the model\n yolo_model.setInput(img_blob)\n # obtain the detection layers by forwarding through till the output layer\n obj_detection_layers = yolo_model.forward(yolo_output_layer)\n \n \n ############## NMS Change 1 ###############\n # initialization for non-max suppression (NMS)\n # declare list for [class id], [box center, width & height[], [confidences]\n class_ids_list = []\n boxes_list = []\n confidences_list = []\n ############## NMS Change 1 END ###########\n \n \n # loop over each of the layer outputs\n for object_detection_layer in obj_detection_layers:\n \t# loop over the detections\n for object_detection in 
object_detection_layer:\n \n # obj_detections[1 to 4] => will have the two center points, box width and box height\n # obj_detections[5] => will have scores for all objects within bounding box\n all_scores = object_detection[5:]\n predicted_class_id = np.argmax(all_scores)\n prediction_confidence = all_scores[predicted_class_id]\n \n # take only predictions with confidence more than 20%\n if prediction_confidence > 0.20:\n #get the predicted label\n predicted_class_label = class_labels[predicted_class_id]\n #obtain the bounding box co-oridnates for actual image from resized image size\n bounding_box = object_detection[0:4] * np.array([img_width, img_height, img_width, img_height])\n (box_center_x_pt, box_center_y_pt, box_width, box_height) = bounding_box.astype(\"int\")\n start_x_pt = int(box_center_x_pt - (box_width / 2))\n start_y_pt = int(box_center_y_pt - (box_height / 2))\n \n ############## NMS Change 2 ###############\n #save class id, start x, y, width & height, confidences in a list for nms processing\n #make sure to pass confidence as float and width and height as integers\n class_ids_list.append(predicted_class_id)\n confidences_list.append(float(prediction_confidence))\n boxes_list.append([start_x_pt, start_y_pt, int(box_width), int(box_height)])\n ############## NMS Change 2 END ###########\n \n ############## NMS Change 3 ###############\n # Applying the NMS will return only the selected max value ids while suppressing the non maximum (weak) overlapping bounding boxes \n # Non-Maxima Suppression confidence set as 0.5 & max_suppression threhold for NMS as 0.4 (adjust and try for better perfomance)\n max_value_ids = cv2.dnn.NMSBoxes(boxes_list, confidences_list, 0.5, 0.4)\n \n # loop through the final set of detections remaining after NMS and draw bounding box and write text\n for max_valueid in max_value_ids:\n max_class_id = max_valueid[0]\n box = boxes_list[max_class_id]\n start_x_pt = box[0]\n start_y_pt = box[1]\n box_width = box[2]\n box_height = box[3]\n \n #get the predicted class id and label\n predicted_class_id = class_ids_list[max_class_id]\n predicted_class_label = class_labels[predicted_class_id]\n prediction_confidence = confidences_list[max_class_id]\n ############## NMS Change 3 END ########### \n \n end_x_pt = start_x_pt + box_width\n end_y_pt = start_y_pt + box_height\n \n #get a random mask color from the numpy array of colors\n box_color = class_colors[predicted_class_id]\n \n #convert the color numpy array as a list and apply to text and box\n box_color = [int(c) for c in box_color]\n \n # print the prediction in console\n predicted_class_label = \"{}: {:.2f}%\".format(predicted_class_label, prediction_confidence * 100)\n print(\"predicted object {}\".format(predicted_class_label))\n \n # draw rectangle and text in the image\n cv2.rectangle(img_to_detect, (start_x_pt, start_y_pt), (end_x_pt, end_y_pt), box_color, 1)\n cv2.putText(img_to_detect, predicted_class_label, (start_x_pt, start_y_pt-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 1)\n \n cv2.imshow(\"Detection Output\", img_to_detect)\n \n #terminate while loop if 'q' key is pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n#releasing the stream and the camera\n#close all opencv windows\nfile_video_stream.release()\ncv2.destroyAllWindows()" ]
[ [ "numpy.array", "numpy.argmax", "numpy.tile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ishaiqbal/sqlalchemy-challenge-
[ "5b2b7bbb954e371bd1777b5cb04bfb22d7a5a25c" ]
[ "app.py" ]
[ "import numpy as np\nimport datetime as dt\nimport pandas as pd\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\n\nfrom flask import Flask, jsonify\n\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Session link from python to DB\nsession = Session(engine)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Flask Routes\n#################################################\n\n\[email protected](\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Welcome to the Hawaii Climate Analysis API!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/temp/start<br/>\"\n f\"/api/v1.0/temp/start/end\"\n )\n\n\[email protected](\"/api/v1.0/precipitation\")\ndef precipitation():\n session = Session(engine)\n\n data = session.query(Measurement.date, Measurement.prcp).\\\n order_by(Measurement.date).all()\n\n precip_dates = []\n\n for date, prcp in data:\n new_dict = {}\n new_dict[date] = prcp\n precip_dates.append(new_dict)\n\n session.close()\n\n return jsonify(precip_dates)\n\n\[email protected](\"/api/v1.0/stations\")\ndef stations():\n\n results = session.query(Station.station).all()\n stations = list(np.ravel(results))\n\n session.close()\n return jsonify(stations)\n\n\[email protected](\"/api/v1.0/tobs\")\ndef tobs():\n\n \n lastdate = session.query(Measurement.date).order_by(\n Measurement.date.desc()).first()\n\n last_date = dt.datetime.strptime(lastdate[0], '%Y-%m-%d')\n\n \n query_date = dt.date(last_date.year, last_date.month,\n last_date.day) - dt.timedelta(days=365)\n\n \n results = session.query(Measurement.date, Measurement.tobs).filter(\n Measurement.date >= query_date).all()\n\n all_tobs = []\n for row in results:\n tobs_dict = {}\n tobs_dict[\"date\"] = row.date\n tobs_dict[\"tobs\"] = row.tobs\n all_tobs.append(tobs_dict)\n\n session.close()\n return jsonify(all_tobs)\n\n\[email protected](\"/api/v1.0/temp/start\")\ndef stats():\n\n start_date = session.query(func.min(Measurement.date)).all()[0][0]\n\n sel = [func.min(Measurement.tobs),func.avg(Measurement.tobs),func.max(Measurement.tobs)]\n temp_lstuple = session.query(*sel).filter(Measurement.date >= start_date).all()\n\n session.close()\n\n temp_pram1_list = list(np.ravel(temp_lstuple))\n temp_list =[]\n for t in temp_lstuple:\n temp_dict = {}\n temp_dict[\"Min Temp\"] = temp_pram1_list[0]\n temp_dict[\"Avg Temp\"] = temp_pram1_list[1]\n temp_dict[\"Max Temp\"] = temp_pram1_list[2]\n temp_list.append(temp_dict)\n return jsonify(temp_list)\n\n\[email protected](\"/api/v1.0/temp/start/end\")\ndef tempstartend(start=None, end=None):\n\n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n\n temps_q = session.query(*sel).filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n\n temps = list(np.ravel(temps_q))\n\n return 
jsonify(temps)\n\nif __name__ == '__main__':\n app.run(debug=True)\n" ]
[ [ "numpy.ravel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ekatef/oemof-examples
[ "4805d5cef03141a917fd8a9e1141acfa8cc9d781", "4805d5cef03141a917fd8a9e1141acfa8cc9d781", "f16511d20008c30889a6e75a788a3a1a0bc632c2" ]
[ "oemof_examples/tespy/heat_pump/heat_pump_water.py", "oemof_examples/oemof.solph/v0.4.x/activity_costs/activity_costs.py", "oemof_examples/oemof.solph/v0.4.x/storage_investment/v2_invest_optimize_only_gas_and_storage.py" ]
[ "# -*- coding: utf-8 -*-\nfrom tespy.networks import Network\nfrom tespy.components import (\n Sink, Source, Splitter, Compressor, Condenser, Pump, HeatExchangerSimple,\n Valve, Drum, HeatExchanger, CycleCloser\n)\nfrom tespy.connections import Connection, Ref\nfrom tespy.tools.characteristics import CharLine\nfrom tespy.tools.characteristics import load_default_char as ldc\nfrom tespy.tools import document_model\n\nimport numpy as np\nimport pandas as pd\n\n# %% network\n\nnw = Network(\n fluids=['water', 'NH3', 'air'], T_unit='C', p_unit='bar', h_unit='kJ / kg',\n m_unit='kg / s'\n)\n\n# %% components\n\n# sources & sinks\ncc = CycleCloser('coolant cycle closer')\ncc_cons = CycleCloser('consumer cycle closer')\namb = Source('ambient air')\namb_out1 = Sink('sink ambient 1')\namb_out2 = Sink('sink ambient 2')\n\n# ambient system\nsp = Splitter('splitter')\npu = Pump('pump')\n\n# consumer system\n\ncd = Condenser('condenser')\ndhp = Pump('district heating pump')\ncons = HeatExchangerSimple('consumer')\n\n# evaporator system\n\nves = Valve('valve')\ndr = Drum('drum')\nev = HeatExchanger('evaporator')\nsu = HeatExchanger('superheater')\nerp = Pump('evaporator reciculation pump')\n\n# compressor-system\n\ncp1 = Compressor('compressor 1')\ncp2 = Compressor('compressor 2')\nic = HeatExchanger('intercooler')\n\n# %% connections\n\n# consumer system\n\nc_in_cd = Connection(cc, 'out1', cd, 'in1')\n\ncb_dhp = Connection(cc_cons, 'out1', dhp, 'in1')\ndhp_cd = Connection(dhp, 'out1', cd, 'in2')\ncd_cons = Connection(cd, 'out2', cons, 'in1')\ncons_cf = Connection(cons, 'out1', cc_cons, 'in1')\n\nnw.add_conns(c_in_cd, cb_dhp, dhp_cd, cd_cons, cons_cf)\n\n# connection condenser - evaporator system\n\ncd_ves = Connection(cd, 'out1', ves, 'in1')\n\nnw.add_conns(cd_ves)\n\n# evaporator system\n\nves_dr = Connection(ves, 'out1', dr, 'in1')\ndr_erp = Connection(dr, 'out1', erp, 'in1')\nerp_ev = Connection(erp, 'out1', ev, 'in2')\nev_dr = Connection(ev, 'out2', dr, 'in2')\ndr_su = Connection(dr, 'out2', su, 'in2')\n\nnw.add_conns(ves_dr, dr_erp, erp_ev, ev_dr, dr_su)\n\namb_p = Connection(amb, 'out1', pu, 'in1')\np_sp = Connection(pu, 'out1', sp, 'in1')\nsp_su = Connection(sp, 'out1', su, 'in1')\nsu_ev = Connection(su, 'out1', ev, 'in1')\nev_amb_out = Connection(ev, 'out1', amb_out1, 'in1')\n\nnw.add_conns(amb_p, p_sp, sp_su, su_ev, ev_amb_out)\n\n# connection evaporator system - compressor system\n\nsu_cp1 = Connection(su, 'out2', cp1, 'in1')\n\nnw.add_conns(su_cp1)\n\n# compressor-system\n\ncp1_he = Connection(cp1, 'out1', ic, 'in1')\nhe_cp2 = Connection(ic, 'out1', cp2, 'in1')\ncp2_c_out = Connection(cp2, 'out1', cc, 'in1')\n\nsp_ic = Connection(sp, 'out2', ic, 'in2')\nic_out = Connection(ic, 'out2', amb_out2, 'in1')\n\nnw.add_conns(cp1_he, he_cp2, sp_ic, ic_out, cp2_c_out)\n\n# %% component parametrization\n\n# condenser system\n\ncd.set_attr(pr1=0.99, pr2=0.99, ttd_u=5, design=['pr2', 'ttd_u'],\n offdesign=['zeta2', 'kA_char'])\ndhp.set_attr(eta_s=0.8, design=['eta_s'], offdesign=['eta_s_char'])\ncons.set_attr(pr=0.99, design=['pr'], offdesign=['zeta'])\n\n# water pump\n\npu.set_attr(eta_s=0.75, design=['eta_s'], offdesign=['eta_s_char'])\n\n# evaporator system\n\nkA_char1 = ldc('heat exchanger', 'kA_char1', 'DEFAULT', CharLine)\nkA_char2 = ldc('heat exchanger', 'kA_char2', 'EVAPORATING FLUID', CharLine)\n\nev.set_attr(pr1=0.98, pr2=0.99, ttd_l=5,\n kA_char1=kA_char1, kA_char2=kA_char2,\n design=['pr1', 'ttd_l'], offdesign=['zeta1', 'kA_char'])\nsu.set_attr(pr1=0.98, pr2=0.99, ttd_u=2, design=['pr1', 
'pr2', 'ttd_u'],\n offdesign=['zeta1', 'zeta2', 'kA_char'])\nerp.set_attr(eta_s=0.8, design=['eta_s'], offdesign=['eta_s_char'])\n\n# compressor system\n\ncp1.set_attr(eta_s=0.85, design=['eta_s'], offdesign=['eta_s_char'])\ncp2.set_attr(eta_s=0.9, pr=3, design=['eta_s'], offdesign=['eta_s_char'])\nic.set_attr(pr1=0.99, pr2=0.98, design=['pr1', 'pr2'],\n offdesign=['zeta1', 'zeta2', 'kA_char'])\n\n# %% connection parametrization\n\n# condenser system\n\nc_in_cd.set_attr(fluid={'air': 0, 'NH3': 1, 'water': 0})\ncb_dhp.set_attr(T=60, p=10, fluid={'air': 0, 'NH3': 0, 'water': 1})\ncd_cons.set_attr(T=90)\n\n# evaporator system cold side\n\nerp_ev.set_attr(m=Ref(ves_dr, 1.25, 0), p0=5)\nsu_cp1.set_attr(p0=5, state='g')\n\n# evaporator system hot side\n\n# pumping at constant rate in partload\namb_p.set_attr(T=12, p=2, fluid={'air': 0, 'NH3': 0, 'water': 1},\n offdesign=['v'])\nsp_su.set_attr(offdesign=['v'])\nev_amb_out.set_attr(p=2, T=9, design=['T'])\n\n# compressor-system\n\nhe_cp2.set_attr(Td_bp=5, p0=20, design=['Td_bp'])\nic_out.set_attr(T=30, design=['T'])\n\n# %% key paramter\n\ncons.set_attr(Q=-200e3)\n\n# %% Calculation\n\nnw.solve('design')\nnw.print_results()\nnw.save('heat_pump_water')\ndocument_model(nw, filename='report_water_design.tex')\n\n# offdesign test\nnw.solve('offdesign', design_path='heat_pump_water')\ndocument_model(nw, filename='report_water_offdesign.tex')\n\nT_range = [6, 12, 18, 24, 30]\nQ_range = np.array([100e3, 120e3, 140e3, 160e3, 180e3, 200e3, 220e3])\ndf = pd.DataFrame(columns=Q_range / -cons.Q.val)\n\nfor T in T_range:\n amb_p.set_attr(T=T)\n eps = []\n\n for Q in Q_range:\n cons.set_attr(Q=-Q)\n nw.solve('offdesign', design_path='heat_pump_water')\n\n if nw.lin_dep:\n eps += [np.nan]\n else:\n eps += [\n abs(cd.Q.val) / (cp1.P.val + cp2.P.val + erp.P.val + pu.P.val)\n ]\n\n df.loc[T] = eps\n\ndf.to_csv('COP_water.csv')\n", "# -*- coding: utf-8 -*-\n\n\"\"\"\nGeneral description\n-------------------\nThis example illustrates the effect of activity_costs.\n\nThere are the following components:\n\n - demand_heat: heat demand (constant, for the sake of simplicity)\n - fireplace: wood firing, burns \"for free\" if somebody is around\n - boiler: gas firing, consumes (paid) gas\n\nNotice that activity_costs is an attribute to NonConvex.\nThis is because it relies on the activity status of a component\nwhich is only available for nonconvex flows.\n\n\nInstallation requirements\n-------------------------\nThis example requires version 0.3 of oemof. 
Install by:\n\n pip install 'oemof.solph>=0.4,<0.5'\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom oemof import solph\n\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n plt = None\n\n##########################################################################\n# Calculate parameters and initialize the energy system and\n##########################################################################\n\nperiods = 24\ntime = pd.date_range('1/1/2018', periods=periods, freq='H')\n\ndemand_heat = np.full(periods, 5)\ndemand_heat[:4] = 0\ndemand_heat[4:18] = 4\n\nactivity_costs = np.full(periods, 5)\nactivity_costs[18:] = 0\n\nes = solph.EnergySystem(timeindex=time)\n\nb_heat = solph.Bus(label='b_heat')\n\nes.add(b_heat)\n\nsink_heat = solph.Sink(\n label='demand',\n inputs={b_heat: solph.Flow(fix=demand_heat, nominal_value=1)})\n\nfireplace = solph.Source(\n label='fireplace',\n outputs={b_heat: solph.Flow(nominal_value=3,\n variable_costs=0,\n nonconvex=solph.NonConvex(\n activity_costs=activity_costs))})\n\nboiler = solph.Source(\n label='boiler',\n outputs={b_heat: solph.Flow(nominal_value=10,\n variable_costs=1)})\n\nes.add(sink_heat, fireplace, boiler)\n\n##########################################################################\n# Optimise the energy system\n##########################################################################\n\n# create an optimization problem and solve it\nom = solph.Model(es)\n\n# solve model\nom.solve(solver='cbc', solve_kwargs={'tee': True})\n\n##########################################################################\n# Check and plot the results\n##########################################################################\n\nresults = solph.processing.results(om)\n\n# plot data\nif plt is not None:\n data = solph.views.node(results, 'b_heat')['sequences']\n ax = data.plot(kind='line', drawstyle='steps-post', grid=True, rot=0)\n ax.set_xlabel('Time')\n ax.set_ylabel('Heat (arb. units)')\n plt.show()\n", "# -*- coding: utf-8 -*-\n\n\"\"\"\nGeneral description\n-------------------\nThis example shows how to perform a capacity optimization for\nan energy system with storage. The following energy system is modeled:\n\n input/output bgas bel\n | | | |\n | | | |\n wind(FixedSource) |------------------>| |\n | | | |\n pv(FixedSource) |------------------>| |\n | | | |\n gas_resource |--------->| | |\n (Commodity) | | | |\n | | | |\n demand(Sink) |<------------------| |\n | | | |\n | | | |\n pp_gas(Transformer) |<---------| | |\n |------------------>| |\n | | | |\n storage(Storage) |<------------------| |\n |------------------>| |\n\nThe example exists in four variations. The following parameters describe\nthe main setting for the optimization variation 2:\n\n - optimize gas_resource and storage\n - set installed capacities for wind and pv\n - set investment cost for storage\n - set gas price for kWh\n\n Results show a higher renewable energy share than in variation 1\n (78% compared to 51%) due to preinstalled renewable capacities.\n Storage is not installed as the gas resource is cheaper.\n\n Have a look at different parameter settings. There are four variations\n of this example in the same folder.\n\nInstallation requirements\n-------------------------\nThis example requires the version v0.3.x of oemof. 
Install by:\n\n pip install 'oemof.solph>=0.4,<0.5'\n\n\"\"\"\n\n__copyright__ = \"oemof developer group\"\n__license__ = \"GPLv3\"\n\n###############################################################################\n# Imports\n###############################################################################\n\n# Default logger of oemof\nfrom oemof.tools import logger\nfrom oemof.tools import economics\nfrom oemof import solph\n\nimport logging\nimport os\nimport pandas as pd\nimport pprint as pp\n\nnumber_timesteps = 8760\n\n##########################################################################\n# Initialize the energy system and read/calculate necessary parameters\n##########################################################################\n\nlogger.define_logging()\nlogging.info(\"Initialize the energy system\")\ndate_time_index = pd.date_range(\"1/1/2012\", periods=number_timesteps, freq=\"H\")\n\nenergysystem = solph.EnergySystem(timeindex=date_time_index)\n\n# Read data file\nfull_filename = os.path.join(os.getcwd(), \"storage_investment.csv\")\ndata = pd.read_csv(full_filename, sep=\",\")\n\nprice_gas = 0.04\n\n# If the period is one year the equivalent periodical costs (epc) of an\n# investment are equal to the annuity. Use oemof's economic tools.\nepc_storage = economics.annuity(capex=1000, n=20, wacc=0.05)\n\n##########################################################################\n# Create oemof objects\n##########################################################################\n\nlogging.info(\"Create oemof objects\")\n# create natural gas bus\nbgas = solph.Bus(label=\"natural_gas\")\n\n# create electricity bus\nbel = solph.Bus(label=\"electricity\")\n\nenergysystem.add(bgas, bel)\n\n# create excess component for the electricity bus to allow overproduction\nexcess = solph.Sink(label=\"excess_bel\", inputs={bel: solph.Flow()})\n\n# create source object representing the natural gas commodity (annual limit)\ngas_resource = solph.Source(\n label=\"rgas\", outputs={bgas: solph.Flow(variable_costs=price_gas)}\n)\n\n# create fixed source object representing wind power plants\nwind = solph.Source(\n label=\"wind\",\n outputs={bel: solph.Flow(fix=data[\"wind\"], nominal_value=1000000)},\n)\n\n# create fixed source object representing pv power plants\npv = solph.Source(\n label=\"pv\", outputs={bel: solph.Flow(fix=data[\"pv\"], nominal_value=600000)}\n)\n\n# create simple sink object representing the electrical demand\ndemand = solph.Sink(\n label=\"demand\",\n inputs={bel: solph.Flow(fix=data[\"demand_el\"], nominal_value=1)},\n)\n\n# create simple transformer object representing a gas power plant\npp_gas = solph.Transformer(\n label=\"pp_gas\",\n inputs={bgas: solph.Flow()},\n outputs={bel: solph.Flow(nominal_value=10e10, variable_costs=0)},\n conversion_factors={bel: 0.58},\n)\n\n# create storage object representing a battery\nstorage = solph.components.GenericStorage(\n label=\"storage\",\n inputs={bel: solph.Flow(variable_costs=0.0001)},\n outputs={bel: solph.Flow()},\n loss_rate=0.00,\n initial_storage_level=0,\n invest_relation_input_capacity=1 / 6,\n invest_relation_output_capacity=1 / 6,\n inflow_conversion_factor=1,\n outflow_conversion_factor=0.8,\n investment=solph.Investment(ep_costs=epc_storage),\n)\n\nenergysystem.add(excess, gas_resource, wind, pv, demand, pp_gas, storage)\n\n##########################################################################\n# Optimise the energy 
system\n##########################################################################\n\nlogging.info(\"Optimise the energy system\")\n\n# initialise the operational model\nom = solph.Model(energysystem)\n\n# if tee_switch is true solver messages will be displayed\nlogging.info(\"Solve the optimization problem\")\nom.solve(solver=\"cbc\", solve_kwargs={\"tee\": True})\n\n##########################################################################\n# Check and plot the results\n##########################################################################\n\n# check if the new result object is working for custom components\nresults = solph.processing.results(om)\n\ncustom_storage = solph.views.node(results, \"storage\")\nelectricity_bus = solph.views.node(results, \"electricity\")\n\nmeta_results = solph.processing.meta_results(om)\npp.pprint(meta_results)\n\nmy_results = electricity_bus[\"scalars\"]\n\n# installed capacity of storage in GWh\nmy_results[\"storage_invest_GWh\"] = (\n results[(storage, None)][\"scalars\"][\"invest\"] / 1e6\n)\n\n# resulting renewable energy share\nmy_results[\"res_share\"] = (\n 1\n - results[(pp_gas, bel)][\"sequences\"].sum()\n / results[(bel, demand)][\"sequences\"].sum()\n)\n\npp.pprint(my_results)\n" ]
[ [ "numpy.array", "pandas.DataFrame" ], [ "matplotlib.pyplot.show", "numpy.full", "pandas.date_range" ], [ "pandas.read_csv", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
fitbenchmarking/fitbenchmarking
[ "ea398efa61f071dc64fe7c3b484d5bb4e1897856", "ea398efa61f071dc64fe7c3b484d5bb4e1897856" ]
[ "examples/benchmark_problems/scripts/generate_simulated_mantid.py", "fitbenchmarking/results_processing/base_table.py" ]
[ "\"\"\"\nThis script is used to generate simulated count data based on a Mantid\nscript.\n\"\"\"\n\nimport os\n\nimport numpy\n\n\ndef VariableStatsData(N, A0, omega, phi, sigma, bg):\n x = numpy.linspace(start=0.0, stop=32.0, num=2001)\n y = (1+A0*numpy.cos(omega*x+phi)*numpy.exp(-(sigma*x)**2)) * \\\n numpy.exp(-x/2.197)+bg\n NN = N/numpy.sum(y) # normalisation so whole spectrum has ~N counts\n return (x, numpy.random.poisson(y*NN))\n\n\ndef write_data(x, y, part=0):\n path = f'{os.path.dirname(__file__)}/../data_files'\n part_str = part if part != 0 else \"\"\n with open(f'{path}/simulated_mantid{part_str}.txt', 'w') as f:\n f.write('# X Y\\n')\n lines = [[x[i], y[i]]\n # if y[i] != 0 # Uncomment to replace 0s with 1s\n # else [x[i], 1]\n for i in range(len(x))\n # if y[i] != 0 # Uncomment to ignore 0 values\n ]\n f.writelines([f'{i} {j}\\n' for i, j in lines])\n\n\ndef write_problem(N, part=0):\n path = f'{os.path.dirname(__file__)}/..'\n part_str = part if part != 0 else \"\"\n with open(f'{path}/simulated_mantid{part_str}.txt', 'w') as f:\n f.write('# FitBenchmark Problem\\n')\n f.write(\"software = 'Mantid'\\n\")\n f.write(f\"name = 'Simulated poisson (Mantid) {part_str}'\\n\")\n f.write(\"description = 'A simulated dataset for testing poisson cost\"\n \"functions, based on a simple simulation from Mantid.'\\n\")\n f.write(f\"input_file = 'simulated_mantid{part_str}.txt'\\n\")\n f.write(\"function = 'name=UserFunction,\"\n \"Formula=N*((1+A*cos(omega*x+phi)*exp(-(sigma*x)^2))*\"\n \"exp(-x/2.197)+bg),\"\n f\"N={0.007*N},\"\n \"A=0.3,\"\n \"omega=0.9,\"\n \"phi=0.2,\"\n \"sigma=0.12,\"\n \"bg=0.001'\\n\")\n\n\nif __name__ == '__main__':\n chunks = [1] #,8,16,20,32,40,50,100]\n num = 1000\n N0 = 4e5\n for i, part in enumerate(chunks):\n args = {'N': 1000/part,\n 'A0': 0.25,\n 'omega': 1.0,\n 'phi': 0.1,\n 'sigma': 0.1,\n 'bg': 1.E-4}\n x, y = VariableStatsData(**args)\n write_data(x, y, part=i)\n write_problem(N=args['N'], part=i)\n", "\"\"\"\nImplements the base class for the tables.\n\"\"\"\nimport os\nfrom abc import ABCMeta, abstractmethod\n\nimport docutils.core\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom fitbenchmarking.utils.misc import get_js\n\nFORMAT_DESCRIPTION = \\\n {'abs': 'Absolute values are displayed in the table.',\n 'rel': 'Relative values are displayed in the table.',\n 'both': 'Absolute and relative values are displayed in '\n 'the table in the format ``abs (rel)``'}\n\n\nclass Table:\n \"\"\"\n Base class for the FitBenchmarking HTML and text output tables.\n\n When inheriting from this, it may be useful to override the following\n functions as required:\n\n - get_value\n - display_str\n - get_error_str\n - get_link_str\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, results, best_results, options, group_dir,\n pp_locations, table_name):\n \"\"\"\n Initialise the class.\n\n :param results: Results grouped by row and category (for colouring)\n :type results:\n dict[str, dict[str, list[utils.fitbm_result.FittingResult]]]\n :param best_results: The best results from each row/category\n :type best_results:\n dict[str, dict[str, utils.fitbm_result.FittingResult]]\n :param options: Options used in fitting\n :type options: utils.options.Options\n :param group_dir: path to the directory where group results should be\n stored\n :type group_dir: str\n :param pp_locations: tuple containing the locations of the\n performance profiles (acc then runtime)\n :type pp_locations: tuple(str,str)\n :param 
table_name: Name of the table\n :type table_name: str\n \"\"\"\n # Flatten to reduce the necessity on having problems as rows.\n self.results = results\n self.best_results = best_results\n self.options = options\n self.group_dir = group_dir\n self.pp_locations = pp_locations\n self.table_name = table_name\n self.name = None\n\n self.output_string_type = {\"abs\": '{:.4g}',\n \"rel\": '{:.4g}',\n \"both\": '{0:.4g} ({1:.4g})'}\n self.colour_template = 'background-color: {0}'\n\n self.has_pp = False\n self.pp_location = ''\n self._table_title = None\n self._file_path = None\n\n self.cbar_title = \"No colour bar description given\"\n self.cbar_left_label = \"Best (1)\"\n self.cbar_right_label = \"Worst (>{})\".format(self.options.colour_ulim)\n\n # Set up results as needed\n self.sorted_results = {}\n self.create_results_dict()\n\n @abstractmethod\n def get_value(self, result):\n \"\"\"\n Gets the main value to be reported in the tables for a given result\n\n If more than one value is returned please note that the first value\n will be used in the default colour handling.\n\n :param result: The result to generate the values for.\n :type result: FittingResult\n\n :return: The value to convert to a string for the tables\n :rtype: tuple(float)\n \"\"\"\n raise NotImplementedError\n\n def display_str(self, value):\n \"\"\"\n Converts a value generated by\n :meth:`~fitbenchmarking.results_processing.base_table.Table.get_value()`\n into a string respresentation to be used in the tables.\n Base class implementation takes\n the relative and absolute values and uses ``self.output_string_type``\n as a template for the string format. This can be overridden to\n adequately display the results.\n\n :param value: Relative and absolute values\n :type value: tuple\n\n :return: string representation of the value for display in the table.\n :rtype: str\n \"\"\"\n rel_value, abs_value = value\n comp_mode = self.options.comparison_mode\n result_template = self.output_string_type[self.options.comparison_mode]\n if comp_mode == \"abs\":\n return result_template.format(abs_value)\n if comp_mode == \"rel\":\n return result_template.format(rel_value)\n # comp_mode == \"both\"\n return result_template.format(abs_value, rel_value)\n\n def get_link_str(self, result):\n \"\"\"\n Get the link as a string for the result.\n This can be overridden if tables require different links.\n\n :param result: The result to get the link for\n :type result: FittingResult\n\n :return: The link to go to when the cell is selected\n :rtype: string\n \"\"\"\n return os.path.relpath(path=result.fitting_report_link,\n start=self.group_dir)\n\n @staticmethod\n def get_error_str(result, error_template='[{}]'):\n \"\"\"\n Get the error string for a result based on error_template\n This can be overridden if tables require different error formatting.\n\n :param result: The result to get the error string for\n :type result: FittingResult\n\n :return: A string representation of the error\n :rtype: str\n \"\"\"\n error_code = result.error_flag\n if error_code == 0:\n return ''\n\n return error_template.format(error_code)\n\n def create_results_dict(self):\n \"\"\"\n Generate a dictionary of results lists with rows and columns as the key\n and list elements respectively.\n This is used to create HTML and txt tables.\n This is stored in self.sorted_results\n \"\"\"\n self.sorted_results = {k: [r for cat in row.values() for r in cat]\n for k, row in self.results.items()}\n\n def get_str_dict(self, html=False):\n \"\"\"\n Create a dictionary with the 
table values as strings for display.\n\n :return: The dictionary of strings for the table\n :rtype: dict[list[str]]\n \"\"\"\n str_dict = {}\n for k, results in self.sorted_results.items():\n str_dict[k] = [self.get_str_result(r, html)\n for r in results]\n return str_dict\n\n def get_colour_df(self, like_df=None):\n \"\"\"\n Generate a dataframe of colours to add to the html rendering.\n\n If like_df is passed this will use the column and row indexes of that\n dataframe.\n\n :param like_df: The dataframe to copy headings from. Defaults to None.\n :type like_df: pandas.DataFrame\n\n :return: A dataframe with colourings as strings\n :rtype: pandas.DataFrame\n \"\"\"\n col_dict = {}\n for k, results in self.sorted_results.items():\n col_dict[k] = self.get_colours_for_row(results)\n\n table = pd.DataFrame.from_dict(col_dict, orient='index')\n\n if like_df is None:\n row = next(iter(self.sorted_results.values()))\n minimizers_list = [(r.software, r.minimizer) for r in row]\n table.columns = pd.MultiIndex.from_tuples(minimizers_list)\n else:\n table.columns = like_df.columns\n table.index = like_df.index\n return table\n\n def get_str_result(self, result, html=False):\n \"\"\"\n Given a single result, generate the string to display in this table.\n The html flag can be used to switch between a plain text and html\n format.\n\n This is intended to be easily extensible by overriding the following\n functions:\n\n - get_value\n - display_str\n - get_error_str\n - get_link_str\n\n If you find yourself overriding this, please consider if changes could\n be made to allow future tables to benefit.\n\n :param result: The result to generate a string for\n :type result: fitbenchmarking.utils.ftibm_result.FittingResult\n :param html: Flag to control whether to generate a html string or plain\n text. 
Defaults to False.\n :type html: bool\n\n :return: The string representation.\n :rtype: str\n \"\"\"\n if html:\n val = self.get_value(result)\n val_str = self.display_str(val)\n val_str += self.get_error_str(result,\n error_template=\"<sup>{}</sup>\")\n val_str = f'<a href=\"{self.get_link_str(result)}\">{val_str}</a>'\n else:\n val_str = self.display_str(self.get_value(result))\n val_str += self.get_error_str(result, error_template='[{}]')\n return val_str\n\n def get_colours_for_row(self, results):\n \"\"\"\n Get the colours as strings for the given results in the table.\n The base class implementation, for example,\n uses the first value from self.get_value and\n ``colour_map``, ``colour_ulim`` and ``cmap_range`` within\n :class:`~fitbenchmarking.utils.options.Options`.\n\n :param result: Results to get the colours for.\n :type result: list[fitbenchmarking.utils.fitbm_result.FittingResult]\n\n :return: The colour to use for each cell in the list\n :rtype: list[str]\n \"\"\"\n values = [self.get_value(r)[0] for r in results]\n\n cmap_name = self.options.colour_map\n cmap = plt.get_cmap(cmap_name)\n cmap_ulim = self.options.colour_ulim\n cmap_range = self.options.cmap_range\n log_ulim = np.log10(cmap_ulim) # colour map used with log spacing\n\n col_strs = [\"background-colour: #ffffff\" for _ in results]\n\n colours = self.vals_to_colour(values, cmap, cmap_range, log_ulim)\n for i, c in enumerate(colours):\n try:\n col_strs[i] = self.colour_template.format(c)\n except IndexError:\n col_strs[i] = self.colour_template.format(*c)\n\n return col_strs\n\n def create_pandas_data_frame(self, html=False):\n \"\"\"\n Creates a pandas data frame of results\n\n :param html: Whether to make the dataframe for html or plain text\n :type html: bool. defaults to False\n\n :return: DataFrame with string representations of results\n :rtype: pandas.DataFrame\n \"\"\"\n str_results = self.get_str_dict(html)\n row = next(iter(self.sorted_results.values()))\n minimizers_list = [(r.software, r.minimizer) for r in row]\n columns = pd.MultiIndex.from_tuples(minimizers_list)\n table = pd.DataFrame.from_dict(str_results,\n orient='index',\n columns=columns)\n return table\n\n def to_html(self):\n \"\"\"\n Generate a html version of the table.\n\n :return: HTML table output\n :rtype: str\n \"\"\"\n table = self.create_pandas_data_frame(html=True)\n\n # Format the table headers\n cost_func_template = '<a class=\"cost_function_header\" ' \\\n 'href=https://fitbenchmarking.readthedocs.io/' \\\n 'en/latest/users/options/fitting_option.html' \\\n '#cost-function-cost-func-type ' \\\n 'target=\"_blank\">{0}</a>'\n software_template = '<a class=\"software_header\" ' \\\n 'href=\"https://fitbenchmarking.readthedocs.io/' \\\n 'en/latest/users/options/minimizer_option.html' \\\n '#{0}\" target=\"_blank\">{0}</a>'\n minimizer_template = '<a class=\"minimizer_header\" col={0} ' \\\n 'title=\"{1}\"' \\\n 'href=\"https://fitbenchmarking.readthedocs.io/' \\\n 'en/latest/users/options/minimizer_option.html' \\\n '#{2}\" target=\"_blank\">{3}</a>'\n\n row = next(iter(self.sorted_results.values()))\n minimizers_list = [\n (cost_func_template.format(result.costfun_tag),\n software_template.format(result.software.replace('_', '-')),\n minimizer_template.format(\n i, self.options.minimizer_alg_type[result.minimizer],\n result.software.replace('_', '-'),\n result.minimizer))\n for i, result in enumerate(row)]\n columns = pd.MultiIndex.from_tuples(minimizers_list)\n table.columns = columns\n\n # Format the row labels\n index = []\n for 
b, i in zip(self.best_results.values(), table.index):\n b = next(iter(b.values()))\n rel_path = os.path.relpath(\n path=b.problem_summary_page_link,\n start=self.group_dir)\n index.append('<a class=\"problem_header\" href=\"{0}\">{1}</a>'\n .format(rel_path, i))\n table.index = index\n\n # Get columns where cost function changes\n column_dividers = [table.columns[0]]\n for column in table.columns[1:]:\n if column[0] != column_dividers[-1][0]:\n column_dividers.append(column)\n column_dividers = column_dividers[1:]\n\n # Set the cell colours and increase bars between cost functions\n table_style = table.style\\\n .apply(lambda df: self.get_colour_df(like_df=df), axis=None)\\\n .set_table_styles(table_styles={\n k: [{'selector': 'td',\n 'props': [('border-left-width', '3px')]},\n {'selector': 'th',\n 'props': [('border-left-width', '3px')]}]\n for k in column_dividers})\n\n return table_style.render()\n\n def to_txt(self):\n \"\"\"\n Generate a plain text version of the table\n\n :return: Plain text table output\n :rtype: str\n \"\"\"\n table = self.create_pandas_data_frame(html=False)\n return table.to_string()\n\n def get_description(self, html_description):\n \"\"\"\n Generates table description from class docstrings and converts them\n into html\n\n :param html_description: Dictionary containing table descriptions\n :type html_description: dict\n\n :return: Dictionary containing table descriptions\n :rtype: dict\n \"\"\"\n FORMAT_DESCRIPTION[self.name] = self.__doc__\n for name in [self.name, self.options.comparison_mode]:\n descrip = FORMAT_DESCRIPTION[name]\n descrip = descrip.replace(':ref:', '')\n js = get_js(self.options, self.group_dir)\n docsettings = {\n 'math_output': 'MathJax '+js['mathjax']\n }\n description_page = docutils.core.publish_parts(\n descrip,\n writer_name='html',\n settings_overrides=docsettings)\n html_description[name] = description_page['body']\n html_description[name] = html_description[name].replace(\n '<blockquote>\\n', '')\n return html_description\n\n @property\n def table_title(self):\n \"\"\"\n Getter function for table name if self._table_title is None\n\n :return: name of table\n :rtype: str\n \"\"\"\n if self._table_title is None:\n self._table_title = \"FitBenchmarking: {0} table\".format(self.name)\n return self._table_title\n\n @table_title.setter\n def table_title(self, value):\n \"\"\"\n Setting function to set the name of the table\n\n :param value: name of table\n :type value: str\n \"\"\"\n self._table_title = value\n\n @property\n def file_path(self):\n \"\"\"\n Getter function for the path to the table\n\n :return: path to table\n :rtype: str\n \"\"\"\n if self._file_path is None:\n self._file_path = os.path.join(self.group_dir, self.table_name)\n return self._file_path\n\n @file_path.setter\n def file_path(self, value):\n \"\"\"\n Setting function to set the path to the table\n\n :param value: path to table\n :type value: str\n \"\"\"\n self._file_path = value\n\n @staticmethod\n def vals_to_colour(vals, cmap, cmap_range, log_ulim):\n \"\"\"\n Converts an array of values to a list of hexadecimal colour\n strings using logarithmic sampling from a matplotlib colourmap\n according to relative value.\n\n :param vals: values in the range [0, 1] to convert to colour strings\n :type vals: list[float]\n :param cmap: matplotlib colourmap\n :type cmap: matplotlib colourmap object\n :param cmap_range: values in range [0, 1] for colourmap cropping\n :type cmap_range: list[float], 2 elements\n :param log_ulim: log10 of worst shading cutoff value\n :type 
log_ulim: float\n\n :return: colours as hex strings for each input value\n :rtype: list[str]\n \"\"\"\n log_vals = np.log10(vals)\n log_llim = min(log_vals)\n norm_vals = (log_vals - log_llim) /\\\n (log_ulim - log_llim)\n norm_vals[norm_vals > 1] = 1 # applying upper cutoff\n norm_vals[np.isnan(norm_vals)] = 1 # deal with nans\n # trimming colour map according to default/user input\n norm_vals = cmap_range[0] + \\\n norm_vals*(cmap_range[1] - cmap_range[0])\n rgba = cmap(norm_vals)\n hex_strs = [mpl.colors.rgb2hex(colour) for colour in rgba]\n\n return hex_strs\n\n def save_colourbar(self, fig_dir, n_divs=100, sz_in=(3, 0.8)) -> str:\n \"\"\"\n Generates a png of a labelled colourbar using matplotlib.\n\n :param fig_dir: path to figures directory\n :type fig_dir: str\n :param n_divs: number of divisions of shading in colourbar\n :type n_divs: int\n :param sz_in: dimensions of png in inches [width, height]\n :type sz_in: list[float] - 2 elements\n\n :return: The relative path to the colourbar image.\n :rtype: str\n \"\"\"\n fig_path = os.path.join(fig_dir, \"{0}_cbar.png\".format(self.name))\n\n figh = 0.77\n fig, ax = plt.subplots(nrows=1, figsize=(6.4, figh))\n fig.subplots_adjust(top=1 - 0.35 / figh, bottom=0.15 / figh,\n left=0.3, right=0.7, hspace=1)\n\n cmap_range = self.options.cmap_range\n gradient = np.linspace(cmap_range[0], cmap_range[1], n_divs)\n gradient = np.vstack((gradient, gradient))\n\n ax.imshow(gradient, aspect='auto',\n cmap=plt.get_cmap(self.options.colour_map), vmin=0, vmax=1)\n\n ax.text(-0.02, 0.5, self.cbar_left_label,\n va='center', ha='right', fontsize=6,\n transform=ax.transAxes)\n ax.text(1.02, 0.5, self.cbar_right_label,\n va='center', ha='left', fontsize=6,\n transform=ax.transAxes)\n ax.set_title(self.cbar_title, fontsize=6)\n ax.set_axis_off()\n fig.set_size_inches(sz_in[0], sz_in[1])\n\n plt.savefig(fig_path, dpi=150)\n\n return os.path.relpath(fig_path, self.group_dir)\n\n def problem_dropdown_html(self) -> str:\n \"\"\"\n Generates the HTML for a dropdown checklist of problem sets.\n\n :return: HTML for a dropdown checklist of problem sets.\n :rtype: str\n \"\"\"\n items = [f' <li><label class=\"noselect\"><input '\n f'type=\"checkbox\" checked=true '\n f'onclick=\"toggle_problem(\\'{problem_name}\\')\"/> '\n f'{problem_name}</label></li>'\n for problem_name in self.sorted_results.keys()]\n\n return self._dropdown_html(\"problem_dropdown\", \"Select Problems\",\n items)\n\n def minimizer_dropdown_html(self) -> str:\n \"\"\"\n Generates the HTML for a dropdown checklist of minimizers.\n\n :return: HTML for a dropdown checklist of minimizers.\n :rtype: str\n \"\"\"\n minimizers = [(result.software.replace('_', '-'), result.minimizer)\n for result in next(iter(self.sorted_results.values()))]\n # Remove duplicates\n minimizers = list(dict.fromkeys(minimizers))\n\n items = [f' <li><label class=\"noselect\"><input '\n f'type=\"checkbox\" checked=true '\n f'onclick=\"toggle_minimizer(\\'{software}\\', '\n f'\\'{minimizer}\\')\"/> {minimizer}</label></li>'\n for software, minimizer in minimizers]\n\n return self._dropdown_html(\"minimizer_dropdown\",\n \"Select Minimizers\", items)\n\n @staticmethod\n def _dropdown_html(list_id: str, selector_text: str,\n checklist: list) -> str:\n \"\"\"\n Generates the HTML for a dropdown checklist. 
The list of items\n must be provided to this function.\n\n :param list_id: The ID to give the dropdown button.\n :type list_id: str\n :param selector_text: The text to display on the dropdown button.\n :type selector_text: str\n :param checklist: A list of HTML checkboxes to include in the\n dropdown.\n :type checklist: list\n :return: HTML for a dropdown checklist.\n :rtype: str\n \"\"\"\n checklist_str = \"\\n\".join(checklist)\n html = f'<div id=\"{list_id}\" class=\"dropdown-check-list\" ' \\\n f'tabindex=\"100\">\\n' \\\n f' <span class=\"anchor\" onclick=\"show_dropdown' \\\n f'(\\'{list_id}\\')\">{selector_text}</span>\\n' \\\n ' <ul class=\"items\">\\n' \\\n f'{checklist_str}\\n' \\\n ' </ul>\\n' \\\n '</div>'\n return html\n" ]
[ [ "numpy.linspace", "numpy.cos", "numpy.random.poisson", "numpy.exp", "numpy.sum" ], [ "numpy.linspace", "numpy.isnan", "matplotlib.pyplot.subplots", "pandas.MultiIndex.from_tuples", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.savefig", "numpy.log10", "pandas.DataFrame.from_dict", "matplotlib.colors.rgb2hex", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Zerwer/EEGMachineLearning
[ "d0dfcf617b22317a88018a86545c4f7e37a290b9" ]
[ "data/live_predict.py" ]
[ "# Unsure majority of time but more correct then wrong when thinking of\n# Requires more data for training\nfrom data import *\nfrom tkinter import *\nfrom keras.models import load_model\nimport numpy as np\nimport threading\nimport time\n\n# Time variables\nstart_wait = 10000\nwait = 2100\n\n# Set dimensions\nw = 900\nh = 556\n\nroot = Tk()\nroot.geometry(str(w)+'x'+str(h))\nroot.title('Predictor')\n\ngraphing_area = Canvas(root, width=w, height=h)\ngraphing_area.pack()\n\n# Import model to be used\nsaved_model = load_model('model.h5')\n\n# Begin data thread\nthread = threading.Thread(target=data_loop, args=[False, False, False, 1, False])\nthread.start()\n\n\n# Predicts the input values and returns predicted letter\ndef predict(values, model):\n processed_data = np.expand_dims(np.array([np.abs(np.fft.rfft(np.array(values)))/85000]), 3)\n prediction = model.predict(processed_data)\n print(prediction[0][0])\n if prediction[0][0] < 0.1:\n return 'B'\n elif prediction[0][0] > 0.9:\n return 'A'\n else:\n return '?'\n\n\ndef display_prediction(canvas, frame, model):\n prediction = predict(last_values[-1500:], model)\n\n canvas.delete('all')\n canvas.create_text(w / 2, h / 2, font=\"Arial \" + str(int(round(h / 3, 0))), text='Collecting...', anchor='center')\n time.sleep(1)\n canvas.delete('all')\n canvas.create_text(w / 2, h / 2, font=\"Arial \" + str(int(round(h / 3, 0))), text=prediction, anchor='center')\n\n root.after(wait, display_prediction, canvas, frame, model)\n\n\nroot.after(start_wait, display_prediction, graphing_area, root, saved_model)\nroot.mainloop()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
st2yang/garage
[ "50186a9630df038aeba36d6b06b006ab32ed48f5", "50186a9630df038aeba36d6b06b006ab32ed48f5", "50186a9630df038aeba36d6b06b006ab32ed48f5" ]
[ "tests/garage/sampler/test_sampler.py", "tests/garage/tf/baselines/test_baselines.py", "tests/garage/torch/policies/test_deterministic_mlp_policy.py" ]
[ "from dowel import logger\nimport numpy as np\n\nfrom garage.sampler.utils import truncate_paths\n\nfrom tests.fixtures.logger import NullOutput\n\n\nclass TestSampler:\n\n def setup_method(self):\n logger.add_output(NullOutput())\n\n def teardown_method(self):\n logger.remove_all()\n\n def test_truncate_paths(self):\n paths = [\n dict(\n observations=np.zeros((100, 1)),\n actions=np.zeros((100, 1)),\n rewards=np.zeros(100),\n env_infos=dict(),\n agent_infos=dict(lala=np.zeros(100)),\n ),\n dict(\n observations=np.zeros((50, 1)),\n actions=np.zeros((50, 1)),\n rewards=np.zeros(50),\n env_infos=dict(),\n agent_infos=dict(lala=np.zeros(50)),\n ),\n ]\n\n truncated = truncate_paths(paths, 130)\n assert len(truncated) == 2\n assert len(truncated[-1]['observations']) == 30\n assert len(truncated[0]['observations']) == 100\n # make sure not to change the original one\n assert len(paths) == 2\n assert len(paths[-1]['observations']) == 50\n", "\"\"\"\nThis script creates a test that fails when\ngarage.tf.baselines failed to initialize.\n\"\"\"\nimport tensorflow as tf\n\nfrom garage.envs import GarageEnv\nfrom garage.tf.baselines import ContinuousMLPBaseline, GaussianMLPBaseline\n\nfrom tests.fixtures import TfGraphTestCase\nfrom tests.fixtures.envs.dummy import DummyBoxEnv\n\n\nclass TestTfBaselines(TfGraphTestCase):\n\n def test_baseline(self):\n \"\"\"Test the baseline initialization.\"\"\"\n box_env = GarageEnv(DummyBoxEnv())\n deterministic_mlp_baseline = ContinuousMLPBaseline(env_spec=box_env)\n gaussian_mlp_baseline = GaussianMLPBaseline(env_spec=box_env)\n\n self.sess.run(tf.compat.v1.global_variables_initializer())\n deterministic_mlp_baseline.get_param_values()\n gaussian_mlp_baseline.get_param_values()\n\n box_env.close()\n", "import pickle\n\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import nn\n\nfrom garage.envs import GarageEnv\nfrom garage.torch.policies import DeterministicMLPPolicy\n\nfrom tests.fixtures.envs.dummy import DummyBoxEnv\n\n\nclass TestDeterministicMLPPolicies:\n # yapf: disable\n @pytest.mark.parametrize('hidden_sizes', [\n (1, ), (2, ), (3, ), (1, 1), (2, 2)])\n # yapf: enable\n def test_get_action(self, hidden_sizes):\n env_spec = GarageEnv(DummyBoxEnv())\n obs_dim = env_spec.observation_space.flat_dim\n act_dim = env_spec.action_space.flat_dim\n obs = torch.ones([1, obs_dim], dtype=torch.float32)\n\n policy = DeterministicMLPPolicy(env_spec=env_spec,\n hidden_nonlinearity=None,\n hidden_sizes=hidden_sizes,\n hidden_w_init=nn.init.ones_,\n output_w_init=nn.init.ones_)\n\n expected_output = np.full([1, act_dim],\n fill_value=obs_dim * np.prod(hidden_sizes),\n dtype=np.float32)\n assert np.array_equal(policy.get_action(obs)[0], expected_output)\n\n # yapf: disable\n @pytest.mark.parametrize('batch_size, hidden_sizes', [\n (1, (1, )),\n (4, (2, )),\n (6, (3, )),\n (20, (1, 1)),\n (32, (2, 6, 8)),\n ])\n # yapf: enable\n def test_get_actions(self, batch_size, hidden_sizes):\n env_spec = GarageEnv(DummyBoxEnv())\n obs_dim = env_spec.observation_space.flat_dim\n act_dim = env_spec.action_space.flat_dim\n obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)\n\n policy = DeterministicMLPPolicy(env_spec=env_spec,\n hidden_nonlinearity=None,\n hidden_sizes=hidden_sizes,\n hidden_w_init=nn.init.ones_,\n output_w_init=nn.init.ones_)\n\n expected_output = np.full([batch_size, act_dim],\n fill_value=obs_dim * np.prod(hidden_sizes),\n dtype=np.float32)\n assert np.array_equal(policy.get_actions(obs)[0], expected_output)\n\n # yapf: disable\n 
@pytest.mark.parametrize('batch_size, hidden_sizes', [\n (1, (1, )),\n (4, (2, )),\n (10, (3, )),\n (25, (2, 4)),\n (34, (2, 6, 11)),\n ])\n # yapf: enable\n def test_is_pickleable(self, batch_size, hidden_sizes):\n env_spec = GarageEnv(DummyBoxEnv())\n obs_dim = env_spec.observation_space.flat_dim\n obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)\n\n policy = DeterministicMLPPolicy(env_spec=env_spec,\n hidden_nonlinearity=None,\n hidden_sizes=hidden_sizes,\n hidden_w_init=nn.init.ones_,\n output_w_init=nn.init.ones_)\n\n output1 = policy.get_actions(obs)[0]\n\n p = pickle.dumps(policy)\n policy_pickled = pickle.loads(p)\n output2 = policy_pickled.get_actions(obs)[0]\n assert np.array_equal(output1, output2)\n" ]
[ [ "numpy.zeros" ], [ "tensorflow.compat.v1.global_variables_initializer" ], [ "numpy.prod", "torch.ones", "numpy.array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lorentzenchr/scipy
[ "393a05ee927883ad6316b7092c851afea8f16816", "393a05ee927883ad6316b7092c851afea8f16816", "393a05ee927883ad6316b7092c851afea8f16816", "393a05ee927883ad6316b7092c851afea8f16816" ]
[ "scipy/signal/tests/test_savitzky_golay.py", "scipy/sparse/tests/test_array_api.py", "scipy/sparse/linalg/setup.py", "scipy/optimize/_tstutils.py" ]
[ "import numpy as np\nfrom numpy.testing import (assert_allclose, assert_equal,\n assert_almost_equal, assert_array_equal,\n assert_array_almost_equal)\n\nfrom scipy.ndimage import convolve1d\n\nfrom scipy.signal import savgol_coeffs, savgol_filter\nfrom scipy.signal._savitzky_golay import _polyder\n\n\ndef check_polyder(p, m, expected):\n dp = _polyder(p, m)\n assert_array_equal(dp, expected)\n\n\ndef test_polyder():\n cases = [\n ([5], 0, [5]),\n ([5], 1, [0]),\n ([3, 2, 1], 0, [3, 2, 1]),\n ([3, 2, 1], 1, [6, 2]),\n ([3, 2, 1], 2, [6]),\n ([3, 2, 1], 3, [0]),\n ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),\n ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),\n ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),\n ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),\n ]\n for p, m, expected in cases:\n check_polyder(np.array(p).T, m, np.array(expected).T)\n\n\n#--------------------------------------------------------------------\n# savgol_coeffs tests\n#--------------------------------------------------------------------\n\ndef alt_sg_coeffs(window_length, polyorder, pos):\n \"\"\"This is an alternative implementation of the SG coefficients.\n\n It uses numpy.polyfit and numpy.polyval. The results should be\n equivalent to those of savgol_coeffs(), but this implementation\n is slower.\n\n window_length should be odd.\n\n \"\"\"\n if pos is None:\n pos = window_length // 2\n t = np.arange(window_length)\n unit = (t == pos).astype(int)\n h = np.polyval(np.polyfit(t, unit, polyorder), t)\n return h\n\n\ndef test_sg_coeffs_trivial():\n # Test a trivial case of savgol_coeffs: polyorder = window_length - 1\n h = savgol_coeffs(1, 0)\n assert_allclose(h, [1])\n\n h = savgol_coeffs(3, 2)\n assert_allclose(h, [0, 1, 0], atol=1e-10)\n\n h = savgol_coeffs(5, 4)\n assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)\n\n h = savgol_coeffs(5, 4, pos=1)\n assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)\n\n h = savgol_coeffs(5, 4, pos=1, use='dot')\n assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)\n\n\ndef compare_coeffs_to_alt(window_length, order):\n # For the given window_length and order, compare the results\n # of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.\n # Also include pos=None.\n for pos in [None] + list(range(window_length)):\n h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')\n h2 = alt_sg_coeffs(window_length, order, pos=pos)\n assert_allclose(h1, h2, atol=1e-10,\n err_msg=(\"window_length = %d, order = %d, pos = %s\" %\n (window_length, order, pos)))\n\n\ndef test_sg_coeffs_compare():\n # Compare savgol_coeffs() to alt_sg_coeffs().\n for window_length in range(1, 8, 2):\n for order in range(window_length):\n compare_coeffs_to_alt(window_length, order)\n\n\ndef test_sg_coeffs_exact():\n polyorder = 4\n window_length = 9\n halflen = window_length // 2\n\n x = np.linspace(0, 21, 43)\n delta = x[1] - x[0]\n\n # The data is a cubic polynomial. We'll use an order 4\n # SG filter, so the filtered values should equal the input data\n # (except within half window_length of the edges).\n y = 0.5 * x ** 3 - x\n h = savgol_coeffs(window_length, polyorder)\n y0 = convolve1d(y, h)\n assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])\n\n # Check the same input, but use deriv=1. dy is the exact result.\n dy = 1.5 * x ** 2 - 1\n h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)\n y1 = convolve1d(y, h)\n assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])\n\n # Check the same input, but use deriv=2. 
d2y is the exact result.\n d2y = 3.0 * x\n h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)\n y2 = convolve1d(y, h)\n assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])\n\n\ndef test_sg_coeffs_deriv():\n # The data in `x` is a sampled parabola, so using savgol_coeffs with an\n # order 2 or higher polynomial should give exact results.\n i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])\n x = i ** 2 / 4\n dx = i / 2\n d2x = np.full_like(i, 0.5)\n for pos in range(x.size):\n coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')\n assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)\n coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)\n assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)\n coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)\n assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)\n\n\ndef test_sg_coeffs_deriv_gt_polyorder():\n \"\"\"\n If deriv > polyorder, the coefficients should be all 0.\n This is a regression test for a bug where, e.g.,\n savgol_coeffs(5, polyorder=1, deriv=2)\n raised an error.\n \"\"\"\n coeffs = savgol_coeffs(5, polyorder=1, deriv=2)\n assert_array_equal(coeffs, np.zeros(5))\n coeffs = savgol_coeffs(7, polyorder=4, deriv=6)\n assert_array_equal(coeffs, np.zeros(7))\n\n\ndef test_sg_coeffs_large():\n # Test that for large values of window_length and polyorder the array of\n # coefficients returned is symmetric. The aim is to ensure that\n # no potential numeric overflow occurs.\n coeffs0 = savgol_coeffs(31, 9)\n assert_array_almost_equal(coeffs0, coeffs0[::-1])\n coeffs1 = savgol_coeffs(31, 9, deriv=1)\n assert_array_almost_equal(coeffs1, -coeffs1[::-1])\n\n# --------------------------------------------------------------------\n# savgol_coeffs tests for even window length\n# --------------------------------------------------------------------\n\n\ndef test_sg_coeffs_even_window_length():\n # Simple case - deriv=0, polyorder=0, 1\n window_lengths = [4, 6, 8, 10, 12, 14, 16]\n for length in window_lengths:\n h_p_d = savgol_coeffs(length, 0, 0)\n assert_allclose(h_p_d, 1/length)\n\n # Verify with closed forms\n # deriv=1, polyorder=1, 2\n def h_p_d_closed_form_1(k, m):\n return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1))\n\n # deriv=2, polyorder=2\n def h_p_d_closed_form_2(k, m):\n numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2)\n denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1)\n return numer/denom\n\n for length in window_lengths:\n m = length//2\n expected_output = [h_p_d_closed_form_1(k, m)\n for k in range(-m + 1, m + 1)][::-1]\n actual_output = savgol_coeffs(length, 1, 1)\n assert_allclose(expected_output, actual_output)\n actual_output = savgol_coeffs(length, 2, 1)\n assert_allclose(expected_output, actual_output)\n\n expected_output = [h_p_d_closed_form_2(k, m)\n for k in range(-m + 1, m + 1)][::-1]\n actual_output = savgol_coeffs(length, 2, 2)\n assert_allclose(expected_output, actual_output)\n actual_output = savgol_coeffs(length, 3, 2)\n assert_allclose(expected_output, actual_output)\n\n#--------------------------------------------------------------------\n# savgol_filter tests\n#--------------------------------------------------------------------\n\n\ndef test_sg_filter_trivial():\n \"\"\" Test some trivial edge cases for savgol_filter().\"\"\"\n x = np.array([1.0])\n y = savgol_filter(x, 1, 0)\n assert_equal(y, [1.0])\n\n # Input is a single value. With a window length of 3 and polyorder 1,\n # the value in y is from the straight-line fit of (-1,0), (0,3) and\n # (1, 0) at 0. 
This is just the average of the three values, hence 1.0.\n x = np.array([3.0])\n y = savgol_filter(x, 3, 1, mode='constant')\n assert_almost_equal(y, [1.0], decimal=15)\n\n x = np.array([3.0])\n y = savgol_filter(x, 3, 1, mode='nearest')\n assert_almost_equal(y, [3.0], decimal=15)\n\n x = np.array([1.0] * 3)\n y = savgol_filter(x, 3, 1, mode='wrap')\n assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)\n\n\ndef test_sg_filter_basic():\n # Some basic test cases for savgol_filter().\n x = np.array([1.0, 2.0, 1.0])\n y = savgol_filter(x, 3, 1, mode='constant')\n assert_allclose(y, [1.0, 4.0 / 3, 1.0])\n\n y = savgol_filter(x, 3, 1, mode='mirror')\n assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])\n\n y = savgol_filter(x, 3, 1, mode='wrap')\n assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])\n\n\ndef test_sg_filter_2d():\n x = np.array([[1.0, 2.0, 1.0],\n [2.0, 4.0, 2.0]])\n expected = np.array([[1.0, 4.0 / 3, 1.0],\n [2.0, 8.0 / 3, 2.0]])\n y = savgol_filter(x, 3, 1, mode='constant')\n assert_allclose(y, expected)\n\n y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)\n assert_allclose(y, expected.T)\n\n\ndef test_sg_filter_interp_edges():\n # Another test with low degree polynomial data, for which we can easily\n # give the exact results. In this test, we use mode='interp', so\n # savgol_filter should match the exact solution for the entire data set,\n # including the edges.\n t = np.linspace(-5, 5, 21)\n delta = t[1] - t[0]\n # Polynomial test data.\n x = np.array([t,\n 3 * t ** 2,\n t ** 3 - t])\n dx = np.array([np.ones_like(t),\n 6 * t,\n 3 * t ** 2 - 1.0])\n d2x = np.array([np.zeros_like(t),\n np.full_like(t, 6),\n 6 * t])\n\n window_length = 7\n\n y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')\n assert_allclose(y, x, atol=1e-12)\n\n y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',\n deriv=1, delta=delta)\n assert_allclose(y1, dx, atol=1e-12)\n\n y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',\n deriv=2, delta=delta)\n assert_allclose(y2, d2x, atol=1e-12)\n\n # Transpose everything, and test again with axis=0.\n\n x = x.T\n dx = dx.T\n d2x = d2x.T\n\n y = savgol_filter(x, window_length, 3, axis=0, mode='interp')\n assert_allclose(y, x, atol=1e-12)\n\n y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',\n deriv=1, delta=delta)\n assert_allclose(y1, dx, atol=1e-12)\n\n y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',\n deriv=2, delta=delta)\n assert_allclose(y2, d2x, atol=1e-12)\n\n\ndef test_sg_filter_interp_edges_3d():\n # Test mode='interp' with a 3-D array.\n t = np.linspace(-5, 5, 21)\n delta = t[1] - t[0]\n x1 = np.array([t, -t])\n x2 = np.array([t ** 2, 3 * t ** 2 + 5])\n x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])\n dx1 = np.array([np.ones_like(t), -np.ones_like(t)])\n dx2 = np.array([2 * t, 6 * t])\n dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])\n\n # z has shape (3, 2, 21)\n z = np.array([x1, x2, x3])\n dz = np.array([dx1, dx2, dx3])\n\n y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)\n assert_allclose(y, z, atol=1e-10)\n\n dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)\n assert_allclose(dy, dz, atol=1e-10)\n\n # z has shape (3, 21, 2)\n z = np.array([x1.T, x2.T, x3.T])\n dz = np.array([dx1.T, dx2.T, dx3.T])\n\n y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)\n assert_allclose(y, z, atol=1e-10)\n\n dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)\n assert_allclose(dy, dz, atol=1e-10)\n\n # z has shape (21, 3, 
2)\n z = z.swapaxes(0, 1).copy()\n dz = dz.swapaxes(0, 1).copy()\n\n y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)\n assert_allclose(y, z, atol=1e-10)\n\n dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)\n assert_allclose(dy, dz, atol=1e-10)\n", "import pytest\nimport numpy as np\nimport numpy.testing as npt\nimport scipy.sparse\nimport scipy.sparse.linalg as spla\n\nsparray_types = ('bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil')\n\nsparray_classes = [\n getattr(scipy.sparse, f'{T}_array') for T in sparray_types\n]\n\nA = np.array([\n [0, 1, 2, 0],\n [2, 0, 0, 3],\n [1, 4, 0, 0]\n])\n\nB = np.array([\n [0, 1],\n [2, 0]\n])\n\nX = np.array([\n [1, 0, 0, 1],\n [2, 1, 2, 0],\n [0, 2, 1, 0],\n [0, 0, 1, 2]\n], dtype=float)\n\n\nsparrays = [sparray(A) for sparray in sparray_classes]\nsquare_sparrays = [sparray(B) for sparray in sparray_classes]\neig_sparrays = [sparray(X) for sparray in sparray_classes]\n\nparametrize_sparrays = pytest.mark.parametrize(\n \"A\", sparrays, ids=sparray_types\n)\nparametrize_square_sparrays = pytest.mark.parametrize(\n \"B\", square_sparrays, ids=sparray_types\n)\nparametrize_eig_sparrays = pytest.mark.parametrize(\n \"X\", eig_sparrays, ids=sparray_types\n)\n\n\n@parametrize_sparrays\ndef test_sum(A):\n assert not isinstance(A.sum(axis=0), np.matrix), \\\n \"Expected array, got matrix\"\n assert A.sum(axis=0).shape == (4,)\n assert A.sum(axis=1).shape == (3,)\n\n\n@parametrize_sparrays\ndef test_mean(A):\n assert not isinstance(A.mean(axis=1), np.matrix), \\\n \"Expected array, got matrix\"\n\n\n@parametrize_sparrays\ndef test_todense(A):\n assert not isinstance(A.todense(), np.matrix), \\\n \"Expected array, got matrix\"\n\n\n@parametrize_sparrays\ndef test_indexing(A):\n if A.__class__.__name__[:3] in ('dia', 'coo', 'bsr'):\n return\n\n with pytest.raises(NotImplementedError):\n A[1, :]\n\n with pytest.raises(NotImplementedError):\n A[:, 1]\n\n with pytest.raises(NotImplementedError):\n A[1, [1, 2]]\n\n with pytest.raises(NotImplementedError):\n A[[1, 2], 1]\n\n assert A[[0]]._is_array, \"Expected sparse array, got sparse matrix\"\n assert A[1, [[1, 2]]]._is_array, \"Expected ndarray, got sparse array\"\n assert A[[[1, 2]], 1]._is_array, \"Expected ndarray, got sparse array\"\n assert A[:, [1, 2]]._is_array, \"Expected sparse array, got something else\"\n\n\n@parametrize_sparrays\ndef test_dense_addition(A):\n X = np.random.random(A.shape)\n assert not isinstance(A + X, np.matrix), \"Expected array, got matrix\"\n\n\n@parametrize_sparrays\ndef test_sparse_addition(A):\n assert (A + A)._is_array, \"Expected array, got matrix\"\n\n\n@parametrize_sparrays\ndef test_elementwise_mul(A):\n assert np.all((A * A).todense() == A.power(2).todense())\n\n\n@parametrize_sparrays\ndef test_matmul(A):\n assert np.all((A @ A.T).todense() == A.dot(A.T).todense())\n\n\n@parametrize_square_sparrays\ndef test_pow(B):\n assert (B**0)._is_array, \"Expected array, got matrix\"\n assert (B**2)._is_array, \"Expected array, got matrix\"\n\n\n@parametrize_sparrays\ndef test_sparse_divide(A):\n assert isinstance(A / A, np.ndarray)\n\n\n@parametrize_sparrays\ndef test_dense_divide(A):\n assert (A / 2)._is_array, \"Expected array, got matrix\"\n\n\n@parametrize_sparrays\ndef test_no_A_attr(A):\n with pytest.warns(np.VisibleDeprecationWarning):\n A.A\n\n\n@parametrize_sparrays\ndef test_no_H_attr(A):\n with pytest.warns(np.VisibleDeprecationWarning):\n A.H\n\n\n@parametrize_sparrays\ndef test_getrow_getcol(A):\n assert A.getcol(0)._is_array\n assert 
A.getrow(0)._is_array\n\n\n@parametrize_sparrays\ndef test_docstr(A):\n if A.__doc__ is None:\n return\n\n docstr = A.__doc__.lower()\n for phrase in ('matrix', 'matrices'):\n assert phrase not in docstr\n\n\n# -- linalg --\n\n@parametrize_sparrays\ndef test_as_linearoperator(A):\n L = spla.aslinearoperator(A)\n npt.assert_allclose(L * [1, 2, 3, 4], A @ [1, 2, 3, 4])\n\n\n@parametrize_square_sparrays\ndef test_inv(B):\n if B.__class__.__name__[:3] != 'csc':\n return\n\n C = spla.inv(B)\n\n assert C._is_array\n npt.assert_allclose(C.todense(), np.linalg.inv(B.todense()))\n\n\n@parametrize_square_sparrays\ndef test_expm(B):\n if B.__class__.__name__[:3] != 'csc':\n return\n\n Bmat = scipy.sparse.csc_matrix(B)\n\n C = spla.expm(B)\n\n assert C._is_array\n npt.assert_allclose(\n C.todense(),\n spla.expm(Bmat).todense()\n )\n\n\n@parametrize_square_sparrays\ndef test_expm_multiply(B):\n if B.__class__.__name__[:3] != 'csc':\n return\n\n npt.assert_allclose(\n spla.expm_multiply(B, np.array([1, 2])),\n spla.expm(B) @ [1, 2]\n )\n\n\n@parametrize_sparrays\ndef test_norm(A):\n C = spla.norm(A)\n npt.assert_allclose(C, np.linalg.norm(A.todense()))\n\n\n@parametrize_square_sparrays\ndef test_onenormest(B):\n C = spla.onenormest(B)\n npt.assert_allclose(C, np.linalg.norm(B.todense(), 1))\n\n\n@parametrize_square_sparrays\ndef test_spsolve(B):\n if B.__class__.__name__[:3] not in ('csc', 'csr'):\n return\n\n npt.assert_allclose(\n spla.spsolve(B, [1, 2]),\n np.linalg.solve(B.todense(), [1, 2])\n )\n\n\ndef test_spsolve_triangular():\n X = scipy.sparse.csr_array([\n [1, 0, 0, 0],\n [2, 1, 0, 0],\n [3, 2, 1, 0],\n [4, 3, 2, 1],\n ])\n spla.spsolve_triangular(X, [1, 2, 3, 4])\n\n\n@parametrize_square_sparrays\ndef test_factorized(B):\n if B.__class__.__name__[:3] != 'csc':\n return\n\n LU = spla.factorized(B)\n npt.assert_allclose(\n LU(np.array([1, 2])),\n np.linalg.solve(B.todense(), [1, 2])\n )\n\n\n@parametrize_square_sparrays\[email protected](\n \"solver\",\n [\"bicg\", \"bicgstab\", \"cg\", \"cgs\", \"gmres\", \"lgmres\", \"minres\", \"qmr\",\n \"gcrotmk\", \"tfqmr\"]\n)\ndef test_solvers(B, solver):\n if solver == \"minres\":\n kwargs = {}\n else:\n kwargs = {'atol': 1e-5}\n\n x, info = getattr(spla, solver)(B, np.array([1, 2]), **kwargs)\n assert info >= 0 # no errors, even if perhaps did not converge fully\n npt.assert_allclose(x, [1, 1], atol=1e-1)\n\n\n@parametrize_sparrays\[email protected](\n \"solver\",\n [\"lsqr\", \"lsmr\"]\n)\ndef test_lstsqr(A, solver):\n x, *_ = getattr(spla, solver)(A, [1, 2, 3])\n npt.assert_allclose(A @ x, [1, 2, 3])\n\n\n@parametrize_eig_sparrays\ndef test_eigs(X):\n e, v = spla.eigs(X, k=1)\n npt.assert_allclose(\n X @ v,\n e[0] * v\n )\n\n\n@parametrize_eig_sparrays\ndef test_eigsh(X):\n X = X + X.T\n e, v = spla.eigsh(X, k=1)\n npt.assert_allclose(\n X @ v,\n e[0] * v\n )\n\n\n@parametrize_eig_sparrays\ndef test_svds(X):\n u, s, vh = spla.svds(X, k=3)\n u2, s2, vh2 = np.linalg.svd(X.todense())\n s = np.sort(s)\n s2 = np.sort(s2[:3])\n npt.assert_allclose(s, s2, atol=1e-3)\n\n\ndef test_splu():\n X = scipy.sparse.csc_array([\n [1, 0, 0, 0],\n [2, 1, 0, 0],\n [3, 2, 1, 0],\n [4, 3, 2, 1],\n ])\n LU = spla.splu(X)\n npt.assert_allclose(LU.solve(np.array([1, 2, 3, 4])), [1, 0, 0, 0])\n\n\ndef test_spilu():\n X = scipy.sparse.csc_array([\n [1, 0, 0, 0],\n [2, 1, 0, 0],\n [3, 2, 1, 0],\n [4, 3, 2, 1],\n ])\n LU = spla.spilu(X)\n npt.assert_allclose(LU.solve(np.array([1, 2, 3, 4])), [1, 0, 0, 0])\n", "\ndef configuration(parent_package='', top_path=None):\n from 
numpy.distutils.misc_util import Configuration\n\n config = Configuration('linalg', parent_package, top_path)\n\n config.add_subpackage('_isolve')\n config.add_subpackage('_dsolve')\n config.add_subpackage('_eigen')\n\n config.add_data_dir('tests')\n\n # PROPACK\n config.add_subpackage('_propack')\n\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "r\"\"\"\nParameters used in test and benchmark methods.\n\nCollections of test cases suitable for testing 1-D root-finders\n 'original': The original benchmarking functions.\n Real-valued functions of real-valued inputs on an interval\n with a zero.\n f1, .., f3 are continuous and infinitely differentiable\n f4 has a left- and right- discontinuity at the root\n f5 has a root at 1 replacing a 1st order pole\n f6 is randomly positive on one side of the root,\n randomly negative on the other.\n f4 - f6 are not continuous at the root.\n\n 'aps': The test problems in the 1995 paper\n TOMS \"Algorithm 748: Enclosing Zeros of Continuous Functions\"\n by Alefeld, Potra and Shi. Real-valued functions of\n real-valued inputs on an interval with a zero.\n Suitable for methods which start with an enclosing interval, and\n derivatives up to 2nd order.\n\n 'complex': Some complex-valued functions of complex-valued inputs.\n No enclosing bracket is provided.\n Suitable for methods which use one or more starting values, and\n derivatives up to 2nd order.\n\n The test cases are provided as a list of dictionaries. The dictionary\n keys will be a subset of:\n [\"f\", \"fprime\", \"fprime2\", \"args\", \"bracket\", \"smoothness\",\n \"a\", \"b\", \"x0\", \"x1\", \"root\", \"ID\"]\n\"\"\"\n\n# Sources:\n# [1] Alefeld, G. E. and Potra, F. A. and Shi, Yixun,\n# \"Algorithm 748: Enclosing Zeros of Continuous Functions\",\n# ACM Trans. Math. Softw. Volume 221(1995)\n# doi = {10.1145/210089.210111},\n\nfrom random import random\n\nimport numpy as np\n\nfrom scipy.optimize import _zeros_py as cc\n\n# \"description\" refers to the original functions\ndescription = \"\"\"\nf2 is a symmetric parabola, x**2 - 1\nf3 is a quartic polynomial with large hump in interval\nf4 is step function with a discontinuity at 1\nf5 is a hyperbola with vertical asymptote at 1\nf6 has random values positive to left of 1, negative to right\n\nOf course, these are not real problems. They just test how the\n'good' solvers behave in bad circumstances where bisection is\nreally the best. A good solver should not be much worse than\nbisection in such circumstance, while being faster for smooth\nmonotone sorts of functions.\n\"\"\"\n\n\ndef f1(x):\n r\"\"\"f1 is a quadratic with roots at 0 and 1\"\"\"\n return x * (x - 1.)\n\n\ndef f1_fp(x):\n return 2 * x - 1\n\n\ndef f1_fpp(x):\n return 2\n\n\ndef f2(x):\n r\"\"\"f2 is a symmetric parabola, x**2 - 1\"\"\"\n return x**2 - 1\n\n\ndef f2_fp(x):\n return 2 * x\n\n\ndef f2_fpp(x):\n return 2\n\n\ndef f3(x):\n r\"\"\"A quartic with roots at 0, 1, 2 and 3\"\"\"\n return x * (x - 1.) * (x - 2.) * (x - 3.) # x**4 - 6x**3 + 11x**2 - 6x\n\n\ndef f3_fp(x):\n return 4 * x**3 - 18 * x**2 + 22 * x - 6\n\n\ndef f3_fpp(x):\n return 12 * x**2 - 36 * x + 22\n\n\ndef f4(x):\n r\"\"\"Piecewise linear, left- and right- discontinuous at x=1, the root.\"\"\"\n if x > 1:\n return 1.0 + .1 * x\n if x < 1:\n return -1.0 + .1 * x\n return 0\n\n\ndef f5(x):\n r\"\"\"Hyperbola with a pole at x=1, but pole replaced with 0. Not continuous at root.\"\"\"\n if x != 1:\n return 1.0 / (1. 
- x)\n return 0\n\n\n# f6(x) returns random value. Without memoization, calling twice with the\n# same x returns different values, hence a \"random value\", not a\n# \"function with random values\"\n_f6_cache = {}\ndef f6(x):\n v = _f6_cache.get(x, None)\n if v is None:\n if x > 1:\n v = random()\n elif x < 1:\n v = -random()\n else:\n v = 0\n _f6_cache[x] = v\n return v\n\n\n# Each Original test case has\n# - a function and its two derivatives,\n# - additional arguments,\n# - a bracket enclosing a root,\n# - the order of differentiability (smoothness) on this interval\n# - a starting value for methods which don't require a bracket\n# - the root (inside the bracket)\n# - an Identifier of the test case\n\n_ORIGINAL_TESTS_KEYS = [\"f\", \"fprime\", \"fprime2\", \"args\", \"bracket\", \"smoothness\", \"x0\", \"root\", \"ID\"]\n_ORIGINAL_TESTS = [\n [f1, f1_fp, f1_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, \"original.01.00\"],\n [f2, f2_fp, f2_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, \"original.02.00\"],\n [f3, f3_fp, f3_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, \"original.03.00\"],\n [f4, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, \"original.04.00\"],\n [f5, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, \"original.05.00\"],\n [f6, None, None, (), [0.5, np.sqrt(3)], -np.inf, 0.6, 1.0, \"original.05.00\"]\n]\n\n_ORIGINAL_TESTS_DICTS = [dict(zip(_ORIGINAL_TESTS_KEYS, testcase)) for testcase in _ORIGINAL_TESTS]\n\n# ##################\n# \"APS\" test cases\n# Functions and test cases that appear in [1]\n\n\ndef aps01_f(x):\n r\"\"\"Straightforward sum of trigonometric function and polynomial\"\"\"\n return np.sin(x) - x / 2\n\n\ndef aps01_fp(x):\n return np.cos(x) - 1.0 / 2\n\n\ndef aps01_fpp(x):\n return -np.sin(x)\n\n\ndef aps02_f(x):\n r\"\"\"poles at x=n**2, 1st and 2nd derivatives at root are also close to 0\"\"\"\n ii = np.arange(1, 21)\n return -2 * np.sum((2 * ii - 5)**2 / (x - ii**2)**3)\n\n\ndef aps02_fp(x):\n ii = np.arange(1, 21)\n return 6 * np.sum((2 * ii - 5)**2 / (x - ii**2)**4)\n\n\ndef aps02_fpp(x):\n ii = np.arange(1, 21)\n return 24 * np.sum((2 * ii - 5)**2 / (x - ii**2)**5)\n\n\ndef aps03_f(x, a, b):\n r\"\"\"Rapidly changing at the root\"\"\"\n return a * x * np.exp(b * x)\n\n\ndef aps03_fp(x, a, b):\n return a * (b * x + 1) * np.exp(b * x)\n\n\ndef aps03_fpp(x, a, b):\n return a * (b * (b * x + 1) + b) * np.exp(b * x)\n\n\ndef aps04_f(x, n, a):\n r\"\"\"Medium-degree polynomial\"\"\"\n return x**n - a\n\n\ndef aps04_fp(x, n, a):\n return n * x**(n - 1)\n\n\ndef aps04_fpp(x, n, a):\n return n * (n - 1) * x**(n - 2)\n\n\ndef aps05_f(x):\n r\"\"\"Simple Trigonometric function\"\"\"\n return np.sin(x) - 1.0 / 2\n\n\ndef aps05_fp(x):\n return np.cos(x)\n\n\ndef aps05_fpp(x):\n return -np.sin(x)\n\n\ndef aps06_f(x, n):\n r\"\"\"Exponential rapidly changing from -1 to 1 at x=0\"\"\"\n return 2 * x * np.exp(-n) - 2 * np.exp(-n * x) + 1\n\n\ndef aps06_fp(x, n):\n return 2 * np.exp(-n) + 2 * n * np.exp(-n * x)\n\n\ndef aps06_fpp(x, n):\n return -2 * n * n * np.exp(-n * x)\n\n\ndef aps07_f(x, n):\n r\"\"\"Upside down parabola with parametrizable height\"\"\"\n return (1 + (1 - n)**2) * x - (1 - n * x)**2\n\n\ndef aps07_fp(x, n):\n return (1 + (1 - n)**2) + 2 * n * (1 - n * x)\n\n\ndef aps07_fpp(x, n):\n return -2 * n * n\n\n\ndef aps08_f(x, n):\n r\"\"\"Degree n polynomial\"\"\"\n return x * x - (1 - x)**n\n\n\ndef aps08_fp(x, n):\n return 2 * x + n * (1 - x)**(n - 1)\n\n\ndef aps08_fpp(x, n):\n return 2 - n * (n - 1) * (1 - x)**(n - 2)\n\n\ndef aps09_f(x, n):\n 
r\"\"\"Upside down quartic with parametrizable height\"\"\"\n return (1 + (1 - n)**4) * x - (1 - n * x)**4\n\n\ndef aps09_fp(x, n):\n return (1 + (1 - n)**4) + 4 * n * (1 - n * x)**3\n\n\ndef aps09_fpp(x, n):\n return -12 * n * (1 - n * x)**2\n\n\ndef aps10_f(x, n):\n r\"\"\"Exponential plus a polynomial\"\"\"\n return np.exp(-n * x) * (x - 1) + x**n\n\n\ndef aps10_fp(x, n):\n return np.exp(-n * x) * (-n * (x - 1) + 1) + n * x**(n - 1)\n\n\ndef aps10_fpp(x, n):\n return np.exp(-n * x) * (-n * (-n * (x - 1) + 1) + -n * x) + n * (n - 1) * x**(n - 2)\n\n\ndef aps11_f(x, n):\n r\"\"\"Rational function with a zero at x=1/n and a pole at x=0\"\"\"\n return (n * x - 1) / ((n - 1) * x)\n\n\ndef aps11_fp(x, n):\n return 1 / (n - 1) / x**2\n\n\ndef aps11_fpp(x, n):\n return -2 / (n - 1) / x**3\n\n\ndef aps12_f(x, n):\n r\"\"\"nth root of x, with a zero at x=n\"\"\"\n return np.power(x, 1.0 / n) - np.power(n, 1.0 / n)\n\n\ndef aps12_fp(x, n):\n return np.power(x, (1.0 - n) / n) / n\n\n\ndef aps12_fpp(x, n):\n return np.power(x, (1.0 - 2 * n) / n) * (1.0 / n) * (1.0 - n) / n\n\n\n_MAX_EXPABLE = np.log(np.finfo(float).max)\n\n\ndef aps13_f(x):\n r\"\"\"Function with *all* derivatives 0 at the root\"\"\"\n if x == 0:\n return 0\n # x2 = 1.0/x**2\n # if x2 > 708:\n # return 0\n y = 1 / x**2\n if y > _MAX_EXPABLE:\n return 0\n return x / np.exp(y)\n\n\ndef aps13_fp(x):\n if x == 0:\n return 0\n y = 1 / x**2\n if y > _MAX_EXPABLE:\n return 0\n return (1 + 2 / x**2) / np.exp(y)\n\n\ndef aps13_fpp(x):\n if x == 0:\n return 0\n y = 1 / x**2\n if y > _MAX_EXPABLE:\n return 0\n return 2 * (2 - x**2) / x**5 / np.exp(y)\n\n\ndef aps14_f(x, n):\n r\"\"\"0 for negative x-values, trigonometric+linear for x positive\"\"\"\n if x <= 0:\n return -n / 20.0\n return n / 20.0 * (x / 1.5 + np.sin(x) - 1)\n\n\ndef aps14_fp(x, n):\n if x <= 0:\n return 0\n return n / 20.0 * (1.0 / 1.5 + np.cos(x))\n\n\ndef aps14_fpp(x, n):\n if x <= 0:\n return 0\n return -n / 20.0 * (np.sin(x))\n\n\ndef aps15_f(x, n):\n r\"\"\"piecewise linear, constant outside of [0, 0.002/(1+n)]\"\"\"\n if x < 0:\n return -0.859\n if x > 2 * 1e-3 / (1 + n):\n return np.e - 1.859\n return np.exp((n + 1) * x / 2 * 1000) - 1.859\n\n\ndef aps15_fp(x, n):\n if not 0 <= x <= 2 * 1e-3 / (1 + n):\n return np.e - 1.859\n return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000\n\n\ndef aps15_fpp(x, n):\n if not 0 <= x <= 2 * 1e-3 / (1 + n):\n return np.e - 1.859\n return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 * (n + 1) / 2 * 1000\n\n\n# Each APS test case has\n# - a function and its two derivatives,\n# - additional arguments,\n# - a bracket enclosing a root,\n# - the order of differentiability of the the function on this interval\n# - a starting value for methods which don't require a bracket\n# - the root (inside the bracket)\n# - an Identifier of the test case\n#\n# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided\n# in [1] for each test case. 
Newton and Halley methods need a single\n# starting point x0, which was chosen to be near the middle of the interval,\n# unless that would have made the problem too easy.\n\n_APS_TESTS_KEYS = [\"f\", \"fprime\", \"fprime2\", \"args\", \"bracket\", \"smoothness\", \"x0\", \"root\", \"ID\"]\n_APS_TESTS = [\n [aps01_f, aps01_fp, aps01_fpp, (), [np.pi / 2, np.pi], np.inf, 3, 1.89549426703398094e+00, \"aps.01.00\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [1 + 1e-9, 4 - 1e-9], np.inf, 2, 3.02291534727305677e+00, \"aps.02.00\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [4 + 1e-9, 9 - 1e-9], np.inf, 5, 6.68375356080807848e+00, \"aps.02.01\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [9 + 1e-9, 16 - 1e-9], np.inf, 10, 1.12387016550022114e+01, \"aps.02.02\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [16 + 1e-9, 25 - 1e-9], np.inf, 17, 1.96760000806234103e+01, \"aps.02.03\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [25 + 1e-9, 36 - 1e-9], np.inf, 26, 2.98282273265047557e+01, \"aps.02.04\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [36 + 1e-9, 49 - 1e-9], np.inf, 37, 4.19061161952894139e+01, \"aps.02.05\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [49 + 1e-9, 64 - 1e-9], np.inf, 50, 5.59535958001430913e+01, \"aps.02.06\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [64 + 1e-9, 81 - 1e-9], np.inf, 65, 7.19856655865877997e+01, \"aps.02.07\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [81 + 1e-9, 100 - 1e-9], np.inf, 82, 9.00088685391666701e+01, \"aps.02.08\"],\n [aps02_f, aps02_fp, aps02_fpp, (), [100 + 1e-9, 121 - 1e-9], np.inf, 101, 1.10026532748330197e+02, \"aps.02.09\"],\n [aps03_f, aps03_fp, aps03_fpp, (-40, -1), [-9, 31], np.inf, -2, 0, \"aps.03.00\"],\n [aps03_f, aps03_fp, aps03_fpp, (-100, -2), [-9, 31], np.inf, -2, 0, \"aps.03.01\"],\n [aps03_f, aps03_fp, aps03_fpp, (-200, -3), [-9, 31], np.inf, -2, 0, \"aps.03.02\"],\n [aps04_f, aps04_fp, aps04_fpp, (4, 0.2), [0, 5], np.inf, 2.5, 6.68740304976422006e-01, \"aps.04.00\"],\n [aps04_f, aps04_fp, aps04_fpp, (6, 0.2), [0, 5], np.inf, 2.5, 7.64724491331730039e-01, \"aps.04.01\"],\n [aps04_f, aps04_fp, aps04_fpp, (8, 0.2), [0, 5], np.inf, 2.5, 8.17765433957942545e-01, \"aps.04.02\"],\n [aps04_f, aps04_fp, aps04_fpp, (10, 0.2), [0, 5], np.inf, 2.5, 8.51339922520784609e-01, \"aps.04.03\"],\n [aps04_f, aps04_fp, aps04_fpp, (12, 0.2), [0, 5], np.inf, 2.5, 8.74485272221167897e-01, \"aps.04.04\"],\n [aps04_f, aps04_fp, aps04_fpp, (4, 1), [0, 5], np.inf, 2.5, 1, \"aps.04.05\"],\n [aps04_f, aps04_fp, aps04_fpp, (6, 1), [0, 5], np.inf, 2.5, 1, \"aps.04.06\"],\n [aps04_f, aps04_fp, aps04_fpp, (8, 1), [0, 5], np.inf, 2.5, 1, \"aps.04.07\"],\n [aps04_f, aps04_fp, aps04_fpp, (10, 1), [0, 5], np.inf, 2.5, 1, \"aps.04.08\"],\n [aps04_f, aps04_fp, aps04_fpp, (12, 1), [0, 5], np.inf, 2.5, 1, \"aps.04.09\"],\n [aps04_f, aps04_fp, aps04_fpp, (8, 1), [-0.95, 4.05], np.inf, 1.5, 1, \"aps.04.10\"],\n [aps04_f, aps04_fp, aps04_fpp, (10, 1), [-0.95, 4.05], np.inf, 1.5, 1, \"aps.04.11\"],\n [aps04_f, aps04_fp, aps04_fpp, (12, 1), [-0.95, 4.05], np.inf, 1.5, 1, \"aps.04.12\"],\n [aps04_f, aps04_fp, aps04_fpp, (14, 1), [-0.95, 4.05], np.inf, 1.5, 1, \"aps.04.13\"],\n [aps05_f, aps05_fp, aps05_fpp, (), [0, 1.5], np.inf, 1.3, np.pi / 6, \"aps.05.00\"],\n [aps06_f, aps06_fp, aps06_fpp, (1,), [0, 1], np.inf, 0.5, 4.22477709641236709e-01, \"aps.06.00\"],\n [aps06_f, aps06_fp, aps06_fpp, (2,), [0, 1], np.inf, 0.5, 3.06699410483203705e-01, \"aps.06.01\"],\n [aps06_f, aps06_fp, aps06_fpp, (3,), [0, 1], np.inf, 0.5, 2.23705457654662959e-01, \"aps.06.02\"],\n [aps06_f, aps06_fp, aps06_fpp, (4,), [0, 1], np.inf, 0.5, 
1.71719147519508369e-01, \"aps.06.03\"],\n [aps06_f, aps06_fp, aps06_fpp, (5,), [0, 1], np.inf, 0.4, 1.38257155056824066e-01, \"aps.06.04\"],\n [aps06_f, aps06_fp, aps06_fpp, (20,), [0, 1], np.inf, 0.1, 3.46573590208538521e-02, \"aps.06.05\"],\n [aps06_f, aps06_fp, aps06_fpp, (40,), [0, 1], np.inf, 5e-02, 1.73286795139986315e-02, \"aps.06.06\"],\n [aps06_f, aps06_fp, aps06_fpp, (60,), [0, 1], np.inf, 1.0 / 30, 1.15524530093324210e-02, \"aps.06.07\"],\n [aps06_f, aps06_fp, aps06_fpp, (80,), [0, 1], np.inf, 2.5e-02, 8.66433975699931573e-03, \"aps.06.08\"],\n [aps06_f, aps06_fp, aps06_fpp, (100,), [0, 1], np.inf, 2e-02, 6.93147180559945415e-03, \"aps.06.09\"],\n [aps07_f, aps07_fp, aps07_fpp, (5,), [0, 1], np.inf, 0.4, 3.84025518406218985e-02, \"aps.07.00\"],\n [aps07_f, aps07_fp, aps07_fpp, (10,), [0, 1], np.inf, 0.4, 9.90000999800049949e-03, \"aps.07.01\"],\n [aps07_f, aps07_fp, aps07_fpp, (20,), [0, 1], np.inf, 0.4, 2.49375003906201174e-03, \"aps.07.02\"],\n [aps08_f, aps08_fp, aps08_fpp, (2,), [0, 1], np.inf, 0.9, 0.5, \"aps.08.00\"],\n [aps08_f, aps08_fp, aps08_fpp, (5,), [0, 1], np.inf, 0.9, 3.45954815848242059e-01, \"aps.08.01\"],\n [aps08_f, aps08_fp, aps08_fpp, (10,), [0, 1], np.inf, 0.9, 2.45122333753307220e-01, \"aps.08.02\"],\n [aps08_f, aps08_fp, aps08_fpp, (15,), [0, 1], np.inf, 0.9, 1.95547623536565629e-01, \"aps.08.03\"],\n [aps08_f, aps08_fp, aps08_fpp, (20,), [0, 1], np.inf, 0.9, 1.64920957276440960e-01, \"aps.08.04\"],\n [aps09_f, aps09_fp, aps09_fpp, (1,), [0, 1], np.inf, 0.5, 2.75508040999484394e-01, \"aps.09.00\"],\n [aps09_f, aps09_fp, aps09_fpp, (2,), [0, 1], np.inf, 0.5, 1.37754020499742197e-01, \"aps.09.01\"],\n [aps09_f, aps09_fp, aps09_fpp, (4,), [0, 1], np.inf, 0.5, 1.03052837781564422e-02, \"aps.09.02\"],\n [aps09_f, aps09_fp, aps09_fpp, (5,), [0, 1], np.inf, 0.5, 3.61710817890406339e-03, \"aps.09.03\"],\n [aps09_f, aps09_fp, aps09_fpp, (8,), [0, 1], np.inf, 0.5, 4.10872918496395375e-04, \"aps.09.04\"],\n [aps09_f, aps09_fp, aps09_fpp, (15,), [0, 1], np.inf, 0.5, 2.59895758929076292e-05, \"aps.09.05\"],\n [aps09_f, aps09_fp, aps09_fpp, (20,), [0, 1], np.inf, 0.5, 7.66859512218533719e-06, \"aps.09.06\"],\n [aps10_f, aps10_fp, aps10_fpp, (1,), [0, 1], np.inf, 0.9, 4.01058137541547011e-01, \"aps.10.00\"],\n [aps10_f, aps10_fp, aps10_fpp, (5,), [0, 1], np.inf, 0.9, 5.16153518757933583e-01, \"aps.10.01\"],\n [aps10_f, aps10_fp, aps10_fpp, (10,), [0, 1], np.inf, 0.9, 5.39522226908415781e-01, \"aps.10.02\"],\n [aps10_f, aps10_fp, aps10_fpp, (15,), [0, 1], np.inf, 0.9, 5.48182294340655241e-01, \"aps.10.03\"],\n [aps10_f, aps10_fp, aps10_fpp, (20,), [0, 1], np.inf, 0.9, 5.52704666678487833e-01, \"aps.10.04\"],\n [aps11_f, aps11_fp, aps11_fpp, (2,), [0.01, 1], np.inf, 1e-02, 1.0 / 2, \"aps.11.00\"],\n [aps11_f, aps11_fp, aps11_fpp, (5,), [0.01, 1], np.inf, 1e-02, 1.0 / 5, \"aps.11.01\"],\n [aps11_f, aps11_fp, aps11_fpp, (15,), [0.01, 1], np.inf, 1e-02, 1.0 / 15, \"aps.11.02\"],\n [aps11_f, aps11_fp, aps11_fpp, (20,), [0.01, 1], np.inf, 1e-02, 1.0 / 20, \"aps.11.03\"],\n [aps12_f, aps12_fp, aps12_fpp, (2,), [1, 100], np.inf, 1.1, 2, \"aps.12.00\"],\n [aps12_f, aps12_fp, aps12_fpp, (3,), [1, 100], np.inf, 1.1, 3, \"aps.12.01\"],\n [aps12_f, aps12_fp, aps12_fpp, (4,), [1, 100], np.inf, 1.1, 4, \"aps.12.02\"],\n [aps12_f, aps12_fp, aps12_fpp, (5,), [1, 100], np.inf, 1.1, 5, \"aps.12.03\"],\n [aps12_f, aps12_fp, aps12_fpp, (6,), [1, 100], np.inf, 1.1, 6, \"aps.12.04\"],\n [aps12_f, aps12_fp, aps12_fpp, (7,), [1, 100], np.inf, 1.1, 7, \"aps.12.05\"],\n [aps12_f, aps12_fp, 
aps12_fpp, (9,), [1, 100], np.inf, 1.1, 9, \"aps.12.06\"],\n [aps12_f, aps12_fp, aps12_fpp, (11,), [1, 100], np.inf, 1.1, 11, \"aps.12.07\"],\n [aps12_f, aps12_fp, aps12_fpp, (13,), [1, 100], np.inf, 1.1, 13, \"aps.12.08\"],\n [aps12_f, aps12_fp, aps12_fpp, (15,), [1, 100], np.inf, 1.1, 15, \"aps.12.09\"],\n [aps12_f, aps12_fp, aps12_fpp, (17,), [1, 100], np.inf, 1.1, 17, \"aps.12.10\"],\n [aps12_f, aps12_fp, aps12_fpp, (19,), [1, 100], np.inf, 1.1, 19, \"aps.12.11\"],\n [aps12_f, aps12_fp, aps12_fpp, (21,), [1, 100], np.inf, 1.1, 21, \"aps.12.12\"],\n [aps12_f, aps12_fp, aps12_fpp, (23,), [1, 100], np.inf, 1.1, 23, \"aps.12.13\"],\n [aps12_f, aps12_fp, aps12_fpp, (25,), [1, 100], np.inf, 1.1, 25, \"aps.12.14\"],\n [aps12_f, aps12_fp, aps12_fpp, (27,), [1, 100], np.inf, 1.1, 27, \"aps.12.15\"],\n [aps12_f, aps12_fp, aps12_fpp, (29,), [1, 100], np.inf, 1.1, 29, \"aps.12.16\"],\n [aps12_f, aps12_fp, aps12_fpp, (31,), [1, 100], np.inf, 1.1, 31, \"aps.12.17\"],\n [aps12_f, aps12_fp, aps12_fpp, (33,), [1, 100], np.inf, 1.1, 33, \"aps.12.18\"],\n [aps13_f, aps13_fp, aps13_fpp, (), [-1, 4], np.inf, 1.5, 1.54720911915117165e-02, \"aps.13.00\"],\n [aps14_f, aps14_fp, aps14_fpp, (1,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.00\"],\n [aps14_f, aps14_fp, aps14_fpp, (2,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.01\"],\n [aps14_f, aps14_fp, aps14_fpp, (3,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.02\"],\n [aps14_f, aps14_fp, aps14_fpp, (4,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.03\"],\n [aps14_f, aps14_fp, aps14_fpp, (5,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.04\"],\n [aps14_f, aps14_fp, aps14_fpp, (6,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.05\"],\n [aps14_f, aps14_fp, aps14_fpp, (7,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.06\"],\n [aps14_f, aps14_fp, aps14_fpp, (8,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.07\"],\n [aps14_f, aps14_fp, aps14_fpp, (9,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.08\"],\n [aps14_f, aps14_fp, aps14_fpp, (10,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.09\"],\n [aps14_f, aps14_fp, aps14_fpp, (11,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.10\"],\n [aps14_f, aps14_fp, aps14_fpp, (12,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.11\"],\n [aps14_f, aps14_fp, aps14_fpp, (13,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.12\"],\n [aps14_f, aps14_fp, aps14_fpp, (14,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.13\"],\n [aps14_f, aps14_fp, aps14_fpp, (15,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.14\"],\n [aps14_f, aps14_fp, aps14_fpp, (16,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.15\"],\n [aps14_f, aps14_fp, aps14_fpp, (17,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.16\"],\n [aps14_f, aps14_fp, aps14_fpp, (18,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.17\"],\n [aps14_f, aps14_fp, aps14_fpp, (19,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.18\"],\n [aps14_f, aps14_fp, aps14_fpp, (20,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.19\"],\n [aps14_f, aps14_fp, aps14_fpp, (21,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.20\"],\n [aps14_f, aps14_fp, aps14_fpp, (22,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.21\"],\n [aps14_f, aps14_fp, aps14_fpp, (23,), [-1000, np.pi 
/ 2], 0, 1, 6.23806518961612433e-01, \"aps.14.22\"],\n [aps14_f, aps14_fp, aps14_fpp, (24,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.23\"],\n [aps14_f, aps14_fp, aps14_fpp, (25,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.24\"],\n [aps14_f, aps14_fp, aps14_fpp, (26,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.25\"],\n [aps14_f, aps14_fp, aps14_fpp, (27,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.26\"],\n [aps14_f, aps14_fp, aps14_fpp, (28,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.27\"],\n [aps14_f, aps14_fp, aps14_fpp, (29,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.28\"],\n [aps14_f, aps14_fp, aps14_fpp, (30,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.29\"],\n [aps14_f, aps14_fp, aps14_fpp, (31,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.30\"],\n [aps14_f, aps14_fp, aps14_fpp, (32,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.31\"],\n [aps14_f, aps14_fp, aps14_fpp, (33,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.32\"],\n [aps14_f, aps14_fp, aps14_fpp, (34,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.33\"],\n [aps14_f, aps14_fp, aps14_fpp, (35,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.34\"],\n [aps14_f, aps14_fp, aps14_fpp, (36,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.35\"],\n [aps14_f, aps14_fp, aps14_fpp, (37,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.36\"],\n [aps14_f, aps14_fp, aps14_fpp, (38,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.37\"],\n [aps14_f, aps14_fp, aps14_fpp, (39,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.38\"],\n [aps14_f, aps14_fp, aps14_fpp, (40,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, \"aps.14.39\"],\n [aps15_f, aps15_fp, aps15_fpp, (20,), [-1000, 1e-4], 0, -2, 5.90513055942197166e-05, \"aps.15.00\"],\n [aps15_f, aps15_fp, aps15_fpp, (21,), [-1000, 1e-4], 0, -2, 5.63671553399369967e-05, \"aps.15.01\"],\n [aps15_f, aps15_fp, aps15_fpp, (22,), [-1000, 1e-4], 0, -2, 5.39164094555919196e-05, \"aps.15.02\"],\n [aps15_f, aps15_fp, aps15_fpp, (23,), [-1000, 1e-4], 0, -2, 5.16698923949422470e-05, \"aps.15.03\"],\n [aps15_f, aps15_fp, aps15_fpp, (24,), [-1000, 1e-4], 0, -2, 4.96030966991445609e-05, \"aps.15.04\"],\n [aps15_f, aps15_fp, aps15_fpp, (25,), [-1000, 1e-4], 0, -2, 4.76952852876389951e-05, \"aps.15.05\"],\n [aps15_f, aps15_fp, aps15_fpp, (26,), [-1000, 1e-4], 0, -2, 4.59287932399486662e-05, \"aps.15.06\"],\n [aps15_f, aps15_fp, aps15_fpp, (27,), [-1000, 1e-4], 0, -2, 4.42884791956647841e-05, \"aps.15.07\"],\n [aps15_f, aps15_fp, aps15_fpp, (28,), [-1000, 1e-4], 0, -2, 4.27612902578832391e-05, \"aps.15.08\"],\n [aps15_f, aps15_fp, aps15_fpp, (29,), [-1000, 1e-4], 0, -2, 4.13359139159538030e-05, \"aps.15.09\"],\n [aps15_f, aps15_fp, aps15_fpp, (30,), [-1000, 1e-4], 0, -2, 4.00024973380198076e-05, \"aps.15.10\"],\n [aps15_f, aps15_fp, aps15_fpp, (31,), [-1000, 1e-4], 0, -2, 3.87524192962066869e-05, \"aps.15.11\"],\n [aps15_f, aps15_fp, aps15_fpp, (32,), [-1000, 1e-4], 0, -2, 3.75781035599579910e-05, \"aps.15.12\"],\n [aps15_f, aps15_fp, aps15_fpp, (33,), [-1000, 1e-4], 0, -2, 3.64728652199592355e-05, \"aps.15.13\"],\n [aps15_f, aps15_fp, aps15_fpp, (34,), [-1000, 1e-4], 0, -2, 3.54307833565318273e-05, \"aps.15.14\"],\n [aps15_f, aps15_fp, aps15_fpp, (35,), [-1000, 1e-4], 0, -2, 3.44465949299614980e-05, \"aps.15.15\"],\n [aps15_f, aps15_fp, aps15_fpp, 
(36,), [-1000, 1e-4], 0, -2, 3.35156058778003705e-05, \"aps.15.16\"],\n [aps15_f, aps15_fp, aps15_fpp, (37,), [-1000, 1e-4], 0, -2, 3.26336162494372125e-05, \"aps.15.17\"],\n [aps15_f, aps15_fp, aps15_fpp, (38,), [-1000, 1e-4], 0, -2, 3.17968568584260013e-05, \"aps.15.18\"],\n [aps15_f, aps15_fp, aps15_fpp, (39,), [-1000, 1e-4], 0, -2, 3.10019354369653455e-05, \"aps.15.19\"],\n [aps15_f, aps15_fp, aps15_fpp, (40,), [-1000, 1e-4], 0, -2, 3.02457906702100968e-05, \"aps.15.20\"],\n [aps15_f, aps15_fp, aps15_fpp, (100,), [-1000, 1e-4], 0, -2, 1.22779942324615231e-05, \"aps.15.21\"],\n [aps15_f, aps15_fp, aps15_fpp, (200,), [-1000, 1e-4], 0, -2, 6.16953939044086617e-06, \"aps.15.22\"],\n [aps15_f, aps15_fp, aps15_fpp, (300,), [-1000, 1e-4], 0, -2, 4.11985852982928163e-06, \"aps.15.23\"],\n [aps15_f, aps15_fp, aps15_fpp, (400,), [-1000, 1e-4], 0, -2, 3.09246238772721682e-06, \"aps.15.24\"],\n [aps15_f, aps15_fp, aps15_fpp, (500,), [-1000, 1e-4], 0, -2, 2.47520442610501789e-06, \"aps.15.25\"],\n [aps15_f, aps15_fp, aps15_fpp, (600,), [-1000, 1e-4], 0, -2, 2.06335676785127107e-06, \"aps.15.26\"],\n [aps15_f, aps15_fp, aps15_fpp, (700,), [-1000, 1e-4], 0, -2, 1.76901200781542651e-06, \"aps.15.27\"],\n [aps15_f, aps15_fp, aps15_fpp, (800,), [-1000, 1e-4], 0, -2, 1.54816156988591016e-06, \"aps.15.28\"],\n [aps15_f, aps15_fp, aps15_fpp, (900,), [-1000, 1e-4], 0, -2, 1.37633453660223511e-06, \"aps.15.29\"],\n [aps15_f, aps15_fp, aps15_fpp, (1000,), [-1000, 1e-4], 0, -2, 1.23883857889971403e-06, \"aps.15.30\"]\n]\n\n_APS_TESTS_DICTS = [dict(zip(_APS_TESTS_KEYS, testcase)) for testcase in _APS_TESTS]\n\n\n# ##################\n# \"complex\" test cases\n# A few simple, complex-valued, functions, defined on the complex plane.\n\n\ndef cplx01_f(z, n, a):\n r\"\"\"z**n-a: Use to find the nth root of a\"\"\"\n return z**n - a\n\n\ndef cplx01_fp(z, n, a):\n return n * z**(n - 1)\n\n\ndef cplx01_fpp(z, n, a):\n return n * (n - 1) * z**(n - 2)\n\n\ndef cplx02_f(z, a):\n r\"\"\"e**z - a: Use to find the log of a\"\"\"\n return np.exp(z) - a\n\n\ndef cplx02_fp(z, a):\n return np.exp(z)\n\n\ndef cplx02_fpp(z, a):\n return np.exp(z)\n\n\n# Each \"complex\" test case has\n# - a function and its two derivatives,\n# - additional arguments,\n# - the order of differentiability of the the function on this interval\n# - two starting values x0 and x1\n# - the root\n# - an Identifier of the test case\n#\n# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided\n# in [1] for each test case. 
Newton and Halley need a single starting point\n# x0, which was chosen to be near the middle of the interval, unless that\n# would make the problem too easy.\n\n\n_COMPLEX_TESTS_KEYS = [\"f\", \"fprime\", \"fprime2\", \"args\", \"smoothness\", \"x0\", \"x1\", \"root\", \"ID\"]\n_COMPLEX_TESTS = [\n [cplx01_f, cplx01_fp, cplx01_fpp, (2, -1), np.inf, (1 + 1j), (0.5 + 0.5j), 1j, \"complex.01.00\"],\n [cplx01_f, cplx01_fp, cplx01_fpp, (3, 1), np.inf, (-1 + 1j), (-0.5 + 2.0j), (-0.5 + np.sqrt(3) / 2 * 1.0j),\n \"complex.01.01\"],\n [cplx01_f, cplx01_fp, cplx01_fpp, (3, -1), np.inf, 1j, (0.5 + 0.5j), (0.5 + np.sqrt(3) / 2 * 1.0j),\n \"complex.01.02\"],\n [cplx01_f, cplx01_fp, cplx01_fpp, (3, 8), np.inf, 5, 4, 2, \"complex.01.03\"],\n [cplx02_f, cplx02_fp, cplx02_fpp, (-1,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 1.0j, \"complex.02.00\"],\n [cplx02_f, cplx02_fp, cplx02_fpp, (1j,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 0.5j, \"complex.02.01\"],\n]\n\n_COMPLEX_TESTS_DICTS = [dict(zip(_COMPLEX_TESTS_KEYS, testcase)) for testcase in _COMPLEX_TESTS]\n\n\ndef _add_a_b(tests):\n r\"\"\"Add \"a\" and \"b\" keys to each test from the \"bracket\" value\"\"\"\n for d in tests:\n for k, v in zip(['a', 'b'], d.get('bracket', [])):\n d[k] = v\n\n\n_add_a_b(_ORIGINAL_TESTS_DICTS)\n_add_a_b(_APS_TESTS_DICTS)\n_add_a_b(_COMPLEX_TESTS_DICTS)\n\n\ndef get_tests(collection='original', smoothness=None):\n r\"\"\"Return the requested collection of test cases, as an array of dicts with subset-specific keys\n\n Allowed values of collection:\n 'original': The original benchmarking functions.\n Real-valued functions of real-valued inputs on an interval with a zero.\n f1, .., f3 are continuous and infinitely differentiable\n f4 has a single discontinuity at the root\n f5 has a root at 1 replacing a 1st order pole\n f6 is randomly positive on one side of the root, randomly negative on the other\n 'aps': The test problems in the TOMS \"Algorithm 748: Enclosing Zeros of Continuous Functions\"\n paper by Alefeld, Potra and Shi. Real-valued functions of\n real-valued inputs on an interval with a zero.\n Suitable for methods which start with an enclosing interval, and\n derivatives up to 2nd order.\n 'complex': Some complex-valued functions of complex-valued inputs.\n No enclosing bracket is provided.\n Suitable for methods which use one or more starting values, and\n derivatives up to 2nd order.\n\n The dictionary keys will be a subset of\n [\"f\", \"fprime\", \"fprime2\", \"args\", \"bracket\", \"a\", b\", \"smoothness\", \"x0\", \"x1\", \"root\", \"ID\"]\n \"\"\"\n collection = collection or \"original\"\n subsets = {\"aps\": _APS_TESTS_DICTS,\n \"complex\": _COMPLEX_TESTS_DICTS,\n \"original\": _ORIGINAL_TESTS_DICTS}\n tests = subsets.get(collection, [])\n if smoothness is not None:\n tests = [tc for tc in tests if tc['smoothness'] >= smoothness]\n return tests\n\n\n# Backwards compatibility\nmethods = [cc.bisect, cc.ridder, cc.brenth, cc.brentq]\nmstrings = ['cc.bisect', 'cc.ridder', 'cc.brenth', 'cc.brentq']\nfunctions = [f2, f3, f4, f5, f6]\nfstrings = ['f2', 'f3', 'f4', 'f5', 'f6']\n" ]
[ [ "numpy.testing.assert_equal", "numpy.polyfit", "numpy.ones_like", "numpy.testing.assert_array_almost_equal", "numpy.linspace", "numpy.arange", "scipy.signal.savgol_coeffs", "numpy.testing.assert_array_equal", "numpy.full_like", "numpy.testing.assert_almost_equal", "numpy.zeros_like", "numpy.testing.assert_allclose", "scipy.signal._savitzky_golay._polyder", "numpy.array", "numpy.zeros", "scipy.signal.savgol_filter", "scipy.ndimage.convolve1d" ], [ "numpy.random.random", "scipy.sparse.linalg.inv", "scipy.sparse.linalg.expm", "scipy.sparse.linalg.splu", "scipy.sparse.linalg.spsolve", "scipy.sparse.linalg.onenormest", "numpy.sort", "scipy.sparse.linalg.svds", "scipy.sparse.linalg.spilu", "scipy.sparse.linalg.factorized", "scipy.sparse.linalg.norm", "scipy.sparse.linalg.aslinearoperator", "scipy.sparse.linalg.spsolve_triangular", "numpy.testing.assert_allclose", "scipy.sparse.linalg.eigs", "numpy.array", "scipy.sparse.linalg.eigsh" ], [ "numpy.distutils.misc_util.Configuration" ], [ "numpy.sqrt", "numpy.power", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.finfo", "numpy.exp", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "0.19", "1.5", "1.7", "1.0", "1.2", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DelphianCalamity/PrivateKube
[ "14f575e77021ab7baca30f4061140ec83bdc96a7", "14f575e77021ab7baca30f4061140ec83bdc96a7" ]
[ "evaluation/macrobenchmark/workload/models/classification.py", "examples/pipeline/dp_train/src/main.py" ]
[ "import sys, os, shutil\nimport h5py\nimport time\nimport io\nimport random\nimport tempfile\nfrom tqdm import tqdm\nfrom absl import app, flags, logging\nfrom ray.util.multiprocessing import Pool\nimport gcsfs\nimport numpy as np\nfrom pathlib import Path\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import SGDClassifier\n\n\nimport torchtext\nimport torch\n\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport torch.nn as nn\nfrom transformers import BertTokenizer, BertModel, BertForSequenceClassification\nimport opacus\n\nfrom privatekube.experiments.datasets import (\n EventLevelDataset,\n split_review_batch,\n UserTimeLevelDataset,\n select_blocks_by_timeframe,\n)\nfrom privatekube.experiments.utils import (\n build_flags,\n flags_to_dict,\n load_yaml,\n results_to_dict,\n save_yaml,\n save_model,\n binary_accuracy,\n multiclass_accuracy,\n epoch_time,\n)\nfrom privatekube.privacy.text import build_public_vocab\nfrom privatekube.privacy.rdp import (\n compute_noise_from_target_epsilon,\n ALPHAS,\n compute_rdp_sgm,\n)\n\nimport models\n\n\nDEFAULT_DATA_PATH = Path(__file__).resolve().parent.parent.parent.joinpath(\"data\")\n\n# Define default args\ndataset_args = {\n \"n_blocks\": 200,\n \"max_text_len\": 140,\n \"vocab_size\": 10_000,\n \"n_blocks_test\": 200,\n}\n\ninput_path_args = {\n \"dataset_dir\": \"\",\n \"dataset_monofile\": \"\",\n \"block_counts\": str(DEFAULT_DATA_PATH.joinpath(\"block_counts.yaml\")),\n \"emb_path\": str(DEFAULT_DATA_PATH.joinpath(\".vector_cache\")),\n}\n\nmodel_args = {\n \"task\": \"product\",\n \"model\": \"bow\",\n \"embedding_dim\": 100,\n \"hidden_dim_1\": 240,\n \"hidden_dim_2\": 195,\n \"hidden_dim\": 100,\n \"dropout\": 0.25,\n}\n\ntraining_args = {\n \"device\": \"cuda\",\n \"learning_rate\": 0.01,\n \"dp\": 0,\n \"dp_eval\": 0,\n \"user_level\": 0,\n \"epsilon\": 5.0,\n \"delta\": 1e-5,\n \"n_epochs\": 15,\n \"batch_size\": 64,\n \"virtual_batch_multiplier\": 2,\n \"adaptive_batch_size\": 1,\n \"noise\": -1.0,\n \"timeframe_days\": 0,\n \"learning_rate_scheduler\": 1,\n \"dynamic_clipping\": 0,\n \"max_grad_norm\": 1.0,\n \"per_layer_clipping\": 0,\n \"n_workers\": 6,\n \"non_dp_batch_size\": 256,\n}\n\noutput_args = {\n \"log_path\": \"\",\n \"model_path\": \"\",\n \"metrics_path\": \"\",\n}\n\nbuild_flags(dataset_args, model_args, training_args, input_path_args, output_args)\nFLAGS = flags.FLAGS\n\n\nnp.random.seed(0)\n\n\ndef build_split_dataset():\n\n block_dir = tempfile.mkdtemp()\n test_block_dir = tempfile.mkdtemp()\n\n if FLAGS.dataset_dir[0:5] == \"gs://\":\n os.system(\n \"gcloud auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS\"\n )\n fs = gcsfs.GCSFileSystem(\n project=os.get_env(\"GCP_PROJECT\"), token=\"google_default\"\n ) # Get the local Gcloud token\n logging.info(\"Listing bucket files.\")\n all_blocks = list(\n map(\n lambda blob: os.path.basename(blob[\"name\"]),\n fs.listdir(FLAGS.dataset_dir),\n )\n )\n logging.info(f\"Got {len(all_blocks)} blocks.\")\n logging.warning(f\"The evaluation set is not fixed.\")\n elif FLAGS.dataset_dir == \"\":\n logging.info(\"Listing the block names.\")\n all_blocks = 
list(load_yaml(FLAGS.block_counts).keys())\n else:\n all_blocks = os.listdir(FLAGS.dataset_dir)\n\n logging.info(f\"Selecting {FLAGS.n_blocks_test} test blocks (fixed randomness).\")\n test_blocks = np.random.choice(all_blocks, FLAGS.n_blocks_test, replace=False)\n\n for tb in test_blocks:\n all_blocks.remove(tb)\n\n # Use every user to the maximum.\n def sort_by_user(block_name):\n if block_name.endswith(\".h5\"):\n block_name = block_name[: -len(\".h5\")]\n name = block_name.split(\"-\")\n user_slice = int(name[1])\n return user_slice\n\n logging.info(\n f\"Selecting as few users as possible.\\n Pseudorandom and deterministic (hashed user ids).\"\n )\n selected_blocks = sorted(all_blocks, key=sort_by_user)[0 : FLAGS.n_blocks]\n\n if FLAGS.dataset_dir[0:5] == \"gs://\":\n pool = Pool()\n\n bucket_path = FLAGS.dataset_dir\n\n def download_datasource(block_name):\n block_path = os.path.join(bucket_path, block_name)\n dest = os.path.join(block_dir, block_name)\n os.system(f\"gsutil cp {block_path} {dest}\")\n return\n\n logging.warning(\"Downloading the blocks in parallel.\")\n b = pool.map(download_datasource, selected_blocks)\n pool.close()\n pool.join()\n block_names = None\n test_block_names = None\n elif FLAGS.dataset_dir == \"\":\n block_dir = None\n test_block_dir = None\n block_names = selected_blocks\n test_block_names = test_blocks\n\n else:\n for b in selected_blocks:\n os.symlink(os.path.join(FLAGS.dataset_dir, b), os.path.join(block_dir, b))\n for b in test_blocks:\n os.symlink(\n os.path.join(FLAGS.dataset_dir, b), os.path.join(test_block_dir, b)\n )\n block_names = None\n test_block_names = None\n\n # Store for the logs\n FLAGS.dataset_dir = block_dir\n if not FLAGS.dataset_monofile:\n if FLAGS.model == \"bert\":\n from_h5 = DEFAULT_DATA_PATH.joinpath(\"reviews.h5\")\n else:\n from_h5 = DEFAULT_DATA_PATH.joinpath(\"reviews_custom_vocab.h5\")\n else:\n from_h5 = FLAGS.dataset_monofile\n\n if FLAGS.dp and FLAGS.user_level:\n train_data = UserTimeLevelDataset(\n blocks_dir=block_dir,\n timeframe=FLAGS.timeframe_days * 86400,\n from_h5=from_h5,\n block_names=block_names,\n )\n else:\n train_data = EventLevelDataset(\n blocks_dir=block_dir,\n from_h5=from_h5,\n block_names=block_names,\n )\n\n test_data = EventLevelDataset(\n blocks_dir=test_block_dir,\n from_h5=from_h5,\n block_names=test_block_names,\n )\n test_data, valid_data = test_data.split([0.75, 0.25])\n logging.info(f\"Test size: {len(test_data)}\\n Valid size: {len(valid_data)}\")\n\n # Values from the preprocessing\n # (max text len doesn't matter here)\n text_field = torchtext.data.Field(\n batch_first=True,\n use_vocab=True,\n init_token=\"<bos>\",\n eos_token=\"<eos>\",\n pad_token=\"<pad>\",\n unk_token=\"<unk>\",\n include_lengths=True,\n )\n build_public_vocab(\n text_field,\n max_size=FLAGS.vocab_size - 4,\n vectors=f\"glove.6B.{FLAGS.embedding_dim}d\",\n unk_init=torch.Tensor.normal_,\n vectors_cache=FLAGS.emb_path,\n )\n\n return train_data, test_data, valid_data, text_field\n\n\ndef compute_optimal_batch_size(real_batch_size, dataset_len):\n logging.info(\n f\"Computing the optimal batch size. 
Dataset {dataset_len}, real batch {real_batch_size}\"\n )\n # Under approximate\n optimal_batch_size = int(np.sqrt(dataset_len))\n if optimal_batch_size <= real_batch_size:\n return optimal_batch_size, 0\n else:\n return (real_batch_size, optimal_batch_size // real_batch_size)\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef build_model(text_field):\n INPUT_DIM = len(text_field.vocab)\n word_embeddings = text_field.vocab.vectors\n PAD_IDX = text_field.vocab.stoi[text_field.pad_token]\n UNK_IDX = text_field.vocab.stoi[text_field.unk_token]\n\n if FLAGS.task == \"sentiment\":\n output_dim = 1\n elif FLAGS.task == \"product\":\n output_dim = 11\n\n if FLAGS.model == \"lstm\":\n model = models.LSTMClassifier(\n batch_size=FLAGS.batch_size,\n output_size=output_dim,\n hidden_size=FLAGS.hidden_dim,\n vocab_size=INPUT_DIM,\n embedding_length=FLAGS.embedding_dim,\n weights=word_embeddings,\n dropout=FLAGS.dropout,\n dp=FLAGS.dp,\n )\n elif FLAGS.model == \"bow\":\n model = models.NBOW(\n input_dim=word_embeddings.shape[0],\n emb_dim=FLAGS.embedding_dim,\n output_dim=output_dim,\n pad_idx=PAD_IDX,\n word_embeddings=word_embeddings,\n )\n elif FLAGS.model == \"feedforward\":\n model = models.FeedforwardModel(\n vocab_size=INPUT_DIM,\n embedding_dim=FLAGS.embedding_dim,\n pad_idx=PAD_IDX,\n H_1=FLAGS.hidden_dim_1,\n H_2=FLAGS.hidden_dim_2,\n D_out=output_dim,\n word_embeddings=word_embeddings,\n )\n elif FLAGS.model == \"bert\":\n # The dataset has been preprocessed with the bert tokenizer, so the indices should be correct\n logging.info(f\"Pad and unk index {PAD_IDX, UNK_IDX}\")\n model = models.FineTunedBert.build_new(output_dim=output_dim)\n logging.info(\n f\"Model {FLAGS.model} has {count_parameters(model)} trainable parameters.\"\n )\n # Bert has its own pretrained embeddings\n return model\n\n pretrained_embeddings = text_field.vocab.vectors\n\n model.embedding.weight.data.copy_(pretrained_embeddings)\n model.embedding.weight.data[UNK_IDX] = torch.zeros(FLAGS.embedding_dim)\n model.embedding.weight.data[PAD_IDX] = torch.zeros(FLAGS.embedding_dim)\n\n logging.info(\n f\"Model {FLAGS.model} has {count_parameters(model)} trainable parameters.\"\n )\n\n return model\n\n\ndef train(model, iterator, optimizer, criterion, accuracy_fn):\n\n epoch_loss = 0\n epoch_acc = 0\n\n model.train()\n optimizer.zero_grad()\n\n for i, batch in enumerate(tqdm(iterator)):\n\n # batch = batch.to(FLAGS.device)\n\n if FLAGS.task == \"sentiment\":\n data, label = split_review_batch(\n batch,\n label_feature=\"binary_rating\",\n max_text_len=FLAGS.max_text_len,\n include_len=True,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n text_lengths, text = data\n elif FLAGS.task == \"product\":\n text, label = split_review_batch(\n batch,\n label_feature=\"category\",\n max_text_len=FLAGS.max_text_len,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n\n text = text.to(device=FLAGS.device, dtype=torch.long)\n label = (\n label.to(device=FLAGS.device, dtype=torch.long)\n if FLAGS.task == \"product\"\n else label.to(device=FLAGS.device, dtype=torch.float)\n )\n\n if FLAGS.model == \"lstm\":\n hidden = model.init_hidden(batch_size=len(batch))\n if isinstance(hidden, tuple):\n hidden = (\n hidden[0].to(FLAGS.device),\n hidden[1].to(FLAGS.device),\n )\n else:\n hidden = hidden.to(FLAGS.device)\n outputs = model(text, hidden)\n elif FLAGS.model == \"bert\":\n PAD_IDX = 0\n inputs = {\n \"input_ids\": text,\n 
\"labels\": label,\n \"attention_mask\": torch.where(\n text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)\n ),\n }\n # logging.info(f\"Inputs {inputs}\")\n # The model outputs loss, logits\n outputs = model(**inputs)[1]\n # logging.info(f\"Outputs {outputs}\")\n else:\n outputs = model(text)\n # logging.info(f\"Outputs {outputs}\")\n if FLAGS.task == \"sentiment\":\n outputs = outputs.squeeze(1)\n\n loss = criterion(outputs, label)\n acc = accuracy_fn(outputs.detach(), label)\n\n loss.backward()\n\n if FLAGS.dp and FLAGS.virtual_batch_multiplier > 1:\n # NOTE: step is not called at every minibatch, so the RDP accountant need to know this\n\n if (i + 1) % FLAGS.virtual_batch_multiplier == 0 or (i + 1) == len(\n iterator\n ):\n # For the (virtual_batch_multiplier)th batch, call a clip-noise-step\n optimizer.step()\n optimizer.zero_grad()\n else:\n # For the first (virtual_batch_multiplier - 1) batches, just accumulate the gradients\n optimizer.virtual_step()\n else:\n # Regular optimizer step (either non-DP or DP with no virtual step)\n optimizer.step()\n optimizer.zero_grad()\n\n epoch_loss += loss.item()\n # epoch_loss += loss.detach().item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef evaluate(model, iterator, criterion, accuracy_fn):\n\n epoch_loss = 0\n epoch_acc = 0\n\n model.eval()\n\n with torch.no_grad():\n\n for batch in iterator:\n\n # batch = batch.to(FLAGS.device)\n if FLAGS.task == \"sentiment\":\n data, label = split_review_batch(\n batch,\n label_feature=\"binary_rating\",\n max_text_len=FLAGS.max_text_len,\n include_len=True,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n text_lengths, text = data\n elif FLAGS.task == \"product\":\n text, label = split_review_batch(\n batch,\n label_feature=\"category\",\n max_text_len=FLAGS.max_text_len,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n\n text = text.to(device=FLAGS.device, dtype=torch.long)\n label = (\n label.to(device=FLAGS.device, dtype=torch.long)\n if FLAGS.task == \"product\"\n else label.to(device=FLAGS.device, dtype=torch.float)\n )\n\n if FLAGS.model == \"lstm\":\n hidden = model.init_hidden(batch_size=len(batch))\n if isinstance(hidden, tuple):\n hidden = (\n hidden[0].to(FLAGS.device),\n hidden[1].to(FLAGS.device),\n )\n else:\n hidden = hidden.to(FLAGS.device)\n outputs = model(text, hidden)\n elif FLAGS.model == \"bert\":\n PAD_IDX = 0\n inputs = {\n \"input_ids\": text,\n \"labels\": label,\n \"attention_mask\": torch.where(\n text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)\n ),\n }\n outputs = model(**inputs)[1]\n else:\n outputs = model(text)\n if FLAGS.task == \"sentiment\":\n outputs = outputs.squeeze(1)\n\n # print(f\"Training. 
Outputs: {outputs}, labels: {batch.label}\")\n loss = criterion(outputs, label)\n acc = accuracy_fn(outputs, label)\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef train_validate(\n train_data, valid_data, model, optimizer, criterion, accuracy_fn, scheduler\n):\n validation_accuracy_epochs = []\n validation_loss_epochs = []\n training_loss_epochs = []\n training_accuracy_epochs = []\n\n logging.info(f\"n workers: {FLAGS.n_workers}\")\n train_iterator = torch.utils.data.DataLoader(\n train_data,\n batch_size=FLAGS.batch_size,\n shuffle=True,\n num_workers=FLAGS.n_workers,\n drop_last=True,\n )\n\n valid_iterator = torch.utils.data.DataLoader(\n valid_data,\n batch_size=FLAGS.batch_size,\n shuffle=True,\n num_workers=FLAGS.n_workers,\n drop_last=False,\n )\n\n criterion = criterion.to(FLAGS.device)\n\n best_valid_loss = float(\"inf\")\n\n for epoch in range(FLAGS.n_epochs):\n\n start_time = time.time()\n logging.info(f\"Starting epoch {epoch + 1}.\")\n train_loss, train_acc = train(\n model, train_iterator, optimizer, criterion, accuracy_fn\n )\n valid_loss, valid_acc = evaluate(model, valid_iterator, criterion, accuracy_fn)\n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), \"tut2-model.pt\")\n\n logging.info(f\"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s\")\n logging.info(\n f\"\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%\"\n )\n scheduler.step(train_loss)\n logging.info(\n f\"\\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%\"\n )\n\n validation_accuracy_epochs.append(valid_acc)\n validation_loss_epochs.append(valid_loss)\n training_loss_epochs.append(train_loss)\n training_accuracy_epochs.append(train_acc)\n\n return (\n training_loss_epochs,\n training_accuracy_epochs,\n validation_loss_epochs,\n validation_accuracy_epochs,\n )\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef main(argv):\n start_time = time.time()\n\n # Convert flags for the epsilon = -1 shortcut\n if FLAGS.dp and FLAGS.epsilon < 0 and FLAGS.noise < 0:\n FLAGS.dp = False\n\n # No multiprocessing for large datasets (save RAM)\n if FLAGS.n_blocks > 50_000:\n logging.info(f\"Large dataset, we use a single thread for the loader.\")\n FLAGS.n_workers = 0\n\n # Build the dataset, either event level or user level\n train_data, test_data, valid_data, text_field = build_split_dataset()\n logging.info(\n f\"Number of samples for training: {len(train_data)}, validation: {len(valid_data)} and testing: {len(test_data)}\"\n )\n\n # Adapt the batch size and the virtual step size, unless it has been specified manually\n if FLAGS.dp and FLAGS.adaptive_batch_size and FLAGS.virtual_batch_multiplier <= 0:\n FLAGS.batch_size, FLAGS.virtual_batch_multiplier = compute_optimal_batch_size(\n FLAGS.batch_size, len(train_data)\n )\n logging.info(\n f\"Using real batch {FLAGS.batch_size} with multiplier {FLAGS.virtual_batch_multiplier}\"\n )\n if not FLAGS.dp:\n FLAGS.batch_size = FLAGS.non_dp_batch_size\n\n # Prepare the model and optimizer\n model = build_model(text_field).to(FLAGS.device)\n\n logging.info(f\"Number of trainable parameters: {count_parameters(model)}\")\n\n # optimizer = optim.Adam(model.parameters())\n optimizer = optim.AdamW(model.parameters(), lr=FLAGS.learning_rate, eps=1e-8)\n\n scheduler = 
ReduceLROnPlateau(optimizer, mode=\"min\", patience=3)\n\n # train_it = torch.utils.data.DataLoader(\n # train_data,\n # batch_size=2048,\n # shuffle=False,\n # num_workers=FLAGS.n_workers,\n # drop_last=False,\n # )\n # counts = {}\n # for i in range(11):\n # counts[i] = 0\n # for b in train_it:\n # for cat in b[:, 3]:\n # counts[int(cat)] += 1\n # s = sum(counts.values())\n # for cat, count in counts.items():\n # counts[cat] = count / s\n # logging.info(counts)\n\n if FLAGS.task == \"sentiment\":\n criterion = nn.BCEWithLogitsLoss().to(FLAGS.device)\n accuracy_fn = binary_accuracy\n\n # automotive: 0.03036145803296712\n # books: 0.41258122723567553\n # cds: 0.012897189083383703\n # clothing: 0.2025265712144095\n # games: 0.031613111956201506\n # groceries: 0.01949595483554337\n # home: 0.119920985593197\n # movies: 0.0484712255807162\n # pets: 0.03665525816121956\n # sports: 0.04961580907019007\n # tools: 0.035861209236496445\n\n elif FLAGS.task == \"product\":\n # criterion = nn.CrossEntropyLoss(\n # weight=torch.Tensor(\n # [0.05, 0.035, 0.03, 0.035, 0.05, 0.02, 0.12, 0.01, 0.03, 0.20, 0.41]\n # )\n # )\n criterion = nn.CrossEntropyLoss()\n accuracy_fn = multiclass_accuracy\n\n # Plug Opacus if DP training is activated\n if FLAGS.dp:\n if FLAGS.noise >= 0:\n logging.info(f\"User-provided noise: {FLAGS.noise}.\")\n else:\n logging.info(\"Computing noise for the given parameters.\")\n FLAGS.noise = compute_noise_from_target_epsilon(\n target_epsilon=FLAGS.epsilon,\n target_delta=FLAGS.delta,\n epochs=FLAGS.n_epochs,\n batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier\n if FLAGS.virtual_batch_multiplier > 0\n else FLAGS.batch_size,\n dataset_size=len(train_data),\n alphas=ALPHAS,\n )\n logging.info(f\"Noise computed from RDP budget: {FLAGS.noise}.\")\n\n # NOTE: when user-level DP is activated, the training dataset __len__ method returns\n # the number of users, and the DataLoader calls the batch-of-user method that overrides\n # the regular __getitem__ method\n\n # WARNING: fishy non-DP adaptive clipping\n privacy_engine = opacus.PrivacyEngine(\n module=model,\n batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier\n if FLAGS.virtual_batch_multiplier > 0\n else FLAGS.batch_size,\n sample_size=len(train_data),\n alphas=ALPHAS,\n noise_multiplier=FLAGS.noise,\n max_grad_norm=FLAGS.max_grad_norm,\n experimental=bool(FLAGS.dynamic_clipping),\n clipping_method=FLAGS.dynamic_clipping,\n clip_per_layer=bool(FLAGS.per_layer_clipping),\n )\n privacy_engine.attach(optimizer)\n\n # Do the actual training\n t = time.time()\n (\n training_loss_epochs,\n training_accuracy_epochs,\n validation_loss_epochs,\n validation_accuracy_epochs,\n ) = train_validate(\n train_data, valid_data, model, optimizer, criterion, accuracy_fn, scheduler\n )\n training_time = time.time() - t\n\n if FLAGS.dp:\n epsilon_consumed, best_alpha = optimizer.privacy_engine.get_privacy_spent(\n FLAGS.delta\n )\n epsilon_consumed = float(epsilon_consumed)\n best_alpha = float(best_alpha)\n logging.info(f\"Best alpha: {best_alpha}\")\n rdp_epsilons_consumed = (\n optimizer.privacy_engine.get_renyi_divergence()\n * optimizer.privacy_engine.steps\n ).tolist()\n\n logging.info(f\"RDP budget consumed: {rdp_epsilons_consumed} for orders.\")\n\n # Identical to planned budget when we don't have early stopping\n # rdp_epsilon_planned = compute_rdp_sgm(\n # epochs=FLAGS.n_epochs,\n # batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier\n # if FLAGS.virtual_batch_multiplier > 0\n # else FLAGS.batch_size,\n # 
dataset_size=len(train_data),\n # noise=FLAGS.noise,\n # alphas=ALPHAS,\n # )\n # logging.info(f\"Planned RDP budget: {rdp_epsilon_planned}\")\n else:\n epsilon_consumed = None\n rdp_epsilons_consumed = None\n best_alpha = None\n\n # Evaluate the model (non-DP evaluation here)\n testing_size = len(test_data)\n test_iterator = torch.utils.data.DataLoader(\n test_data,\n batch_size=FLAGS.batch_size,\n shuffle=True,\n num_workers=FLAGS.n_workers,\n drop_last=False,\n )\n final_loss, final_accuracy = evaluate(model, test_iterator, criterion, accuracy_fn)\n\n # Collect the metrics and the logs\n logs = {\n \"training_time\": training_time,\n \"total_time\": time.time() - start_time,\n \"test_size\": testing_size,\n \"n_trainable_parameters\": count_parameters(model),\n }\n\n # Update the logs with the training data\n if isinstance(train_data, UserTimeLevelDataset):\n logs[\"train_size\"] = train_data.get_n_events()\n logs[\"n_train_users\"] = len(train_data)\n else:\n logs[\"train_size\"] = len(train_data)\n\n logs.update(\n flags_to_dict(dataset_args, model_args, training_args)\n ) # Dump the configuration flags\n metrics = {\n \"accuracy\": final_accuracy,\n \"training_loss_epochs\": training_loss_epochs,\n \"training_accuracy_epochs\": training_accuracy_epochs,\n \"validation_loss_epochs\": validation_loss_epochs,\n \"validation_accuracy_epochs\": validation_accuracy_epochs,\n \"loss\": final_loss,\n \"epsilon\": epsilon_consumed,\n \"target_epsilon\": FLAGS.epsilon,\n \"alphas\": ALPHAS,\n \"rdp_epsilons\": rdp_epsilons_consumed,\n \"best_alpha\": best_alpha,\n # \"dataset_files\": os.listdir(FLAGS.dataset_dir),\n }\n\n # Save or logging.info the outputs\n # Useless to separate for our experiments\n if FLAGS.metrics_path != \"\":\n save_yaml(FLAGS.metrics_path, metrics)\n logging.info(f\"Saved metrics: {FLAGS.metrics_path}\")\n else:\n logging.info(\"Metrics not saved but concatenated to the logs.\")\n logs.update(metrics)\n\n if FLAGS.log_path != \"\":\n save_yaml(FLAGS.log_path, logs)\n logging.info(f\"Saved logs: {FLAGS.log_path}\")\n\n if FLAGS.model_path != \"\":\n save_model(FLAGS.model_path, model)\n logging.info(f\"Saved model: {FLAGS.model_path}\")\n\n logging.info(logs)\n logging.info(metrics)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n", "# Modified version of the main training script, adapted for this example.\n\n\nimport sys, os, shutil\nimport h5py\nimport time\nimport io\nimport random\nimport tempfile\nfrom tqdm import tqdm\nfrom absl import app, flags, logging\nfrom ray.util.multiprocessing import Pool\nimport gcsfs\nimport numpy as np\nfrom pathlib import Path\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import SGDClassifier\n\n\nimport torchtext\nimport torch\n\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport torch.nn as nn\nfrom transformers import BertTokenizer, BertModel, BertForSequenceClassification\nimport opacus\n\nfrom privatekube.experiments.datasets import (\n EventLevelDataset,\n split_review_batch,\n UserTimeLevelDataset,\n select_blocks_by_timeframe,\n)\nfrom privatekube.experiments.utils import (\n build_flags,\n flags_to_dict,\n load_yaml,\n results_to_dict,\n save_yaml,\n 
save_model,\n binary_accuracy,\n multiclass_accuracy,\n epoch_time,\n)\nfrom privatekube.privacy.text import build_public_vocab\nfrom privatekube.privacy.rdp import (\n compute_noise_from_target_epsilon,\n ALPHAS,\n compute_rdp_sgm,\n)\n\nimport models\n\n\nDEFAULT_DATA_PATH = Path(\"/PrivateKube/evaluation/macrobenchmark/data\")\n\n# Define default args\ndataset_args = {\n \"n_blocks\": 200,\n \"max_text_len\": 140,\n \"vocab_size\": 10_000,\n \"n_blocks_test\": 200,\n}\n\ninput_path_args = {\n \"dataset_dir\": \"\",\n \"dataset_monofile\": \"\",\n \"block_counts\": str(DEFAULT_DATA_PATH.joinpath(\"block_counts.yaml\")),\n \"emb_path\": str(DEFAULT_DATA_PATH.joinpath(\".vector_cache\")),\n}\n\nmodel_args = {\n \"task\": \"product\",\n \"model\": \"bow\",\n \"embedding_dim\": 100,\n \"hidden_dim_1\": 240,\n \"hidden_dim_2\": 195,\n \"hidden_dim\": 100,\n \"dropout\": 0.25,\n}\n\ntraining_args = {\n \"device\": \"cuda\",\n \"learning_rate\": 0.01,\n \"dp\": 0,\n \"dp_eval\": 0,\n \"user_level\": 0,\n \"epsilon\": 5.0,\n \"delta\": 1e-5,\n \"n_epochs\": 15,\n \"batch_size\": 64,\n \"virtual_batch_multiplier\": 2,\n \"adaptive_batch_size\": 1,\n \"noise\": -1.0,\n \"timeframe_days\": 0,\n \"learning_rate_scheduler\": 1,\n \"dynamic_clipping\": 0,\n \"max_grad_norm\": 1.0,\n \"per_layer_clipping\": 0,\n \"n_workers\": 6,\n \"non_dp_batch_size\": 256,\n \"epsilon_fraction\": 1.0,\n \"delta_fraction\": 1.0,\n}\n\noutput_args = {\n \"log_path\": \"\",\n \"model_path\": \"\",\n \"metrics_path\": \"\",\n}\n\nbuild_flags(dataset_args, model_args, training_args, input_path_args, output_args)\nFLAGS = flags.FLAGS\n\n\nnp.random.seed(0)\n\n\ndef build_split_dataset():\n\n block_dir = tempfile.mkdtemp()\n test_block_dir = tempfile.mkdtemp()\n\n assert FLAGS.dataset_dir == \"\"\n logging.info(\"Listing the block names.\")\n all_blocks = list(load_yaml(FLAGS.block_counts).keys())\n\n # logging.info(f\"Selecting {FLAGS.n_blocks_test} test blocks (fixed randomness).\")\n # test_blocks = np.random.choice(all_blocks, FLAGS.n_blocks_test, replace=False)\n\n # for tb in test_blocks:\n # all_blocks.remove(tb)\n\n # Use every user to the maximum.\n def sort_by_user(block_name):\n if block_name.endswith(\".h5\"):\n block_name = block_name[: -len(\".h5\")]\n name = block_name.split(\"-\")\n user_slice = int(name[1])\n return user_slice\n\n logging.info(\n f\"Selecting as few users as possible.\\n Pseudorandom and deterministic (hashed user ids).\"\n )\n selected_blocks = sorted(all_blocks, key=sort_by_user)[0 : FLAGS.n_blocks]\n\n assert FLAGS.dataset_dir == \"\"\n\n block_dir = None\n block_names = selected_blocks\n\n # Store for the logs\n FLAGS.dataset_dir = block_dir\n assert FLAGS.dataset_monofile\n\n from_h5 = FLAGS.dataset_monofile\n\n if FLAGS.dp and FLAGS.user_level:\n train_data = UserTimeLevelDataset(\n blocks_dir=block_dir,\n timeframe=FLAGS.timeframe_days * 86400,\n from_h5=from_h5,\n block_names=block_names,\n )\n else:\n train_data = EventLevelDataset(\n blocks_dir=block_dir,\n from_h5=from_h5,\n block_names=block_names,\n )\n\n # We split with a fixed seed to work on the proper subsets even in other containers\n train_data, test_data = train_data.split([0.8, 0.2])\n valid_data = []\n # test_data, valid_data = test_data.split([0.75, 0.25])\n # logging.info(f\"Test size: {len(test_data)}\\n Valid size: {len(valid_data)}\")\n\n # Values from the preprocessing\n # (max text len doesn't matter here)\n text_field = torchtext.data.Field(\n batch_first=True,\n use_vocab=True,\n init_token=\"<bos>\",\n 
eos_token=\"<eos>\",\n pad_token=\"<pad>\",\n unk_token=\"<unk>\",\n include_lengths=True,\n )\n build_public_vocab(\n text_field,\n max_size=FLAGS.vocab_size - 4,\n vectors=f\"glove.6B.{FLAGS.embedding_dim}d\",\n unk_init=torch.Tensor.normal_,\n vectors_cache=FLAGS.emb_path,\n )\n\n return train_data, test_data, valid_data, text_field\n\n\ndef compute_optimal_batch_size(real_batch_size, dataset_len):\n logging.info(\n f\"Computing the optimal batch size. Dataset {dataset_len}, real batch {real_batch_size}\"\n )\n # Under approximate\n optimal_batch_size = int(np.sqrt(dataset_len))\n if optimal_batch_size <= real_batch_size:\n return optimal_batch_size, 0\n else:\n return (real_batch_size, optimal_batch_size // real_batch_size)\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef build_model(text_field):\n INPUT_DIM = len(text_field.vocab)\n word_embeddings = text_field.vocab.vectors\n PAD_IDX = text_field.vocab.stoi[text_field.pad_token]\n UNK_IDX = text_field.vocab.stoi[text_field.unk_token]\n\n if FLAGS.task == \"sentiment\":\n output_dim = 1\n elif FLAGS.task == \"product\":\n output_dim = 11\n\n if FLAGS.model == \"lstm\":\n model = models.LSTMClassifier(\n batch_size=FLAGS.batch_size,\n output_size=output_dim,\n hidden_size=FLAGS.hidden_dim,\n vocab_size=INPUT_DIM,\n embedding_length=FLAGS.embedding_dim,\n weights=word_embeddings,\n dropout=FLAGS.dropout,\n dp=FLAGS.dp,\n )\n elif FLAGS.model == \"bow\":\n model = models.NBOW(\n input_dim=word_embeddings.shape[0],\n emb_dim=FLAGS.embedding_dim,\n output_dim=output_dim,\n pad_idx=PAD_IDX,\n word_embeddings=word_embeddings,\n )\n elif FLAGS.model == \"feedforward\":\n model = models.FeedforwardModel(\n vocab_size=INPUT_DIM,\n embedding_dim=FLAGS.embedding_dim,\n pad_idx=PAD_IDX,\n H_1=FLAGS.hidden_dim_1,\n H_2=FLAGS.hidden_dim_2,\n D_out=output_dim,\n word_embeddings=word_embeddings,\n )\n elif FLAGS.model == \"bert\":\n # The dataset has been preprocessed with the bert tokenizer, so the indices should be correct\n logging.info(f\"Pad and unk index {PAD_IDX, UNK_IDX}\")\n model = models.FineTunedBert.build_new(output_dim=output_dim)\n logging.info(\n f\"Model {FLAGS.model} has {count_parameters(model)} trainable parameters.\"\n )\n # Bert has its own pretrained embeddings\n return model\n\n pretrained_embeddings = text_field.vocab.vectors\n\n model.embedding.weight.data.copy_(pretrained_embeddings)\n model.embedding.weight.data[UNK_IDX] = torch.zeros(FLAGS.embedding_dim)\n model.embedding.weight.data[PAD_IDX] = torch.zeros(FLAGS.embedding_dim)\n\n logging.info(\n f\"Model {FLAGS.model} has {count_parameters(model)} trainable parameters.\"\n )\n\n return model\n\n\ndef train(model, iterator, optimizer, criterion, accuracy_fn):\n\n epoch_loss = 0\n epoch_acc = 0\n\n model.train()\n optimizer.zero_grad()\n\n for i, batch in enumerate(tqdm(iterator)):\n\n # batch = batch.to(FLAGS.device)\n\n if FLAGS.task == \"sentiment\":\n data, label = split_review_batch(\n batch,\n label_feature=\"binary_rating\",\n max_text_len=FLAGS.max_text_len,\n include_len=True,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n text_lengths, text = data\n elif FLAGS.task == \"product\":\n text, label = split_review_batch(\n batch,\n label_feature=\"category\",\n max_text_len=FLAGS.max_text_len,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n\n text = text.to(device=FLAGS.device, dtype=torch.long)\n label = (\n label.to(device=FLAGS.device, 
dtype=torch.long)\n if FLAGS.task == \"product\"\n else label.to(device=FLAGS.device, dtype=torch.float)\n )\n\n if FLAGS.model == \"lstm\":\n hidden = model.init_hidden(batch_size=len(batch))\n if isinstance(hidden, tuple):\n hidden = (\n hidden[0].to(FLAGS.device),\n hidden[1].to(FLAGS.device),\n )\n else:\n hidden = hidden.to(FLAGS.device)\n outputs = model(text, hidden)\n elif FLAGS.model == \"bert\":\n PAD_IDX = 0\n inputs = {\n \"input_ids\": text,\n \"labels\": label,\n \"attention_mask\": torch.where(\n text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)\n ),\n }\n # logging.info(f\"Inputs {inputs}\")\n # The model outputs loss, logits\n outputs = model(**inputs)[1]\n # logging.info(f\"Outputs {outputs}\")\n else:\n outputs = model(text)\n # logging.info(f\"Outputs {outputs}\")\n if FLAGS.task == \"sentiment\":\n outputs = outputs.squeeze(1)\n\n loss = criterion(outputs, label)\n acc = accuracy_fn(outputs.detach(), label)\n\n loss.backward()\n\n if FLAGS.dp and FLAGS.virtual_batch_multiplier > 1:\n # NOTE: step is not called at every minibatch, so the RDP accountant need to know this\n\n if (i + 1) % FLAGS.virtual_batch_multiplier == 0 or (i + 1) == len(\n iterator\n ):\n # For the (virtual_batch_multiplier)th batch, call a clip-noise-step\n optimizer.step()\n optimizer.zero_grad()\n else:\n # For the first (virtual_batch_multiplier - 1) batches, just accumulate the gradients\n optimizer.virtual_step()\n else:\n # Regular optimizer step (either non-DP or DP with no virtual step)\n optimizer.step()\n optimizer.zero_grad()\n\n epoch_loss += loss.item()\n # epoch_loss += loss.detach().item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef evaluate(model, iterator, criterion, accuracy_fn):\n\n epoch_loss = 0\n epoch_acc = 0\n\n model.eval()\n\n with torch.no_grad():\n\n for batch in iterator:\n\n # batch = batch.to(FLAGS.device)\n if FLAGS.task == \"sentiment\":\n data, label = split_review_batch(\n batch,\n label_feature=\"binary_rating\",\n max_text_len=FLAGS.max_text_len,\n include_len=True,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n text_lengths, text = data\n elif FLAGS.task == \"product\":\n text, label = split_review_batch(\n batch,\n label_feature=\"category\",\n max_text_len=FLAGS.max_text_len,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n\n text = text.to(device=FLAGS.device, dtype=torch.long)\n label = (\n label.to(device=FLAGS.device, dtype=torch.long)\n if FLAGS.task == \"product\"\n else label.to(device=FLAGS.device, dtype=torch.float)\n )\n\n if FLAGS.model == \"lstm\":\n hidden = model.init_hidden(batch_size=len(batch))\n if isinstance(hidden, tuple):\n hidden = (\n hidden[0].to(FLAGS.device),\n hidden[1].to(FLAGS.device),\n )\n else:\n hidden = hidden.to(FLAGS.device)\n outputs = model(text, hidden)\n elif FLAGS.model == \"bert\":\n PAD_IDX = 0\n inputs = {\n \"input_ids\": text,\n \"labels\": label,\n \"attention_mask\": torch.where(\n text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)\n ),\n }\n outputs = model(**inputs)[1]\n else:\n outputs = model(text)\n if FLAGS.task == \"sentiment\":\n outputs = outputs.squeeze(1)\n\n # print(f\"Training. 
Outputs: {outputs}, labels: {batch.label}\")\n loss = criterion(outputs, label)\n acc = accuracy_fn(outputs, label)\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef train_validate(\n train_data, valid_data, model, optimizer, criterion, accuracy_fn, scheduler\n):\n validation_accuracy_epochs = []\n validation_loss_epochs = []\n training_loss_epochs = []\n training_accuracy_epochs = []\n\n logging.info(f\"n workers: {FLAGS.n_workers}\")\n train_iterator = torch.utils.data.DataLoader(\n train_data,\n batch_size=FLAGS.batch_size,\n shuffle=True,\n num_workers=FLAGS.n_workers,\n drop_last=True,\n )\n\n valid_iterator = torch.utils.data.DataLoader(\n valid_data,\n batch_size=FLAGS.batch_size,\n shuffle=True,\n num_workers=FLAGS.n_workers,\n drop_last=False,\n )\n\n criterion = criterion.to(FLAGS.device)\n\n best_valid_loss = float(\"inf\")\n\n for epoch in range(FLAGS.n_epochs):\n\n start_time = time.time()\n logging.info(f\"Starting epoch {epoch + 1}.\")\n train_loss, train_acc = train(\n model, train_iterator, optimizer, criterion, accuracy_fn\n )\n valid_loss, valid_acc = evaluate(model, valid_iterator, criterion, accuracy_fn)\n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), \"tut2-model.pt\")\n\n logging.info(f\"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s\")\n logging.info(\n f\"\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%\"\n )\n scheduler.step(train_loss)\n logging.info(\n f\"\\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%\"\n )\n\n validation_accuracy_epochs.append(valid_acc)\n validation_loss_epochs.append(valid_loss)\n training_loss_epochs.append(train_loss)\n training_accuracy_epochs.append(train_acc)\n\n return (\n training_loss_epochs,\n training_accuracy_epochs,\n validation_loss_epochs,\n validation_accuracy_epochs,\n )\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef main(argv):\n start_time = time.time()\n\n # Convert flags for the epsilon = -1 shortcut\n if FLAGS.dp and FLAGS.epsilon < 0 and FLAGS.noise < 0:\n FLAGS.dp = False\n\n # No multiprocessing for large datasets (save RAM)\n if FLAGS.n_blocks > 50_000:\n logging.info(f\"Large dataset, we use a single thread for the loader.\")\n FLAGS.n_workers = 0\n\n # Build the dataset, either event level or user level\n train_data, test_data, valid_data, text_field = build_split_dataset()\n logging.info(\n f\"Number of samples for training: {len(train_data)}, validation: {len(valid_data)} and testing: {len(test_data)}\"\n )\n\n # Adapt the batch size and the virtual step size, unless it has been specified manually\n if FLAGS.dp and FLAGS.adaptive_batch_size and FLAGS.virtual_batch_multiplier <= 0:\n FLAGS.batch_size, FLAGS.virtual_batch_multiplier = compute_optimal_batch_size(\n FLAGS.batch_size, len(train_data)\n )\n logging.info(\n f\"Using real batch {FLAGS.batch_size} with multiplier {FLAGS.virtual_batch_multiplier}\"\n )\n if not FLAGS.dp:\n FLAGS.batch_size = FLAGS.non_dp_batch_size\n\n # Prepare the model and optimizer\n model = build_model(text_field).to(FLAGS.device)\n\n logging.info(f\"Number of trainable parameters: {count_parameters(model)}\")\n\n # optimizer = optim.Adam(model.parameters())\n optimizer = optim.AdamW(model.parameters(), lr=FLAGS.learning_rate, eps=1e-8)\n\n scheduler = 
ReduceLROnPlateau(optimizer, mode=\"min\", patience=3)\n\n # train_it = torch.utils.data.DataLoader(\n # train_data,\n # batch_size=2048,\n # shuffle=False,\n # num_workers=FLAGS.n_workers,\n # drop_last=False,\n # )\n # counts = {}\n # for i in range(11):\n # counts[i] = 0\n # for b in train_it:\n # for cat in b[:, 3]:\n # counts[int(cat)] += 1\n # s = sum(counts.values())\n # for cat, count in counts.items():\n # counts[cat] = count / s\n # logging.info(counts)\n\n if FLAGS.task == \"sentiment\":\n criterion = nn.BCEWithLogitsLoss().to(FLAGS.device)\n accuracy_fn = binary_accuracy\n\n # automotive: 0.03036145803296712\n # books: 0.41258122723567553\n # cds: 0.012897189083383703\n # clothing: 0.2025265712144095\n # games: 0.031613111956201506\n # groceries: 0.01949595483554337\n # home: 0.119920985593197\n # movies: 0.0484712255807162\n # pets: 0.03665525816121956\n # sports: 0.04961580907019007\n # tools: 0.035861209236496445\n\n elif FLAGS.task == \"product\":\n # criterion = nn.CrossEntropyLoss(\n # weight=torch.Tensor(\n # [0.05, 0.035, 0.03, 0.035, 0.05, 0.02, 0.12, 0.01, 0.03, 0.20, 0.41]\n # )\n # )\n criterion = nn.CrossEntropyLoss()\n accuracy_fn = multiclass_accuracy\n\n # Plug Opacus if DP training is activated\n if FLAGS.dp:\n if FLAGS.noise >= 0:\n logging.info(f\"User-provided noise: {FLAGS.noise}.\")\n else:\n\n logging.info(\"Updated budget with the fraction for eps/delta\")\n FLAGS.epsilon = FLAGS.epsilon_fraction * FLAGS.epsilon\n FLAGS.delta = FLAGS.delta_fraction * FLAGS.delta\n\n logging.info(\"Computing noise for the given parameters.\")\n FLAGS.noise = compute_noise_from_target_epsilon(\n target_epsilon=FLAGS.epsilon,\n target_delta=FLAGS.delta,\n epochs=FLAGS.n_epochs,\n batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier\n if FLAGS.virtual_batch_multiplier > 0\n else FLAGS.batch_size,\n dataset_size=len(train_data),\n alphas=ALPHAS,\n )\n logging.info(f\"Noise computed from RDP budget: {FLAGS.noise}.\")\n\n # NOTE: when user-level DP is activated, the training dataset __len__ method returns\n # the number of users, and the DataLoader calls the batch-of-user method that overrides\n # the regular __getitem__ method\n\n # WARNING: fishy non-DP adaptive clipping\n privacy_engine = opacus.PrivacyEngine(\n module=model,\n batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier\n if FLAGS.virtual_batch_multiplier > 0\n else FLAGS.batch_size,\n sample_size=len(train_data),\n alphas=ALPHAS,\n noise_multiplier=FLAGS.noise,\n max_grad_norm=FLAGS.max_grad_norm,\n experimental=bool(FLAGS.dynamic_clipping),\n clipping_method=FLAGS.dynamic_clipping,\n clip_per_layer=bool(FLAGS.per_layer_clipping),\n )\n privacy_engine.attach(optimizer)\n\n # Do the actual training\n # We validate on the test set for debugging purposes\n # The results stay in the logs and are not used. 
The real DP evaluation comes later.\n t = time.time()\n (\n training_loss_epochs,\n training_accuracy_epochs,\n validation_loss_epochs,\n validation_accuracy_epochs,\n ) = train_validate(\n train_data, test_data, model, optimizer, criterion, accuracy_fn, scheduler\n )\n training_time = time.time() - t\n\n if FLAGS.dp:\n epsilon_consumed, best_alpha = optimizer.privacy_engine.get_privacy_spent(\n FLAGS.delta\n )\n epsilon_consumed = float(epsilon_consumed)\n best_alpha = float(best_alpha)\n logging.info(f\"Best alpha: {best_alpha}\")\n rdp_epsilons_consumed = (\n optimizer.privacy_engine.get_renyi_divergence()\n * optimizer.privacy_engine.steps\n ).tolist()\n\n logging.info(f\"RDP budget consumed: {rdp_epsilons_consumed} for orders.\")\n\n # Identical to planned budget when we don't have early stopping\n # rdp_epsilon_planned = compute_rdp_sgm(\n # epochs=FLAGS.n_epochs,\n # batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier\n # if FLAGS.virtual_batch_multiplier > 0\n # else FLAGS.batch_size,\n # dataset_size=len(train_data),\n # noise=FLAGS.noise,\n # alphas=ALPHAS,\n # )\n # logging.info(f\"Planned RDP budget: {rdp_epsilon_planned}\")\n else:\n epsilon_consumed = None\n rdp_epsilons_consumed = None\n best_alpha = None\n\n # Evaluate the model (non-DP evaluation here)\n testing_size = len(test_data)\n test_iterator = torch.utils.data.DataLoader(\n test_data,\n batch_size=FLAGS.batch_size,\n shuffle=True,\n num_workers=FLAGS.n_workers,\n drop_last=False,\n )\n final_loss, final_accuracy = evaluate(model, test_iterator, criterion, accuracy_fn)\n\n # Collect the metrics and the logs\n logs = {\n \"training_time\": training_time,\n \"total_time\": time.time() - start_time,\n \"test_size\": testing_size,\n \"n_trainable_parameters\": count_parameters(model),\n }\n\n # Update the logs with the training data\n if isinstance(train_data, UserTimeLevelDataset):\n logs[\"train_size\"] = train_data.get_n_events()\n logs[\"n_train_users\"] = len(train_data)\n else:\n logs[\"train_size\"] = len(train_data)\n\n logs.update(\n flags_to_dict(dataset_args, model_args, training_args)\n ) # Dump the configuration flags\n metrics = {\n \"accuracy\": final_accuracy,\n \"training_loss_epochs\": training_loss_epochs,\n \"training_accuracy_epochs\": training_accuracy_epochs,\n \"validation_loss_epochs\": validation_loss_epochs,\n \"validation_accuracy_epochs\": validation_accuracy_epochs,\n \"loss\": final_loss,\n \"epsilon\": epsilon_consumed,\n \"target_epsilon\": FLAGS.epsilon,\n \"alphas\": ALPHAS,\n \"rdp_epsilons\": rdp_epsilons_consumed,\n \"best_alpha\": best_alpha,\n # \"dataset_files\": os.listdir(FLAGS.dataset_dir),\n }\n\n # Save or logging.info the outputs\n # Useless to separate for our experiments\n if FLAGS.metrics_path != \"\":\n save_yaml(FLAGS.metrics_path, metrics)\n logging.info(f\"Saved metrics: {FLAGS.metrics_path}\")\n else:\n logging.info(\"Metrics not saved but concatenated to the logs.\")\n logs.update(metrics)\n\n if FLAGS.log_path != \"\":\n save_yaml(FLAGS.log_path, logs)\n logging.info(f\"Saved logs: {FLAGS.log_path}\")\n\n if FLAGS.model_path != \"\":\n path = Path(FLAGS.model_path).parent\n path.mkdir(parents=True, exist_ok=True)\n torch.save(model.state_dict(), FLAGS.model_path)\n logging.info(f\"Saved model: {FLAGS.model_path}\")\n\n logging.info(logs)\n logging.info(metrics)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.optim.lr_scheduler.ReduceLROnPlateau", "numpy.sqrt", "numpy.random.seed", "torch.zeros", "numpy.random.choice", "torch.utils.data.DataLoader", "torch.zeros_like", "torch.nn.BCEWithLogitsLoss", "torch.no_grad", "torch.ones_like" ], [ "torch.nn.CrossEntropyLoss", "torch.optim.lr_scheduler.ReduceLROnPlateau", "numpy.sqrt", "numpy.random.seed", "torch.zeros", "torch.utils.data.DataLoader", "torch.zeros_like", "torch.nn.BCEWithLogitsLoss", "torch.no_grad", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LEGOS-CTOH/xarray
[ "d543d09aaa7fdfc4f5f92edcd4e3c0af1207c95b" ]
[ "xarray/core/nanops.py" ]
[ "import numpy as np\n\nfrom . import dtypes, nputils, utils\nfrom .duck_array_ops import _dask_or_eager_func, count, fillna, isnull, where_method\nfrom .pycompat import dask_array_type\n\ntry:\n import dask.array as dask_array\nexcept ImportError:\n dask_array = None\n\n\ndef _replace_nan(a, val):\n \"\"\"\n replace nan in a by val, and returns the replaced array and the nan\n position\n \"\"\"\n mask = isnull(a)\n return where_method(val, mask, a), mask\n\n\ndef _maybe_null_out(result, axis, mask, min_count=1):\n \"\"\"\n xarray version of pandas.core.nanops._maybe_null_out\n \"\"\"\n if hasattr(axis, \"__len__\"): # if tuple or list\n raise ValueError(\n \"min_count is not available for reduction with more than one dimensions.\"\n )\n\n if axis is not None and getattr(result, \"ndim\", False):\n null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0\n if null_mask.any():\n dtype, fill_value = dtypes.maybe_promote(result.dtype)\n result = result.astype(dtype)\n result[null_mask] = fill_value\n\n elif getattr(result, \"dtype\", None) not in dtypes.NAT_TYPES:\n null_mask = mask.size - mask.sum()\n if null_mask < min_count:\n result = np.nan\n\n return result\n\n\ndef _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs):\n \"\"\" In house nanargmin, nanargmax for object arrays. Always return integer\n type\n \"\"\"\n valid_count = count(value, axis=axis)\n value = fillna(value, fill_value)\n data = _dask_or_eager_func(func)(value, axis=axis, **kwargs)\n\n # TODO This will evaluate dask arrays and might be costly.\n if (valid_count == 0).any():\n raise ValueError(\"All-NaN slice encountered\")\n\n return data\n\n\ndef _nan_minmax_object(func, fill_value, value, axis=None, **kwargs):\n \"\"\" In house nanmin and nanmax for object array \"\"\"\n valid_count = count(value, axis=axis)\n filled_value = fillna(value, fill_value)\n data = getattr(np, func)(filled_value, axis=axis, **kwargs)\n if not hasattr(data, \"dtype\"): # scalar case\n data = fill_value if valid_count == 0 else data\n # we've computed a single min, max value of type object.\n # don't let np.array turn a tuple back into an array\n return utils.to_0d_object_array(data)\n return where_method(data, valid_count != 0)\n\n\ndef nanmin(a, axis=None, out=None):\n if a.dtype.kind == \"O\":\n return _nan_minmax_object(\"min\", dtypes.get_pos_infinity(a.dtype), a, axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanmin(a, axis=axis)\n\n\ndef nanmax(a, axis=None, out=None):\n if a.dtype.kind == \"O\":\n return _nan_minmax_object(\"max\", dtypes.get_neg_infinity(a.dtype), a, axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanmax(a, axis=axis)\n\n\ndef nanargmin(a, axis=None):\n if a.dtype.kind == \"O\":\n fill_value = dtypes.get_pos_infinity(a.dtype)\n return _nan_argminmax_object(\"argmin\", fill_value, a, axis=axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanargmin(a, axis=axis)\n\n\ndef nanargmax(a, axis=None):\n if a.dtype.kind == \"O\":\n fill_value = dtypes.get_neg_infinity(a.dtype)\n return _nan_argminmax_object(\"argmax\", fill_value, a, axis=axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanargmax(a, axis=axis)\n\n\ndef nansum(a, axis=None, dtype=None, out=None, min_count=None):\n a, mask = _replace_nan(a, 0)\n result = _dask_or_eager_func(\"sum\")(a, axis=axis, dtype=dtype)\n if min_count is not None:\n return 
_maybe_null_out(result, axis, mask, min_count)\n else:\n return result\n\n\ndef _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs):\n \"\"\" In house nanmean. ddof argument will be used in _nanvar method \"\"\"\n from .duck_array_ops import count, fillna, _dask_or_eager_func, where_method\n\n valid_count = count(value, axis=axis)\n value = fillna(value, 0)\n # As dtype inference is impossible for object dtype, we assume float\n # https://github.com/dask/dask/issues/3162\n if dtype is None and value.dtype.kind == \"O\":\n dtype = value.dtype if value.dtype.kind in [\"cf\"] else float\n\n data = _dask_or_eager_func(\"sum\")(value, axis=axis, dtype=dtype, **kwargs)\n data = data / (valid_count - ddof)\n return where_method(data, valid_count != 0)\n\n\ndef nanmean(a, axis=None, dtype=None, out=None):\n if a.dtype.kind == \"O\":\n return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype)\n\n if isinstance(a, dask_array_type):\n return dask_array.nanmean(a, axis=axis, dtype=dtype)\n\n return np.nanmean(a, axis=axis, dtype=dtype)\n\n\ndef nanmedian(a, axis=None, out=None):\n return _dask_or_eager_func(\"nanmedian\", eager_module=nputils)(a, axis=axis)\n\n\ndef _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs):\n value_mean = _nanmean_ddof_object(\n ddof=0, value=value, axis=axis, keepdims=True, **kwargs\n )\n squared = (value.astype(value_mean.dtype) - value_mean) ** 2\n return _nanmean_ddof_object(ddof, squared, axis=axis, keepdims=keepdims, **kwargs)\n\n\ndef nanvar(a, axis=None, dtype=None, out=None, ddof=0):\n if a.dtype.kind == \"O\":\n return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof)\n\n return _dask_or_eager_func(\"nanvar\", eager_module=nputils)(\n a, axis=axis, dtype=dtype, ddof=ddof\n )\n\n\ndef nanstd(a, axis=None, dtype=None, out=None, ddof=0):\n return _dask_or_eager_func(\"nanstd\", eager_module=nputils)(\n a, axis=axis, dtype=dtype, ddof=ddof\n )\n\n\ndef nanprod(a, axis=None, dtype=None, out=None, min_count=None):\n a, mask = _replace_nan(a, 1)\n result = _dask_or_eager_func(\"nanprod\")(a, axis=axis, dtype=dtype, out=out)\n if min_count is not None:\n return _maybe_null_out(result, axis, mask, min_count)\n else:\n return result\n\n\ndef nancumsum(a, axis=None, dtype=None, out=None):\n return _dask_or_eager_func(\"nancumsum\", eager_module=nputils)(\n a, axis=axis, dtype=dtype\n )\n\n\ndef nancumprod(a, axis=None, dtype=None, out=None):\n return _dask_or_eager_func(\"nancumprod\", eager_module=nputils)(\n a, axis=axis, dtype=dtype\n )\n" ]
[ [ "numpy.nanmean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thilinicooray/mac-network-pytorch
[ "0e4bf3f7f301570b652490f697758361c866f3c1", "0e4bf3f7f301570b652490f697758361c866f3c1", "0e4bf3f7f301570b652490f697758361c866f3c1" ]
[ "main_verbq_working.py", "model_roles_working.py", "main_agentplace365_verbq_withnone_diffeval.py" ]
[ "import torch\n#from imsitu_encoder_verbq import imsitu_encoder\nfrom imsitu_encoder_roleqverbq_embdhz import imsitu_encoder\nfrom imsitu_loader import imsitu_loader_roleq_updated\nfrom imsitu_scorer_log import imsitu_scorer\nimport json\nimport model_verbq_working\nimport os\nimport utils\nimport time\nimport random\n#from torchviz import make_dot\n#from graphviz import Digraph\n\n\ndef train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, max_epoch, model_dir, encoder, gpu_mode, clip_norm, lr_max, model_name, args,eval_frequency=4):\n model.train()\n train_loss = 0\n total_steps = 0\n print_freq = 400\n dev_score_list = []\n time_all = time.time()\n\n if model.gpu_mode >= 0 :\n ngpus = 2\n device_array = [i for i in range(0,ngpus)]\n\n pmodel = torch.nn.DataParallel(model, device_ids=device_array)\n else:\n pmodel = model\n #pmodel = model\n\n '''if scheduler.get_lr()[0] < lr_max:\n scheduler.step()'''\n\n top1 = imsitu_scorer(encoder, 1, 3)\n top5 = imsitu_scorer(encoder, 5, 3)\n\n '''print('init param data check :')\n for f in model.parameters():\n if f.requires_grad:\n print(f.data.size())'''\n\n\n for epoch in range(max_epoch):\n\n #print('current sample : ', i, img.size(), verb.size(), roles.size(), labels.size())\n #sizes batch_size*3*height*width, batch*504*1, batch*6*190*1, batch*3*6*lebale_count*1\n mx = len(train_loader)\n for i, (id, img, verb, labels) in enumerate(train_loader):\n #print(\"epoch{}-{}/{} batches\\r\".format(epoch,i+1,mx)) ,\n t0 = time.time()\n t1 = time.time()\n total_steps += 1\n\n if gpu_mode >= 0:\n img = torch.autograd.Variable(img.cuda())\n verb = torch.autograd.Variable(verb.cuda())\n labels = torch.autograd.Variable(labels.cuda())\n else:\n img = torch.autograd.Variable(img)\n verb = torch.autograd.Variable(verb)\n labels = torch.autograd.Variable(labels)\n\n\n\n '''print('all inputs')\n print(img)\n print('=========================================================================')\n print(verb)\n print('=========================================================================')\n print(roles)\n print('=========================================================================')\n print(labels)'''\n\n verb_predict, loss = pmodel(img, verb, labels)\n #verb_predict, rol1pred, role_predict = pmodel.forward_eval5(img)\n #print (\"forward time = {}\".format(time.time() - t1))\n t1 = time.time()\n\n '''g = make_dot(verb_predict, model.state_dict())\n g.view()'''\n\n #loss = model.calculate_loss(verb_predict, verb)\n #loss = model.calculate_eval_loss_new(verb_predict, verb, rol1pred, labels, args)\n #loss = loss_ * random.random() #try random loss\n #print (\"loss time = {}\".format(time.time() - t1))\n t1 = time.time()\n #print('current loss = ', loss)\n if gpu_mode >= 0 :\n #loss.backward(torch.ones([2,1]).to(torch.device('cuda')))\n loss.mean().backward()\n else:\n loss.backward()\n #loss.backward()\n #print (\"backward time = {}\".format(time.time() - t1))\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)\n\n\n '''for param in filter(lambda p: p.requires_grad,model.parameters()):\n print(param.grad.data.sum())'''\n\n #start debugger\n #import pdb; pdb.set_trace()\n\n\n optimizer.step()\n\n '''print('grad check after:')\n for f in model.conv.parameters():\n print('data is')\n print(f.data [0][0])\n #print('grad is')\n #print(f.grad[0][0].item())\n break'''\n\n optimizer.zero_grad()\n\n\n\n train_loss += float(loss.mean())\n\n #top1.add_point_eval5(verb_predict, verb, role_predict, labels)\n 
#top5.add_point_eval5(verb_predict, verb, role_predict, labels)\n\n top1.add_point_verb_only_eval(id, verb_predict, verb)\n top5.add_point_verb_only_eval(id, verb_predict, verb)\n\n\n if total_steps % print_freq == 0:\n top1_a = top1.get_average_results()\n top5_a = top5.get_average_results()\n print (\"{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}\"\n .format(total_steps-1,epoch,i, utils.format_dict(top1_a, \"{:.2f}\", \"1-\"),\n utils.format_dict(top5_a,\"{:.2f}\",\"5-\"), loss.mean().item(),\n train_loss / ((total_steps-1)%eval_frequency) ))\n\n\n if total_steps % eval_frequency == 0:\n top1, top5, val_loss = eval(model, dev_loader, encoder, gpu_mode)\n model.train()\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"]\n avg_score /= 8\n\n print ('Dev {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))\n #print('Dev loss :', val_loss)\n\n dev_score_list.append(avg_score)\n max_score = max(dev_score_list)\n\n if max_score == dev_score_list[-1]:\n torch.save(model.state_dict(), model_dir + \"/{}_verbq_iter0_change.model\".format( model_name))\n print ('New best model saved! {0}'.format(max_score))\n\n #eval on the trainset\n\n '''top1, top5, val_loss = eval(model, traindev_loader, encoder, gpu_mode)\n model.train()\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"] + top5_avg[\"value*\"] + top5_avg[\"value-all*\"]\n avg_score /= 8\n\n print ('TRAINDEV {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))'''\n\n print('current train loss', train_loss)\n train_loss = 0\n top1 = imsitu_scorer(encoder, 1, 3)\n top5 = imsitu_scorer(encoder, 5, 3)\n\n del verb_predict, loss, img, verb, labels\n #break\n print('Epoch ', epoch, ' completed!')\n scheduler.step()\n #break\n\ndef eval(model, dev_loader, encoder, gpu_mode, write_to_file = False):\n model.eval()\n val_loss = 0\n\n print ('evaluating model...')\n top1 = imsitu_scorer(encoder, 1, 3, write_to_file)\n top5 = imsitu_scorer(encoder, 5, 3)\n with torch.no_grad():\n mx = len(dev_loader)\n for i, (img_id, img, verb, labels) in enumerate(dev_loader):\n #print(\"{}/{} batches\\r\".format(i+1,mx)) ,\n '''im_data = torch.squeeze(im_data,0)\n im_info = torch.squeeze(im_info,0)\n gt_boxes = torch.squeeze(gt_boxes,0)\n num_boxes = torch.squeeze(num_boxes,0)\n verb = torch.squeeze(verb,0)\n roles = torch.squeeze(roles,0)\n labels = torch.squeeze(labels,0)'''\n\n if gpu_mode >= 0:\n img = torch.autograd.Variable(img.cuda())\n verb = torch.autograd.Variable(verb.cuda())\n labels = torch.autograd.Variable(labels.cuda())\n else:\n img = torch.autograd.Variable(img)\n verb = torch.autograd.Variable(verb)\n labels = torch.autograd.Variable(labels)\n\n verb_predict, _= model(img, verb, labels)\n '''loss = model.calculate_eval_loss(verb_predict, verb, role_predict, labels)\n val_loss += loss.item()'''\n top1.add_point_verb_only_eval(img_id, verb_predict, verb)\n top5.add_point_verb_only_eval(img_id, verb_predict, verb)\n\n del img, verb, labels\n break\n\n #return top1, top5, 
val_loss/mx\n\n return top1, top5, 0\n\ndef main():\n\n import argparse\n parser = argparse.ArgumentParser(description=\"imsitu VSRL. Training, evaluation and prediction.\")\n parser.add_argument(\"--gpuid\", default=-1, help=\"put GPU id > -1 in GPU mode\", type=int)\n #parser.add_argument(\"--command\", choices = [\"train\", \"eval\", \"resume\", 'predict'], required = True)\n parser.add_argument('--resume_training', action='store_true', help='Resume training from the model [resume_model]')\n parser.add_argument('--resume_model', type=str, default='', help='The model we resume')\n parser.add_argument('--verb_module', type=str, default='', help='pretrained verb module')\n parser.add_argument('--role_module', type=str, default='', help='pretrained role module')\n parser.add_argument('--train_role', action='store_true', help='cnn fix, verb fix, role train from the scratch')\n parser.add_argument('--finetune_verb', action='store_true', help='cnn fix, verb finetune, role train from the scratch')\n parser.add_argument('--finetune_cnn', action='store_true', help='cnn finetune, verb finetune, role train from the scratch')\n parser.add_argument('--output_dir', type=str, default='./trained_models', help='Location to output the model')\n parser.add_argument('--evaluate', action='store_true', help='Only use the testing mode')\n parser.add_argument('--test', action='store_true', help='Only use the testing mode')\n parser.add_argument('--dataset_folder', type=str, default='./imSitu', help='Location of annotations')\n parser.add_argument('--imgset_dir', type=str, default='./resized_256', help='Location of original images')\n parser.add_argument('--frcnn_feat_dir', type=str, help='Location of output from detectron')\n #todo: train role module separately with gt verbs\n\n args = parser.parse_args()\n\n batch_size = 640\n #lr = 5e-6\n lr = 0.0001\n lr_max = 5e-4\n lr_gamma = 0.1\n lr_step = 15\n clip_norm = 0.5\n weight_decay = 1e-4\n n_epoch = 500\n n_worker = 3\n\n #dataset_folder = 'imSitu'\n #imgset_folder = 'resized_256'\n dataset_folder = args.dataset_folder\n imgset_folder = args.imgset_dir\n\n print('model spec :, top down att with role q ')\n\n train_set = json.load(open(dataset_folder + \"/updated_train_new.json\"))\n imsitu_roleq = json.load(open(\"imsitu_data/imsitu_questions_prev.json\"))\n verb_templates = json.load(open(\"imsitu_data/verb_questions_template_new.json\"))\n encoder = imsitu_encoder(train_set, imsitu_roleq, verb_templates)\n\n model = model_verbq_working.BaseModel(encoder, args.gpuid)\n\n # To group up the features\n #cnn_features, role_features = utils.group_features_noun(model)\n cnn_features, role_features = utils.group_features_noun(model)\n\n train_set = imsitu_loader_roleq_updated(imgset_folder, train_set, encoder, model.train_preprocess())\n\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True, num_workers=n_worker)\n\n dev_set = json.load(open(dataset_folder +\"/dev.json\"))\n dev_set = imsitu_loader_roleq_updated(imgset_folder, dev_set, encoder, model.dev_preprocess())\n dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=4, shuffle=True, num_workers=n_worker)\n\n test_set = json.load(open(dataset_folder +\"/test.json\"))\n test_set = imsitu_loader_roleq_updated(imgset_folder, test_set, encoder, model.dev_preprocess())\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=True, num_workers=n_worker)\n\n traindev_set = json.load(open(dataset_folder +\"/dev.json\"))\n traindev_set = 
imsitu_loader_roleq_updated(imgset_folder, traindev_set, encoder, model.dev_preprocess())\n traindev_loader = torch.utils.data.DataLoader(traindev_set, batch_size=8, shuffle=True, num_workers=n_worker)\n\n\n #utils.load_net(args.verb_module, [model.verb_module])\n #utils.load_net(args.role_module, [model.role_module])\n model_name = 'train_full'\n\n\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n\n torch.manual_seed(1234)\n if args.gpuid >= 0:\n #print('GPU enabled')\n model.cuda()\n torch.cuda.manual_seed(1234)\n torch.backends.cudnn.deterministic = True\n\n optimizer = torch.optim.Adamax([\n {'params': cnn_features, 'lr': 5e-5},\n {'params': role_features}\n ], lr=1e-3)\n\n #optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_step, gamma=lr_gamma)\n #gradient clipping, grad check\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)\n\n if args.evaluate:\n top1, top5, val_loss = eval(model, dev_loader, encoder, args.gpuid, write_to_file = True)\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"] + top5_avg[\"value*\"] + top5_avg[\"value-all*\"]\n avg_score /= 8\n\n print ('Dev average :{:.2f} {} {}'.format( avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))\n\n #write results to csv file\n role_dict = top1.role_dict\n fail_val_all = top1.value_all_dict\n pass_val_dict = top1.vall_all_correct\n\n with open('role_pred_data.json', 'w') as fp:\n json.dump(role_dict, fp, indent=4)\n\n with open('fail_val_all.json', 'w') as fp:\n json.dump(fail_val_all, fp, indent=4)\n\n with open('pass_val_all.json', 'w') as fp:\n json.dump(pass_val_dict, fp, indent=4)\n\n print('Writing predictions to file completed !')\n\n elif args.test:\n top1, top5, val_loss = eval(model, test_loader, encoder, args.gpuid, write_to_file = True)\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"] + top5_avg[\"value*\"] + top5_avg[\"value-all*\"]\n avg_score /= 8\n\n print ('Test average :{:.2f} {} {}'.format( avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))\n\n\n else:\n\n print('Model training started!')\n train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, n_epoch, args.output_dir, encoder, args.gpuid, clip_norm, lr_max, model_name, args)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n", "import torch\nimport torch.nn as nn\nfrom attention import Attention, NewAttention\nfrom language_model import WordEmbedding, QuestionEmbedding\nfrom classifier import SimpleClassifier\nfrom fc import FCNet\nimport torch.nn.functional as F\nimport torchvision as tv\nimport utils\nimport numpy as np\n\nclass vgg16_modified(nn.Module):\n def __init__(self):\n super(vgg16_modified, self).__init__()\n vgg = tv.models.vgg16_bn(pretrained=True)\n self.vgg_features = vgg.features\n\n def rep_size(self):\n return 1024\n\n def base_size(self):\n return 512\n\n def forward(self,x):\n #return 
self.dropout2(self.relu2(self.lin2(self.dropout1(self.relu1(self.lin1(self.vgg_features(x).view(-1, 512*7*7)))))))\n features = self.vgg_features(x)\n\n return features\n\nclass TopDown(nn.Module):\n def __init__(self,\n max_role_count,\n vocab_size,\n gpu_mode,\n embed_hidden=300,\n mlp_hidden=512):\n super(TopDown, self).__init__()\n\n self.vocab_size = vocab_size\n self.max_role_count = max_role_count\n self.gpu_mode = gpu_mode\n\n '''self.q_emb = nn.LSTM(embed_hidden, mlp_hidden,\n batch_first=True, bidirectional=True)\n self.lstm_proj = nn.Linear(mlp_hidden * 2, mlp_hidden)'''\n self.q_proj = nn.Sequential(\n nn.Linear(embed_hidden*2, mlp_hidden),\n nn.ReLU(),\n )\n self.v_att = Attention(mlp_hidden, mlp_hidden, mlp_hidden)\n self.q_net = FCNet([mlp_hidden, mlp_hidden])\n self.v_net = FCNet([mlp_hidden, mlp_hidden])\n self.classifier = SimpleClassifier(\n mlp_hidden, 2 * mlp_hidden, self.vocab_size, 0.5)\n\n self.mlp_hidden= mlp_hidden\n self.dropout = nn.Dropout(0.2)\n\n\n def forward(self, img_org, q):\n batch_size = img_org.size(0) // self.max_role_count\n w_emb = q\n '''self.q_emb.flatten_parameters()\n lstm_out, (h, _) = self.q_emb(w_emb)\n q_emb = h.permute(1, 0, 2).contiguous().view(batch_size, -1)\n q_emb = self.lstm_proj(q_emb)'''\n q_emb = self.q_proj(q)\n joint_repr = torch.zeros(batch_size * self.max_role_count, self.mlp_hidden)\n if self.gpu_mode >= 0:\n joint_repr = joint_repr.to(torch.device('cuda'))\n\n for i in range(3):\n\n labelrep = joint_repr.contiguous().view(batch_size, -1, self.mlp_hidden)\n labelrep_expand = labelrep.expand(self.max_role_count, labelrep.size(0), labelrep.size(1), labelrep.size(2))\n labelrep_expand = labelrep_expand.transpose(0,1)\n labelrep_expand_new = torch.zeros([batch_size, self.max_role_count, self.max_role_count-1, self.mlp_hidden])\n for i in range(self.max_role_count):\n if i == 0:\n labelrep_expand_new[:,i] = labelrep_expand[:,i,1:]\n elif i == self.max_role_count -1:\n labelrep_expand_new[:,i] = labelrep_expand[:,i,:i]\n else:\n labelrep_expand_new[:,i] = torch.cat([labelrep_expand[:,i,:i], labelrep_expand[:,i,i+1:]], 1)\n\n if self.gpu_mode >= 0:\n labelrep_expand_new = labelrep_expand_new.to(torch.device('cuda'))\n\n labelrep_expand = labelrep_expand_new.contiguous().view(-1, self.max_role_count-1, self.mlp_hidden)\n\n img = torch.cat([img_org,labelrep_expand], 1)\n\n att = self.v_att(img, q_emb)\n v_emb = (att * img).sum(1) # [batch, v_dim]\n\n q_repr = self.q_net(q_emb)\n v_repr = self.v_net(v_emb)\n joint_repr_new = q_repr * v_repr\n\n joint_repr = joint_repr + self.dropout(joint_repr_new)\n\n\n\n logits = self.classifier(joint_repr)\n\n return logits\n\nclass BaseModel(nn.Module):\n def __init__(self, encoder,\n gpu_mode,\n embed_hidden=300,\n mlp_hidden=512):\n super(BaseModel, self).__init__()\n\n self.normalize = tv.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\n self.train_transform = tv.transforms.Compose([\n tv.transforms.RandomRotation(10),\n tv.transforms.RandomResizedCrop(224),\n tv.transforms.RandomHorizontalFlip(),\n tv.transforms.ToTensor(),\n self.normalize,\n ])\n\n self.dev_transform = tv.transforms.Compose([\n tv.transforms.Resize(224),\n tv.transforms.CenterCrop(224),\n tv.transforms.ToTensor(),\n self.normalize,\n ])\n\n self.encoder = encoder\n self.gpu_mode = gpu_mode\n self.n_roles = self.encoder.get_num_roles()\n self.n_verbs = self.encoder.get_num_verbs()\n self.vocab_size = self.encoder.get_num_labels()\n self.max_role_count = self.encoder.get_max_role_count()\n 
self.n_role_q_vocab = len(self.encoder.question_words)\n\n self.conv = vgg16_modified()\n self.role_lookup = nn.Embedding(self.n_roles+1, embed_hidden, padding_idx=self.n_roles)\n self.verb_lookup = nn.Embedding(self.n_verbs, embed_hidden)\n #self.verb_lookup = nn.Embedding(self.n_verbs, embed_hidden)\n #self.w_emb = nn.Embedding(self.n_role_q_vocab + 1, embed_hidden, padding_idx=self.n_role_q_vocab)\n self.roles = TopDown(self.max_role_count, self.vocab_size, self.gpu_mode)\n\n self.conv_hidden = self.conv.base_size()\n self.mlp_hidden = mlp_hidden\n self.embed_hidden = embed_hidden\n\n def train_preprocess(self):\n return self.train_transform\n\n def dev_preprocess(self):\n return self.dev_transform\n\n def forward(self, img, verb):\n\n img_features = self.conv(img)\n batch_size, n_channel, conv_h, conv_w = img_features.size()\n img = img_features.view(batch_size, n_channel, -1)\n img = img.permute(0, 2, 1)\n\n img = img.expand(self.max_role_count,img.size(0), img.size(1), img.size(2))\n img = img.transpose(0,1)\n img = img.contiguous().view(batch_size* self.max_role_count, -1, self.mlp_hidden)\n\n role_ids = self.encoder.get_role_ids_batch(verb)\n if self.gpu_mode >= 0:\n role_ids = role_ids.to(torch.device('cuda'))\n\n role_ids = role_ids.view(batch_size*self.max_role_count, -1)\n verb_embd = self.verb_lookup(verb)\n role_embd = self.role_lookup(role_ids)\n\n verb_embed_expand = verb_embd.expand(self.max_role_count, verb_embd.size(0), verb_embd.size(1))\n verb_embed_expand = verb_embed_expand.transpose(0,1)\n verb_embed_expand = verb_embed_expand.contiguous().view(-1, self.embed_hidden)\n role_verb = torch.cat([role_embd.squeeze(), verb_embed_expand], -1)\n\n logits = self.roles(img, role_verb)\n\n role_label_pred = logits.contiguous().view(batch_size, -1, self.vocab_size)\n return role_label_pred\n\n def calculate_loss(self, gt_verbs, role_label_pred, gt_labels,args):\n\n batch_size = role_label_pred.size()[0]\n if args.train_all:\n loss = 0\n for i in range(batch_size):\n for index in range(gt_labels.size()[1]):\n frame_loss = 0\n #verb_loss = utils.cross_entropy_loss(verb_pred[i], gt_verbs[i])\n #frame_loss = criterion(role_label_pred[i], gt_labels[i,index])\n for j in range(0, self.max_role_count):\n frame_loss += utils.cross_entropy_loss(role_label_pred[i][j], gt_labels[i,index,j] ,self.vocab_size)\n frame_loss = frame_loss/len(self.encoder.verb2_role_dict[self.encoder.verb_list[gt_verbs[i]]])\n #print('frame loss', frame_loss, 'verb loss', verb_loss)\n loss += frame_loss\n else:\n #verb from pre-trained\n loss = 0\n for i in range(batch_size):\n for index in range(gt_labels.size()[1]):\n frame_loss = 0\n #verb_loss = utils.cross_entropy_loss(verb_pred[i], gt_verbs[i])\n #frame_loss = criterion(role_label_pred[i], gt_labels[i,index])\n for j in range(0, self.max_role_count):\n frame_loss += utils.cross_entropy_loss(role_label_pred[i][j], gt_labels[i,index,j] ,self.vocab_size)\n frame_loss = frame_loss/len(self.encoder.verb2_role_dict[self.encoder.verb_list[gt_verbs[i]]])\n #print('frame loss', frame_loss, 'verb loss', verb_loss)\n loss += frame_loss\n\n\n final_loss = loss/batch_size\n #print('loss :', final_loss)\n return final_loss", "import torch\nfrom imsitu_encoder_agentplace365_verbq_withnone_diffeval import imsitu_encoder\nfrom imsitu_loader import imsitu_loader_verbq_mul\nfrom imsitu_scorer_log import imsitu_scorer\nimport json\nimport model_agentplace365_verbq_withnone_diffeval\nimport os\nimport utils\nimport time\nimport random\n#from torchviz import make_dot\n#from 
graphviz import Digraph\n\n\ndef train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, max_epoch, model_dir, encoder, gpu_mode, clip_norm, lr_max, model_name, args,eval_frequency=4000):\n model.train()\n train_loss = 0\n total_steps = 0\n print_freq = 400\n dev_score_list = []\n time_all = time.time()\n\n '''if model.gpu_mode >= 0 :\n ngpus = 2\n device_array = [i for i in range(0,ngpus)]\n\n pmodel = torch.nn.DataParallel(model, device_ids=device_array)\n else:\n pmodel = model'''\n pmodel = model\n\n '''if scheduler.get_lr()[0] < lr_max:\n scheduler.step()'''\n\n top1 = imsitu_scorer(encoder, 1, 3)\n top5 = imsitu_scorer(encoder, 5, 3)\n\n '''print('init param data check :')\n for f in model.parameters():\n if f.requires_grad:\n print(f.data.size())'''\n\n\n for epoch in range(max_epoch):\n\n #print('current sample : ', i, img.size(), verb.size(), roles.size(), labels.size())\n #sizes batch_size*3*height*width, batch*504*1, batch*6*190*1, batch*3*6*lebale_count*1\n mx = len(train_loader)\n for i, (img_id, img, verb) in enumerate(train_loader):\n #print(\"epoch{}-{}/{} batches\\r\".format(epoch,i+1,mx)) ,\n t0 = time.time()\n t1 = time.time()\n total_steps += 1\n\n if gpu_mode >= 0:\n img = torch.autograd.Variable(img.cuda())\n verb = torch.autograd.Variable(verb.cuda())\n else:\n img = torch.autograd.Variable(img)\n verb = torch.autograd.Variable(verb)\n\n\n\n '''print('all inputs')\n print(img)\n print('=========================================================================')\n print(verb)\n print('=========================================================================')\n print(roles)\n print('=========================================================================')\n print(labels)'''\n\n verb_predict = pmodel(img, img_id)\n #verb_predict, rol1pred, role_predict = pmodel.forward_eval5(img)\n #print (\"forward time = {}\".format(time.time() - t1))\n t1 = time.time()\n\n '''g = make_dot(verb_predict, model.state_dict())\n g.view()'''\n\n loss = model.calculate_loss(verb_predict, verb)\n #loss = model.calculate_eval_loss_new(verb_predict, verb, rol1pred, labels, args)\n #loss = loss_ * random.random() #try random loss\n #print (\"loss time = {}\".format(time.time() - t1))\n t1 = time.time()\n #print('current loss = ', loss)\n\n loss.backward()\n #print (\"backward time = {}\".format(time.time() - t1))\n\n #torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)\n\n\n '''for param in filter(lambda p: p.requires_grad,model.parameters()):\n print(param.grad.data.sum())'''\n\n #start debugger\n #import pdb; pdb.set_trace()\n\n\n optimizer.step()\n optimizer.zero_grad()\n\n '''print('grad check :')\n for f in model.parameters():\n print('data is')\n print(f.data)\n print('grad is')\n print(f.grad)'''\n\n train_loss += loss.item()\n\n #top1.add_point_eval5(verb_predict, verb, role_predict, labels)\n #top5.add_point_eval5(verb_predict, verb, role_predict, labels)\n\n top1.add_point_multi_verb_avg(img_id, verb_predict, verb)\n top5.add_point_multi_verb_avg(img_id, verb_predict, verb)\n\n\n if total_steps % print_freq == 0:\n top1_a = top1.get_average_results()\n top5_a = top5.get_average_results()\n print (\"{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}\"\n .format(total_steps-1,epoch,i, utils.format_dict(top1_a, \"{:.2f}\", \"1-\"),\n utils.format_dict(top5_a,\"{:.2f}\",\"5-\"), loss.item(),\n train_loss / ((total_steps-1)%eval_frequency) ))\n\n\n if total_steps % eval_frequency == 0:\n top1, top5, val_loss = eval(model, dev_loader, encoder, gpu_mode)\n 
model.train()\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"]\n avg_score /= 8\n\n print ('Dev {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))\n #print('Dev loss :', val_loss)\n\n dev_score_list.append(avg_score)\n max_score = max(dev_score_list)\n\n if max_score == dev_score_list[-1]:\n torch.save(model.state_dict(), model_dir + \"/{}_agentplace365_verbq_td_agpred_withnone_plzpred.model\".format( model_name))\n print ('New best model saved! {0}'.format(max_score))\n\n #eval on the trainset\n\n '''top1, top5, val_loss = eval(model, traindev_loader, encoder, gpu_mode)\n model.train()\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"] + top5_avg[\"value*\"] + top5_avg[\"value-all*\"]\n avg_score /= 8\n\n print ('TRAINDEV {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))'''\n\n print('current train loss', train_loss)\n train_loss = 0\n top1 = imsitu_scorer(encoder, 1, 3)\n top5 = imsitu_scorer(encoder, 5, 3)\n\n del verb_predict, loss, img, verb\n #break\n print('Epoch ', epoch, ' completed!')\n scheduler.step()\n #break\n\ndef eval(model, dev_loader, encoder, gpu_mode, write_to_file = False):\n model.eval()\n val_loss = 0\n\n print ('evaluating model...')\n top1 = imsitu_scorer(encoder, 1, 3, write_to_file)\n top5 = imsitu_scorer(encoder, 5, 3)\n with torch.no_grad():\n mx = len(dev_loader)\n for i, (img_id, img, verb) in enumerate(dev_loader):\n #print(\"{}/{} batches\\r\".format(i+1,mx)) ,\n '''im_data = torch.squeeze(im_data,0)\n im_info = torch.squeeze(im_info,0)\n gt_boxes = torch.squeeze(gt_boxes,0)\n num_boxes = torch.squeeze(num_boxes,0)\n verb = torch.squeeze(verb,0)\n roles = torch.squeeze(roles,0)\n labels = torch.squeeze(labels,0)'''\n\n if gpu_mode >= 0:\n img = torch.autograd.Variable(img.cuda())\n verb = torch.autograd.Variable(verb.cuda())\n else:\n img = torch.autograd.Variable(img)\n verb = torch.autograd.Variable(verb)\n\n verb_predict = model.forward_eval(img, img_id)\n '''loss = model.calculate_eval_loss(verb_predict, verb, role_predict, labels)\n val_loss += loss.item()'''\n top1.add_point_verb_only_eval(img_id, verb_predict, verb)\n top5.add_point_verb_only_eval(img_id, verb_predict, verb)\n\n del verb_predict, img, verb\n #break\n\n #return top1, top5, val_loss/mx\n #pass_list = top1.pass_list\n\n '''with open('passverb_gtagent.txt', 'w') as filehandle:\n for listitem in pass_list:\n filehandle.write('{}\\n'.format(listitem))'''\n\n return top1, top5, 0\n\ndef main():\n\n import argparse\n parser = argparse.ArgumentParser(description=\"imsitu VSRL. 
Training, evaluation and prediction.\")\n parser.add_argument(\"--gpuid\", default=-1, help=\"put GPU id > -1 in GPU mode\", type=int)\n #parser.add_argument(\"--command\", choices = [\"train\", \"eval\", \"resume\", 'predict'], required = True)\n parser.add_argument('--resume_training', action='store_true', help='Resume training from the model [resume_model]')\n parser.add_argument('--resume_model', type=str, default='', help='The model we resume')\n parser.add_argument('--agent_module', type=str, default='', help='pretrained agent module')\n parser.add_argument('--train_role', action='store_true', help='cnn fix, verb fix, role train from the scratch')\n parser.add_argument('--train_verb', action='store_true', help='cnn fix, agent fix, verb train from the scratch')\n parser.add_argument('--finetune_agent', action='store_true', help='cnn fix, agent finetune, verb train from the scratch')\n parser.add_argument('--finetune_cnn', action='store_true', help='cnn finetune, verb finetune, role train from the scratch')\n parser.add_argument('--output_dir', type=str, default='./trained_models', help='Location to output the model')\n parser.add_argument('--evaluate', action='store_true', help='Only use the testing mode')\n parser.add_argument('--test', action='store_true', help='Only use the testing mode')\n parser.add_argument('--dataset_folder', type=str, default='./imSitu', help='Location of annotations')\n parser.add_argument('--imgset_dir', type=str, default='./resized_256', help='Location of original images')\n parser.add_argument('--frcnn_feat_dir', type=str, help='Location of output from detectron')\n\n args = parser.parse_args()\n\n batch_size = 640\n #lr = 5e-6\n lr = 0.0001\n lr_max = 5e-4\n lr_gamma = 0.1\n lr_step = 25\n clip_norm = 50\n weight_decay = 1e-4\n n_epoch = 500\n n_worker = 3\n\n #dataset_folder = 'imSitu'\n #imgset_folder = 'resized_256'\n dataset_folder = args.dataset_folder\n imgset_folder = args.imgset_dir\n\n print('model spec :, top down att with verb q ')\n\n train_set = json.load(open(dataset_folder + \"/updated_train_new.json\"))\n imsitu_verbq = json.load(open(\"imsitu_data/verb_questions_new_with_unk_agentplaces365_all.json\"))\n places = json.load(open(\"imsitu_data/places_predict_imsitu_updated_unk.json\"))\n space = json.load(open(\"imsitu_data/imsitu_space.json\"))\n encoder = imsitu_encoder(train_set, imsitu_verbq, space['nouns'], places)\n\n model = model_agentplace365_verbq_withnone_diffeval.BaseModel(encoder, args.gpuid)\n\n # To group up the features\n cnn_agent_features, cnn_verb_features, agent_features, verb_features = utils.group_features_agent2verb(model)\n\n train_set = imsitu_loader_verbq_mul(imgset_folder, train_set, encoder, model.train_preprocess())\n\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True, num_workers=n_worker)\n\n dev_set = json.load(open(dataset_folder +\"/dev.json\"))\n dev_set = imsitu_loader_verbq_mul(imgset_folder, dev_set, encoder, model.dev_preprocess())\n dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=64, shuffle=True, num_workers=n_worker)\n\n test_set = json.load(open(dataset_folder +\"/test.json\"))\n test_set = imsitu_loader_verbq_mul(imgset_folder, test_set, encoder, model.dev_preprocess())\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=True, num_workers=n_worker)\n\n traindev_set = json.load(open(dataset_folder +\"/dev.json\"))\n traindev_set = imsitu_loader_verbq_mul(imgset_folder, traindev_set, encoder, model.dev_preprocess())\n 
traindev_loader = torch.utils.data.DataLoader(traindev_set, batch_size=8, shuffle=True, num_workers=n_worker)\n\n utils.set_trainable(model, False)\n if args.train_verb:\n print('CNN fix, agent fix, train verb from the scratch from: {}'.format(args.agent_module))\n args.train_all = False\n if len(args.agent_module) == 0:\n raise Exception('[pretrained agent module] not specified')\n utils.load_net(args.agent_module, [model.conv_agent, model.agent], ['conv', 'agent'])\n optimizer_select = 1\n model_name = 'cfx_afx_vtrain'\n\n elif args.finetune_agent:\n print('CNN fix, agent finetune, train verb from the scratch from: {}'.format(args.agent_module))\n args.train_all = True\n if len(args.agent_module) == 0:\n raise Exception('[pretrained agent module] not specified')\n utils.load_net(args.agent_module, [model.conv, model.agent], ['conv', 'agent'])\n optimizer_select = 2\n model_name = 'cfx_aft_vtrain'\n\n else:\n print('Training from the scratch.')\n optimizer_select = 0\n args.train_all = True\n model_name = 'train_full'\n\n optimizer = utils.get_optimizer_agent2verb(lr,weight_decay,optimizer_select,\n cnn_agent_features, cnn_verb_features, agent_features, verb_features)\n\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n\n if args.gpuid >= 0:\n #print('GPU enabled')\n model.cuda()\n\n optimizer = torch.optim.Adam([{'params': cnn_verb_features, 'lr': 5e-5},\n {'params': verb_features}],\n lr=1e-3)\n\n #optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_step, gamma=lr_gamma)\n #gradient clipping, grad check\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)\n\n if args.evaluate:\n top1, top5, val_loss = eval(model, dev_loader, encoder, args.gpuid, write_to_file = True)\n\n top1_avg = top1.get_average_results_nouns()\n top5_avg = top5.get_average_results_nouns()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"] + top5_avg[\"value*\"] + top5_avg[\"value-all*\"]\n avg_score /= 8\n\n print ('Dev average :{:.2f} {} {}'.format( avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))\n\n #write results to csv file\n role_dict = top1.role_dict\n fail_val_all = top1.value_all_dict\n pass_val_dict = top1.vall_all_correct\n\n with open('role_pred_data.json', 'w') as fp:\n json.dump(role_dict, fp, indent=4)\n\n with open('fail_val_all.json', 'w') as fp:\n json.dump(fail_val_all, fp, indent=4)\n\n with open('pass_val_all.json', 'w') as fp:\n json.dump(pass_val_dict, fp, indent=4)\n\n print('Writing predictions to file completed !')\n\n elif args.test:\n top1, top5, val_loss = eval(model, test_loader, encoder, args.gpuid, write_to_file = True)\n\n top1_avg = top1.get_average_results_nouns()\n top5_avg = top5.get_average_results_nouns()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"] + top5_avg[\"value*\"] + top5_avg[\"value-all*\"]\n avg_score /= 8\n\n print ('Test average :{:.2f} {} {}'.format( avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))\n\n\n else:\n\n print('Model training started!')\n train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, n_epoch, args.output_dir, encoder, args.gpuid, clip_norm, lr_max, 
model_name, args)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.cuda.manual_seed", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.optim.lr_scheduler.ExponentialLR", "torch.autograd.Variable", "torch.no_grad", "torch.nn.DataParallel", "torch.optim.Adamax" ], [ "torch.nn.Dropout", "torch.zeros", "torch.cat", "torch.nn.Embedding", "torch.nn.Linear", "torch.device", "torch.nn.ReLU" ], [ "torch.optim.Adam", "torch.utils.data.DataLoader", "torch.optim.lr_scheduler.ExponentialLR", "torch.no_grad", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GabrielWen/spartan
[ "ce3bf7f2bb551d7f996a1884acef819b620cc854" ]
[ "spartan/examples/ssvd/qr.py" ]
[ "import spartan\nfrom spartan import expr, core\nimport numpy as np\nfrom sys import stderr\n\ndef qr(Y):\n ''' Compute the thin qr factorization of a matrix.\n Factor the matrix Y as QR, where Q is orthonormal and R is\n upper-triangular.\n\n Parameters\n ----------\n Y: Spartan array of shape (M, K).\n\n Notes\n ----------\n Y'Y must fit in memory. Y is a Spartan array of shape (M, K).\n Since this QR decomposition is mainly used in Stochastic SVD,\n K will be the rank of the matrix of shape (M, N) and the assumption\n is that the rank K should be far less than M or N.\n\n Returns\n -------\n Q : Spartan array of shape (M, K).\n R : Numpy array of shape (K, K).\n '''\n # Since the K should be far less than M. So the matrix multiplication\n # should be the bottleneck instead of local cholesky decomposition and\n # finding inverse of R. So we just parallelize the matrix mulitplication.\n # If K is really large, we may consider using our Spartan cholesky\n # decomposition, but for now, we use numpy version, it works fine.\n\n # YTY = Y'Y. YTY has shape of (K, K).\n YTY = expr.dot(expr.transpose(Y), Y).optimized().glom()\n\n # Do cholesky decomposition and get R.\n R = np.linalg.cholesky(YTY).T\n\n # Find the inverse of R\n inv_R = np.linalg.inv(R)\n\n # Q = Y * inv(R)\n Q = expr.dot(Y, inv_R).optimized().evaluate()\n\n return Q, R\n" ]
[ [ "numpy.linalg.inv", "numpy.linalg.cholesky" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.10", "1.12", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
GrantRoss-Tenki/Malawi-CQC-CSC-OSU-Work
[ "a720e0451579945ba10eafdafe2e0d59a86d5cfb", "a720e0451579945ba10eafdafe2e0d59a86d5cfb" ]
[ "Graphing_Summaries.py", "Sensitivity_Fuel_Threshold.py" ]
[ "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n#from pylab import plot, show, xlim,figure,hold, ylim,legend, boxplot, setup, axes\r\n\r\nimport seaborn as sns\r\n\r\n# Is this a personal or work computer\r\n# Are you graphing for hood or no hood\r\n\r\nComputer = 'personal' #or 'personal' or 'work'\r\nHood_or_no = 'no_hood' # 'no_hood' or 'hood'\r\n#what household do you want to remove make sure it is in ascending order\r\n# if there is nothing, then put a placeholder of 1045 or higher\r\nHousehold_removal = [1045]\r\n#Household_removal = Household_removal.sort(reverse=False)\r\nHousehold_removal_NO_Hood_fuel_day_adult = [1045]\r\nHousehold_removal_Hood_fuel_day_adult = [2020]\r\n\r\nHousehold_removal_NO_Hood_PM = [1045]\r\nHousehold_removal_Hood_PM = [2020]\r\n\r\n\r\npd.set_option('display.max_rows', 500)\r\npd.set_option('display.max_columns', 500)\r\npd.set_option('display.width', 1000)\r\n\r\nif Hood_or_no == 'hood':\r\n C_Place_holder = 2001\r\nelse:\r\n C_Place_holder = 1001\r\n \r\nif Computer == 'personal' and Hood_or_no == 'no_hood':\r\n # 1N\r\n datafile_path_day_1N =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_Summary_Day_1_exact.csv\"\r\n Day_1N = pd.read_csv(datafile_path_day_1N, skiprows=2)\r\n datafile_path_event_1N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_Summary_Event_1_exact.csv\"\r\n Event_1N = pd.read_csv(datafile_path_event_1N, skiprows=2)\r\n # there is no second exact in phase 1N\r\n #1N Survey\r\n datafile_path_survey_1N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv\"\r\n Filter_1n_survey = pd.read_csv(datafile_path_survey_1N, skiprows=0)\r\n #print(Filter_1n_survey.iloc[0:40, :])\r\n Survey_1N = Filter_1n_survey.iloc[0:40,:]\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_1N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_1N_24hr = pd.read_csv(data_file_path_24_PM_1N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_1N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_1N_24hr = pd.read_csv(data_file_path_24_Fuel_1N, skiprows=0)\r\n \r\n #2N\r\n datafile_path_day_2N =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Summary_Day_1_exact.csv\"\r\n Day_2N = pd.read_csv(datafile_path_day_2N, skiprows=2)\r\n datafile_path_event_2N_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Summary_Event_1_exact.csv\"\r\n Event_2N_1 = pd.read_csv(datafile_path_event_2N_1, skiprows=2)\r\n #2N second Exact\r\n datafile_path_event_2N_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Summary_Event_2_exact.csv\"\r\n Event_2N_2 = pd.read_csv(datafile_path_event_2N_2, skiprows=2)\r\n #2N Survey\r\n datafile_path_survey_2N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Survey_summary_.csv\"\r\n Survey_2N = pd.read_csv(datafile_path_survey_2N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_2N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_2N_24hr = pd.read_csv(data_file_path_24_PM_2N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_2N = 
\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_2N_24hr = pd.read_csv(data_file_path_24_Fuel_2N, skiprows=0)\r\n \r\n #3N\r\n datafile_path_day_3N =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Summary_Day_1_exact.csv\"\r\n Day_3N = pd.read_csv(datafile_path_day_3N, skiprows=2)\r\n datafile_path_event_3N_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Summary_Event_1_exact.csv\"\r\n Event_3N_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)\r\n #3N second Exact\r\n datafile_path_event_3N_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Summary_Event_2_exact.csv\"\r\n Event_3N_2 = pd.read_csv(datafile_path_event_3N_2, skiprows=2)\r\n #3N Survey \r\n datafile_path_survey_3N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Survey_summary_.csv\"\r\n Survey_3N = pd.read_csv(datafile_path_survey_3N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_3N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_3N_24hr = pd.read_csv(data_file_path_24_PM_3N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_3N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_3N_24hr = pd.read_csv(data_file_path_24_Fuel_3N, skiprows=0)\r\n \r\n #4N\r\n datafile_path_day_4N =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Summary_Day_1_exact.csv\"\r\n Day_4N = pd.read_csv(datafile_path_day_4N, skiprows=2)\r\n datafile_path_event_4N_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Summary_Event_1_exact.csv\"\r\n Event_4N_1 = pd.read_csv(datafile_path_event_4N_1, skiprows=2)\r\n #4N second Exact\r\n datafile_path_event_4N_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Summary_Event_2_exact.csv\"\r\n Event_4N_2 = pd.read_csv(datafile_path_event_4N_2, skiprows=2)\r\n #4N Survey \r\n datafile_path_survey_4N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Survey_summary_.csv\"\r\n Survey_4N = pd.read_csv(datafile_path_survey_4N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_4N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_4N_24hr = pd.read_csv(data_file_path_24_PM_4N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_4N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_4N_24hr = pd.read_csv(data_file_path_24_Fuel_4N, skiprows=0)\r\n \r\nelif Computer == 'personal' and Hood_or_no == 'hood':\r\n #1H\r\n datafile_path_day_1H =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_Summary_Day_1_exact.csv\"\r\n Day_1H = pd.read_csv(datafile_path_day_1H, skiprows=2)\r\n datafile_path_event_1H =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_Summary_Event_1_exact.csv\"\r\n Event_1H = pd.read_csv(datafile_path_event_1H, skiprows=2)\r\n #there is no second exact in phase 1H\r\n #1H Survey (row 40 or so afterward is Hood portion column 1 is houshold number)\r\n datafile_path_survey_1H = 
\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv\"\r\n Survey_1H = pd.read_csv(datafile_path_survey_1H, skiprows=40)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_1H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_1H_24hr = pd.read_csv(data_file_path_24_PM_1H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_1H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_1H_24hr = pd.read_csv(data_file_path_24_fuel_1H, skiprows=0)\r\n \r\n \r\n #2H\r\n datafile_path_day_2H =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Summary_Day_1_exact.csv\"\r\n Day_2H = pd.read_csv(datafile_path_day_2H, skiprows=2)\r\n datafile_path_event_2H_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Summary_Event_1_exact.csv\"\r\n Event_2H_1 = pd.read_csv(datafile_path_event_2H_1, skiprows=2)\r\n #2H second Exact\r\n datafile_path_event_2H_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Summary_Event_2_exact.csv\"\r\n Event_2H_2 = pd.read_csv(datafile_path_event_2H_2, skiprows=2)\r\n #2H survey \r\n datafile_path_survey_2H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Survey_summary_.csv\"\r\n Survey_2H = pd.read_csv(datafile_path_survey_2H, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_2H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_2H_24hr = pd.read_csv(data_file_path_24_PM_2H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_2H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_2H_24hr = pd.read_csv(data_file_path_24_fuel_2H, skiprows=0)\r\n \r\n #3H\r\n datafile_path_day_3H =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Summary_Day_1_exact.csv\"\r\n Day_3H = pd.read_csv(datafile_path_day_3H, skiprows=2)\r\n datafile_path_event_3N_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Summary_Event_1_exact.csv\"\r\n Event_3H_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)\r\n #3H second Exact\r\n datafile_path_event_3H_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Summary_Event_2_exact.csv\"\r\n Event_3H_2 = pd.read_csv(datafile_path_event_3H_2, skiprows=2)\r\n #3H survey \r\n datafile_path_survey_3H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Survey_summary_.csv\"\r\n Survey_3H = pd.read_csv(datafile_path_survey_3H, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_3H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_3H_24hr = pd.read_csv(data_file_path_24_PM_3H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_3H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_3H_24hr = pd.read_csv(data_file_path_24_fuel_3H, skiprows=0)\r\n \r\n #work uses box information and not local data\r\nelif Computer == 'work' and Hood_or_no == 'no_hood':\r\n # 1N for box file 
system\r\n datafile_path_day_1N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_Summary_Day_1_exact.csv\"\r\n Day_1N = pd.read_csv(datafile_path_day_1N, skiprows=2)\r\n datafile_path_event_1N =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_Summary_Event_1_exact.csv\"\r\n Event_1N = pd.read_csv(datafile_path_event_1N, skiprows=2)\r\n # there is no second exact in phase 1N\r\n #1N Survey \r\n datafile_path_survey_1N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv\"\r\n Filter_1n_survey = pd.read_csv(datafile_path_survey_1N, skiprows=0)\r\n #print(Filter_1n_survey.iloc[0:40, :])\r\n Survey_1N = Filter_1n_survey.iloc[0:40,:]\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_1N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_1N_24hr = pd.read_csv(data_file_path_24_PM_1N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_1N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_1N_24hr = pd.read_csv(data_file_path_24_Fuel_1N, skiprows=0)\r\n \r\n #2N\r\n datafile_path_day_2N =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Summary_Day_1_exact.csv\"\r\n Day_2N = pd.read_csv(datafile_path_day_2N, skiprows=2)\r\n datafile_path_event_2N_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Summary_Event_1_exact.csv\"\r\n Event_2N_1 = pd.read_csv(datafile_path_event_2N_1, skiprows=2)\r\n #2N second Exact\r\n datafile_path_event_2N_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Summary_Event_2_exact.csv\"\r\n Event_2N_2 = pd.read_csv(datafile_path_event_2N_2, skiprows=2)\r\n #2N Survey\r\n datafile_path_survey_2N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Survey_summary_.csv\"\r\n Survey_2N = pd.read_csv(datafile_path_survey_2N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_2N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_2N_24hr = pd.read_csv(data_file_path_24_PM_2N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_2N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_2N_24hr = pd.read_csv(data_file_path_24_Fuel_2N, skiprows=0)\r\n \r\n #3N\r\n datafile_path_day_3N =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Summary_Day_1_exact.csv\"\r\n Day_3N = pd.read_csv(datafile_path_day_3N, skiprows=2)\r\n datafile_path_event_3N_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Summary_Event_1_exact.csv\"\r\n Event_3N_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)\r\n #3N second Exact\r\n datafile_path_event_3N_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Summary_Event_2_exact.csv\"\r\n Event_3N_2 = pd.read_csv(datafile_path_event_3N_2, skiprows=2)\r\n #3N survey\r\n datafile_path_survey_3N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Survey_summary_.csv\"\r\n Survey_3N = pd.read_csv(datafile_path_survey_3N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_3N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_3N_24hr = pd.read_csv(data_file_path_24_PM_3N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_3N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_3N_24hr = pd.read_csv(data_file_path_24_Fuel_3N, skiprows=0)\r\n \r\n #4N\r\n 
datafile_path_day_4N =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Summary_Day_1_exact.csv\"\r\n Day_4N = pd.read_csv(datafile_path_day_4N, skiprows=2)\r\n datafile_path_event_4N_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Summary_Event_1_exact.csv\"\r\n Event_4N_1 = pd.read_csv(datafile_path_event_4N_1, skiprows=2)\r\n #4N second Exact\r\n datafile_path_event_4N_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Summary_Event_2_exact.csv\"\r\n Event_4N_2 = pd.read_csv(datafile_path_event_4N_2, skiprows=2)\r\n #4N Survey \r\n datafile_path_survey_4N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Survey_summary_.csv\"\r\n Survey_4N = pd.read_csv(datafile_path_survey_4N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_4N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_4N_24hr = pd.read_csv(data_file_path_24_PM_4N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_4N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_4N_24hr = pd.read_csv(data_file_path_24_Fuel_4N, skiprows=0)\r\n \r\nelse:\r\n #1H\r\n datafile_path_day_1H =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_Summary_Day_1_exact.csv\"\r\n Day_1H = pd.read_csv(datafile_path_day_1H, skiprows=2)\r\n datafile_path_event_1H =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_Summary_Event_1_exact.csv\"\r\n Event_1H = pd.read_csv(datafile_path_event_1H, skiprows=2)\r\n #there is no second exact in phase 1H\r\n #1H Survey (row 40 or so afterward is Hood portion column 1 is houshold number)\r\n datafile_path_survey_1H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv\"\r\n Survey_1H = pd.read_csv(datafile_path_survey_1H, skiprows=40)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_1H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_1H_24hr = pd.read_csv(data_file_path_24_PM_1H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_1H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_1H_24hr = pd.read_csv(data_file_path_24_fuel_1H, skiprows=0)\r\n \r\n #2H\r\n datafile_path_day_2H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Summary_Day_1_exact.csv\"\r\n Day_2H = pd.read_csv(datafile_path_day_2H, skiprows=2)\r\n datafile_path_event_2H_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Summary_Event_1_exact.csv\"\r\n Event_2H_1 = pd.read_csv(datafile_path_event_2H_1, skiprows=2)\r\n #2H second Exact\r\n datafile_path_event_2H_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Summary_Event_2_exact.csv\"\r\n Event_2H_2 = pd.read_csv(datafile_path_event_2H_2, skiprows=2)\r\n #2H survey \r\n datafile_path_survey_2H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Survey_summary_.csv\"\r\n Survey_2H = pd.read_csv(datafile_path_survey_2H, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_2H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_2H_24hr = pd.read_csv(data_file_path_24_PM_2H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_2H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_2H_24hr = pd.read_csv(data_file_path_24_fuel_2H, skiprows=0)\r\n \r\n #3H\r\n datafile_path_day_3H = 
\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Summary_Day_1_exact.csv\"\r\n Day_3H = pd.read_csv(datafile_path_day_3H, skiprows=2)\r\n datafile_path_event_3N_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Summary_Event_1_exact.csv\"\r\n Event_3H_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)\r\n #3H second Exact\r\n datafile_path_event_3H_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Summary_Event_2_exact.csv\"\r\n Event_3H_2 = pd.read_csv(datafile_path_event_3H_2, skiprows=2)\r\n #3H survey \r\n datafile_path_survey_3H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Survey_summary_.csv\"\r\n Survey_3H = pd.read_csv(datafile_path_survey_3H, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_3H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_3H_24hr = pd.read_csv(data_file_path_24_PM_3H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_3H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_3H_24hr = pd.read_csv(data_file_path_24_fuel_3H, skiprows=0)\r\n \r\n#time to start ploting fun things \r\n#1st starting with the fuel per day per adult histogram and box plot\r\nNO_hood_counter = np.arange(0,39)\r\nhood_counter = np.arange(0,14)\r\n#what household do you want to remove from the graphs (1046 is a dummy spacer)\r\n\r\n\r\nprint('---------------Fuel per Day per Adult No-Hood Phase---------------------')\r\nif Hood_or_no == 'no_hood':\r\n Fuel_per_day_per_adult_1N = []\r\n f_d_a_1N = []\r\n Fuel_per_day_per_adult_2N = []\r\n f_d_a_2N = []\r\n Fuel_per_day_per_adult_3N = []\r\n f_d_a_3N = []\r\n Fuel_per_day_per_adult_4N = []\r\n f_d_a_4N =[]\r\n count_t = 0\r\n count_f = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_NO_Hood_fuel_day_adult[count_f] - C_Place_holder):\r\n count_f = count_f + 1\r\n if count_f == len(Household_removal_NO_Hood_fuel_day_adult):\r\n count_f = 0\r\n continue\r\n if Fuel_remove_1N_24hr.iloc[c,6]!= -1.00:\r\n Fuel_per_day_per_adult_1N.append(Fuel_remove_1N_24hr.iloc[c,6]/Survey_1N.iloc[c,7])\r\n f_d_a_1N.append(Day_1N.iloc[c,0])\r\n if Fuel_remove_2N_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_2N.append(Fuel_remove_2N_24hr.iloc[c,6] / Survey_2N.iloc[c, 7])\r\n f_d_a_2N.append(Day_2N.iloc[c,0])\r\n if Fuel_remove_3N_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_3N.append(Fuel_remove_3N_24hr.iloc[c,6]/ Survey_3N.iloc[c, 7])\r\n f_d_a_3N.append(Day_3N.iloc[c, 0])\r\n if Fuel_remove_4N_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_4N.append(Fuel_remove_4N_24hr.iloc[c,6] / Survey_4N.iloc[c, 7])\r\n f_d_a_4N.append(Day_3N.iloc[c, 0])\r\n # percentage Change of Fuel per day between the phases\r\n Fuel_per_day_per_adult_2N_1N = []\r\n f_d_a_2N_1N = []\r\n Fuel_per_day_per_adult_3N_1N = []\r\n f_d_a_3N_1N = []\r\n Fuel_per_day_per_adult_4N_1N = []\r\n f_d_a_4N_1N = []\r\n \r\n Fuel_per_day_per_adult_3N_2N = []\r\n f_d_a_3N_2N = []\r\n Fuel_per_day_per_adult_4N_3N = []\r\n f_d_a_4N_3N = []\r\n Fuel_per_day_per_adult_4N_2N = []\r\n f_d_a_4N_2N = []\r\n\r\n count_t = 0\r\n count_f = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == 
(Household_removal_NO_Hood_fuel_day_adult[count_f] - C_Place_holder):\r\n count_f = count_f + 1\r\n if count_f == len(Household_removal_NO_Hood_fuel_day_adult):\r\n count_f = 0\r\n continue\r\n if (len(Fuel_per_day_per_adult_2N)-1) >= c and (len(Fuel_per_day_per_adult_1N)-1) >= c:\r\n if Day_1N.iloc[c,13] > 0 and Day_2N.iloc[c,13] > 0 and Day_1N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n Fuel_per_day_per_adult_2N_1N.append(Fuel_per_day_per_adult_2N[c]/Fuel_per_day_per_adult_1N[c])\r\n f_d_a_2N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_3N)-1) >= c and (len(Fuel_per_day_per_adult_1N)-1) >= c:\r\n if Day_3N.iloc[c,13] > 0 and Day_1N.iloc[c,13] > 0 and Day_3N.iloc[c,0] == Day_1N.iloc[c,0]:\r\n Fuel_per_day_per_adult_3N_1N.append(Fuel_per_day_per_adult_3N[c]/Fuel_per_day_per_adult_1N[c])\r\n f_d_a_3N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_4N)-1) >= c and (len(Fuel_per_day_per_adult_1N)-1) >= c:\r\n if Day_4N.iloc[c,13] > 0 and Day_1N.iloc[c,13] > 0 and Day_4N.iloc[c,0] == Day_1N.iloc[c,0]:\r\n Fuel_per_day_per_adult_4N_1N.append(Fuel_per_day_per_adult_4N[c]/Fuel_per_day_per_adult_1N[c])\r\n f_d_a_4N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_3N)-1) >= c and (len(Fuel_per_day_per_adult_2N)-1) >= c:\r\n if Day_3N.iloc[c,13] > 0 and Day_2N.iloc[c,13] > 0 and Day_3N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n Fuel_per_day_per_adult_3N_2N.append(Fuel_per_day_per_adult_3N[c]/Fuel_per_day_per_adult_2N[c])\r\n f_d_a_3N_2N.append(Day_2N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_4N)-1) >= c and (len(Fuel_per_day_per_adult_3N)-1) >= c:\r\n if Day_4N.iloc[c,13] > 0 and Day_3N.iloc[c,13] > 0 and Day_4N.iloc[c,0] == Day_3N.iloc[c,0]:\r\n Fuel_per_day_per_adult_4N_3N.append(Fuel_per_day_per_adult_4N[c]/Fuel_per_day_per_adult_3N[c])\r\n f_d_a_4N_3N.append(Day_3N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_4N)-1) >= c and (len(Fuel_per_day_per_adult_2N)-1) >= c:\r\n if Day_4N.iloc[c,13] > 0 and Day_2N.iloc[c,13] > 0 and Day_4N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n Fuel_per_day_per_adult_4N_2N.append(Fuel_per_day_per_adult_4N[c]/Fuel_per_day_per_adult_2N[c])\r\n f_d_a_4N_2N.append(Day_4N.iloc[c,0])\r\n \r\n \r\n \r\n # now for box plotting for Fuel per day beteen Phases\r\n #1N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_1N, ax=ax_box, color='b')\r\n sns.distplot(Fuel_per_day_per_adult_1N, ax=ax_hist, color='b')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('1N Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #2N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_2N, ax=ax_box, color='g')\r\n sns.distplot(Fuel_per_day_per_adult_2N, ax=ax_hist, color='g')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('2N Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #3N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_3N, ax=ax_box, color='r')\r\n sns.distplot(Fuel_per_day_per_adult_3N, ax=ax_hist, color='r')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('3N Fuel per Day per 
Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #4N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_4N, ax=ax_box, color='y')\r\n sns.distplot(Fuel_per_day_per_adult_4N, ax=ax_hist, color='y')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('4N Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #Plotting on the same graph\r\n fig, ax = plt.subplots()\r\n plt.title('No-Hood Fuel per Day per Adult')\r\n #plt.hold(True)\r\n #1N\r\n quant_1_1N = np.percentile(Fuel_per_day_per_adult_1N, [25,50,75])\r\n Top_lim_1_1N = quant_1_1N[2] + 1.5*(quant_1_1N[2] - quant_1_1N[0])\r\n Low_lim_1_1N = quant_1_1N[0] - 1.5*(quant_1_1N[2] - quant_1_1N[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_1N, positions = [1], widths = 0.6)\r\n Fuel_D_A_1N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_1N):\r\n if a > Top_lim_1_1N or a < Low_lim_1_1N:\r\n Fuel_D_A_1N_outlier.append(f_d_a_1N[v])\r\n plt.text(1,a,f_d_a_1N[v])\r\n plt.text(1,0.1,'1N',color='b')\r\n\r\n #2N \r\n quant_1_2N = np.percentile(Fuel_per_day_per_adult_2N, [25,50,75])\r\n Top_lim_1_2N = quant_1_2N[2] + 1.5*(quant_1_2N[2] - quant_1_2N[0])\r\n Low_lim_1_2N = quant_1_2N[0] - 1.5*(quant_1_2N[2] - quant_1_2N[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_2N,positions = [2], widths = 0.6)\r\n Fuel_D_A_2N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_2N):\r\n if a > Top_lim_1_2N or a < Low_lim_1_2N:\r\n Fuel_D_A_2N_outlier.append(f_d_a_2N[v])\r\n plt.text(2,a,f_d_a_2N[v])\r\n plt.text(2,0.1,'2N', color= 'g')\r\n #3N\r\n quant_1_3N = np.percentile(Fuel_per_day_per_adult_3N, [25,50,75])\r\n Top_lim_1_3N = quant_1_3N[2] + 1.5*(quant_1_3N[2] - quant_1_3N[0])\r\n Low_lim_1_3N = quant_1_3N[0] - 1.5*(quant_1_3N[2] - quant_1_3N[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_3N,positions = [3], widths = 0.6)\r\n count = 0\r\n Fuel_D_A_3N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3N):\r\n if a > Top_lim_1_3N or a < Low_lim_1_3N:\r\n Fuel_D_A_3N_outlier.append(f_d_a_3N[v])\r\n count = count + 1\r\n if count == 2:\r\n plt.text(3,a,f_d_a_3N[v],ha='left',va='bottom')\r\n elif count != 2:\r\n plt.text(3,a,f_d_a_3N[v],ha='right',va='bottom')\r\n plt.text(3,0.1,'3N', color='r') \r\n \r\n #4N\r\n quant_1_4N = np.percentile(Fuel_per_day_per_adult_4N, [25,50,75])\r\n Top_lim_1_4N = quant_1_4N[2] + 1.5*(quant_1_4N[2] - quant_1_4N[0])\r\n Low_lim_1_4N = quant_1_4N[0] - 1.5*(quant_1_4N[2] - quant_1_4N[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_4N,positions = [4], widths = 0.6)\r\n Fuel_D_A_4N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_4N):\r\n if a > Top_lim_1_4N or a < Low_lim_1_4N:\r\n Fuel_D_A_4N_outlier.append(f_d_a_4N[v])\r\n plt.text(4,a,f_d_a_4N[v])\r\n plt.text(4,0.1,'4N', color='y') \r\n \r\n plt.xlim(0,5)\r\n plt.ylim(0,2.3)\r\n print('Fuel/Day/Adult 1N had these values as outliers ', Fuel_D_A_1N_outlier)\r\n print('Fuel/Day/Adult 2N had these values as outliers ', Fuel_D_A_2N_outlier)\r\n print('Fuel/Day/Adult 3N had these values as outliers ', Fuel_D_A_3N_outlier)\r\n print('Fuel/Day/Adult 4N had these values as outliers ', Fuel_D_A_4N_outlier)\r\n plt.show()\r\n\r\n\r\n\r\n # % change of fuel per day per adult between each phase\r\n fig_2, ax2 = plt.subplots()\r\n plt.title('% No_hood Change from Fuel per Day per Adult' )\r\n #plt.hold(True)\r\n #2N to 
1N\r\n quant_1_2N_1N = np.percentile(Fuel_per_day_per_adult_2N_1N, [25,50,75])\r\n Top_lim_1_2N_1N = quant_1_2N_1N[2] + 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])\r\n Low_lim_1_2N_1N = quant_1_2N_1N[0] - 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_2N_1N, positions=[1], widths= 0.6)\r\n Fuel_D_A_2N_1N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_2N_1N):\r\n if a > Top_lim_1_2N_1N or a < Low_lim_1_2N_1N:\r\n Fuel_D_A_2N_1N_outlier.append(f_d_a_2N_1N[v])\r\n plt.text(1, a, f_d_a_2N_1N[v])\r\n plt.text(0.5, 0, '2N / 1N', color= 'g')\r\n \r\n #3N to 1N\r\n quant_1_3N_1N = np.percentile(Fuel_per_day_per_adult_3N_1N, [25,50,75])\r\n Top_lim_1_3N_1N = quant_1_3N_1N[2] + 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])\r\n Low_lim_1_3N_1N = quant_1_3N_1N[0] - 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3N_1N, positions=[2], widths= 0.6)\r\n Fuel_D_A_3N_1N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3N_1N):\r\n if a > Top_lim_1_3N_1N or a < Low_lim_1_3N_1N:\r\n Fuel_D_A_3N_1N_outlier.append(f_d_a_3N_1N[v])\r\n plt.text(2, a, f_d_a_3N_1N[v])\r\n plt.text(1.5, 0, '3N / 1N', color= 'r')\r\n \r\n #4N to 1N\r\n quant_1_4N_1N = np.percentile(Fuel_per_day_per_adult_4N_1N, [25,50,75])\r\n Top_lim_1_4N_1N = quant_1_4N_1N[2] + 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])\r\n Low_lim_1_4N_1N = quant_1_4N_1N[0] - 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_4N_1N, positions=[3], widths= 0.6)\r\n Fuel_D_A_4N_1N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_4N_1N):\r\n if a > Top_lim_1_4N_1N or a < Low_lim_1_4N_1N:\r\n Fuel_D_A_4N_1N_outlier.append(f_d_a_4N_1N[v])\r\n plt.text(3, a, f_d_a_4N_1N[v])\r\n plt.text(2.5, 0, '4N / 1N', color= 'y')\r\n \r\n #3N to 2N\r\n quant_1_3N_2N = np.percentile(Fuel_per_day_per_adult_3N_2N, [25,50,75])\r\n Top_lim_1_3N_2N = quant_1_3N_2N[2] + 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])\r\n Low_lim_1_3N_2N = quant_1_3N_2N[0] - 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3N_2N, positions=[4], widths= 0.6)\r\n Fuel_D_A_3N_2N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3N_2N):\r\n if a > Top_lim_1_3N_2N or a < Low_lim_1_3N_2N:\r\n Fuel_D_A_3N_2N_outlier.append(f_d_a_3N_2N[v])\r\n plt.text(4, a, f_d_a_3N_2N[v])\r\n plt.text(3.5, 0, '3N / 2N', color= 'm')\r\n \r\n #4N to 3N\r\n quant_1_4N_3N = np.percentile(Fuel_per_day_per_adult_4N_3N, [25,50,75])\r\n Top_lim_1_4N_3N = quant_1_4N_3N[2] + 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])\r\n Low_lim_1_4N_3N = quant_1_4N_3N[0] - 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_4N_3N, positions=[5], widths= 0.6)\r\n Fuel_D_A_4N_3N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_4N_3N):\r\n if a > Top_lim_1_4N_3N or a < Low_lim_1_4N_3N:\r\n Fuel_D_A_4N_3N_outlier.append(f_d_a_4N_3N[v])\r\n plt.text(5, a, f_d_a_4N_3N[v])\r\n plt.text(4.5, 0, '4N / 3N', color= 'k')\r\n \r\n #4N to 2N\r\n quant_1_4N_2N = np.percentile(Fuel_per_day_per_adult_4N_2N, [25,50,75])\r\n Top_lim_1_4N_2N = quant_1_4N_2N[2] + 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])\r\n Low_lim_1_4N_2N = quant_1_4N_2N[0] - 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_4N_2N, positions=[6], widths= 0.6)\r\n Fuel_D_A_4N_2N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_4N_2N):\r\n if a > Top_lim_1_4N_2N or a < Low_lim_1_4N_2N:\r\n 
Fuel_D_A_4N_2N_outlier.append(f_d_a_4N_2N[v])\r\n plt.text(6, a, f_d_a_4N_2N[v])\r\n plt.text(5.5, 0, '4N / 2N', color= 'tab:orange')\r\n \r\n \r\n plt.xlim(0,7)\r\n plt.ylim(-0.5,4)\r\n print('Fuel/Day/Adult 2N/1N had these values as outliers ', Fuel_D_A_2N_1N_outlier)\r\n print('Fuel/Day/Adult 3N/1N had these values as outliers ', Fuel_D_A_3N_1N_outlier)\r\n print('Fuel/Day/Adult 4N/1N had these values as outliers ', Fuel_D_A_4N_1N_outlier)\r\n print('Fuel/Day/Adult 3N/2N had these values as outliers ', Fuel_D_A_3N_2N_outlier)\r\n print('Fuel/Day/Adult 4N/3N had these values as outliers ', Fuel_D_A_4N_3N_outlier)\r\n print('Fuel/Day/Adult 4N/2N had these values as outliers ', Fuel_D_A_4N_2N_outlier)\r\n plt.show()\r\n #adding averages to the tables\r\n quant_1_1N = np.append(quant_1_1N, np.average(Fuel_per_day_per_adult_1N))\r\n quant_1_2N = np.append(quant_1_2N, np.average(Fuel_per_day_per_adult_2N))\r\n quant_1_3N = np.append(quant_1_3N, np.average(Fuel_per_day_per_adult_3N))\r\n quant_1_4N = np.append(quant_1_4N, np.average(Fuel_per_day_per_adult_4N))\r\n \r\n D_50_quant_phase_f_d_a = {'Percentile %': ['25','50','75', 'Avg'], '1N': quant_1_1N, '2N': quant_1_2N,'3N' : quant_1_3N,'4N': quant_1_4N}\r\n F_D_A_50_phase_no_hood = pd.DataFrame(data=D_50_quant_phase_f_d_a, columns=['Percentile %','1N', '2N', '3N','4N'])\r\n \r\n quant_1_2N_1N = np.append(quant_1_2N_1N , np.average(Fuel_per_day_per_adult_2N_1N))\r\n quant_1_3N_1N = np.append(quant_1_3N_1N , np.average(Fuel_per_day_per_adult_3N_1N))\r\n quant_1_4N_1N = np.append(quant_1_4N_1N , np.average(Fuel_per_day_per_adult_4N_1N))\r\n quant_1_3N_2N = np.append(quant_1_3N_2N , np.average(Fuel_per_day_per_adult_3N_2N))\r\n quant_1_4N_3N = np.append(quant_1_4N_3N , np.average(Fuel_per_day_per_adult_4N_3N))\r\n quant_1_4N_2N = np.append(quant_1_4N_2N , np.average(Fuel_per_day_per_adult_4N_2N))\r\n \r\n D_50_quant_percent_f_d_a ={'Percentile %': ['25','50','75', 'Avg'],'2N / 1N': quant_1_2N_1N,'3N / 1N': quant_1_3N_1N,'4N / 1N': quant_1_4N_1N,\r\n '3N / 2N': quant_1_3N_2N,'4N / 3N': quant_1_4N_3N,'4N / 2N': quant_1_4N_2N}\r\n F_D_A_50_percent_change_no_hood = pd.DataFrame(data=D_50_quant_percent_f_d_a, columns=['Percentile %','2N / 1N','3N / 1N', '4N / 1N'\r\n ,'3N / 2N','4N / 3N','4N / 2N'])\r\n print(F_D_A_50_phase_no_hood)\r\n print(F_D_A_50_percent_change_no_hood)\r\n# add more\r\nprint ('-------------------Fuel per Day per Adult Hood Phase -------------------')\r\n\r\nif Hood_or_no == 'hood':\r\n Fuel_per_day_per_adult_1H = []\r\n f_d_a_1H = []\r\n Fuel_per_day_per_adult_2H = []\r\n f_d_a_2H = []\r\n Fuel_per_day_per_adult_3H = []\r\n f_d_a_3H = []\r\n \r\n count_t = 0\r\n count_f = 0\r\n for c in hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_Hood_fuel_day_adult[count_f] - C_Place_holder):\r\n count_f = count_f + 1\r\n if count_f == len(Household_removal_Hood_fuel_day_adult):\r\n count_f = 0\r\n continue\r\n if Fuel_remove_1H_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_1H.append(Fuel_remove_1H_24hr.iloc[c,6]/Survey_1H.iloc[c,7])\r\n f_d_a_1H.append(Day_1H.iloc[c,0])\r\n \r\n if Fuel_remove_2H_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_2H.append(Fuel_remove_2H_24hr.iloc[c,6] / Survey_2H.iloc[c, 7])\r\n f_d_a_2H.append(Day_2H.iloc[c,0])\r\n \r\n if Fuel_remove_3H_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_3H.append(Fuel_remove_3H_24hr.iloc[c,6]/ Survey_3H.iloc[c, 
7])\r\n f_d_a_3H.append(Day_3H.iloc[c, 0])\r\n \r\n # percentage Change of Fuel per day between the phases\r\n Fuel_per_day_per_adult_2H_1H = []\r\n f_d_a_2H_1H = []\r\n Fuel_per_day_per_adult_3H_1H = []\r\n f_d_a_3H_1H = []\r\n Fuel_per_day_per_adult_3H_2H = []\r\n f_d_a_3H_2H = []\r\n \r\n count_t = 0\r\n count_f = 0\r\n for c in hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_Hood_fuel_day_adult[count_f] - C_Place_holder):\r\n count_f = count_f + 1\r\n if count_f == len(Household_removal_Hood_fuel_day_adult):\r\n count_f = 0\r\n continue\r\n if (len(Fuel_per_day_per_adult_2H)-1) >= c and (len(Fuel_per_day_per_adult_1H)-1) >= c:\r\n if Day_1H.iloc[c,13] > 0 and Day_2H.iloc[c,13] > 0 and Day_1H.iloc[c,0] == Day_2H.iloc[c,0]:\r\n Fuel_per_day_per_adult_2H_1H.append(Fuel_per_day_per_adult_2H[c]/Fuel_per_day_per_adult_1H[c])\r\n f_d_a_2H_1H.append(Day_1H.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_3H)-1) >= c and (len(Fuel_per_day_per_adult_1H)-1) >= c:\r\n if Day_3H.iloc[c,13] > 0 and Day_1H.iloc[c,13] > 0 and Day_3H.iloc[c,0] == Day_1H.iloc[c,0]:\r\n Fuel_per_day_per_adult_3H_1H.append(Fuel_per_day_per_adult_3H[c]/Fuel_per_day_per_adult_1H[c])\r\n f_d_a_3H_1H.append(Day_1H.iloc[c,0]) \r\n if (len(Fuel_per_day_per_adult_3H)-1) >= c and (len(Fuel_per_day_per_adult_2H)-1) >= c:\r\n if Day_3H.iloc[c,13] > 0 and Day_2H.iloc[c,13] > 0 and Day_3H.iloc[c,0] == Day_2H.iloc[c,0]:\r\n Fuel_per_day_per_adult_3H_2H.append(Fuel_per_day_per_adult_3H[c]/Fuel_per_day_per_adult_2H[c])\r\n f_d_a_3H_2H.append(Day_1H.iloc[c,0])\r\n \r\n # now for plotting\r\n #1H\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_1H, ax=ax_box, color='b')\r\n sns.distplot(Fuel_per_day_per_adult_1H, ax=ax_hist, color='b')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('1H Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #2H\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_2H, ax=ax_box, color='g')\r\n sns.distplot(Fuel_per_day_per_adult_2H, ax=ax_hist, color='g')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('2H Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #3H\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_3H, ax=ax_box, color='r')\r\n sns.distplot(Fuel_per_day_per_adult_3H, ax=ax_hist, color='r')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('3H Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n fig_2, ax_2 = plt.subplots()\r\n plt.title('Hood Fuel per Day per Adult')\r\n #plt.hold(True)\r\n \r\n quant_1_1H = np.percentile(Fuel_per_day_per_adult_1H, [25,50,75])\r\n Top_lim_1_1H = quant_1_1H[2] + 1.5*(quant_1_1H[2] - quant_1_1H[0])\r\n Low_lim_1_1H = quant_1_1H[0] - 1.5*(quant_1_1H[2] - quant_1_1H[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_1H, positions = [1], widths = 0.6)\r\n Fuel_D_A_1H_outlier = []\r\n for v,a in 
enumerate(Fuel_per_day_per_adult_1H):\r\n if a > Top_lim_1_1H or a < Low_lim_1_1H:\r\n Fuel_D_A_1H_outlier.append(f_d_a_1H[v])\r\n plt.text(1,a,f_d_a_1H[v])\r\n plt.text(1,0,'1H',color='b')\r\n \r\n \r\n quant_1_2H = np.percentile(Fuel_per_day_per_adult_2H, [25,50,75])\r\n Top_lim_1_2H = quant_1_2H[2] + 1.5*(quant_1_2H[2] - quant_1_2H[0])\r\n Low_lim_1_2H = quant_1_2H[0] - 1.5*(quant_1_2H[2] - quant_1_2H[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_2H,positions = [2], widths = 0.6)\r\n count = 0\r\n Fuel_D_A_2H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_2H):\r\n if a > Top_lim_1_2H or a < Low_lim_1_2H:\r\n Fuel_D_A_2H_outlier.append(f_d_a_2H[v])\r\n count = count + 1\r\n if count == 1:\r\n plt.text(2,a,f_d_a_2H[v],ha='left',va='bottom')\r\n elif count !=1:\r\n plt.text(2,a,f_d_a_2H[v],ha='right',va='bottom')\r\n plt.text(2,0,'2H', color= 'g')\r\n \r\n quant_1_3H = np.percentile(Fuel_per_day_per_adult_3H, [25,50,75])\r\n Top_lim_1_3H = quant_1_3H[2] + 1.5*(quant_1_3H[2] - quant_1_3H[0])\r\n Low_lim_1_3H = quant_1_3H[0] - 1.5*(quant_1_3H[2] - quant_1_3H[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_3H,positions = [3], widths = 0.6)\r\n count = 0\r\n Fuel_D_A_3H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3H):\r\n if a > Top_lim_1_3H or a < Low_lim_1_3H:\r\n Fuel_D_A_3H_outlier.append(f_d_a_3H[v])\r\n count = count + 1\r\n if count == 3:\r\n plt.text(3,a,f_d_a_3H[v],ha='left',va='bottom')\r\n elif count != 1:\r\n plt.text(3,a,f_d_a_3H[v],ha='right',va='bottom')\r\n plt.text(3,0,'3H', color='r') \r\n \r\n \r\n plt.xlim(-0,4)\r\n plt.ylim(-0.25,2.5)\r\n print('Fuel/Day/Adult 1H had these values as outliers ', Fuel_D_A_1H_outlier)\r\n print('Fuel/Day/Adult 2H had these values as outliers ', Fuel_D_A_2H_outlier)\r\n print('Fuel/Day/Adult 3H had these values as outliers ', Fuel_D_A_3H_outlier)\r\n plt.show()\r\n \r\n \r\n #% change of fuel perday per adult between each phase \r\n fig_2, ax2 = plt.subplots()\r\n plt.title('% No_hood Change from Fuel per Day per Adult' )\r\n #plt.hold(True)\r\n #2H to 1H\r\n quant_1_2H_1H = np.percentile(Fuel_per_day_per_adult_2H_1H, [25,50,75])\r\n Top_lim_1_2H_1H = quant_1_2H_1H[2] + 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])\r\n Low_lim_1_2H_1H = quant_1_2H_1H[0] - 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_2H_1H, positions=[1], widths= 0.6)\r\n Fuel_D_A_2H_1H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_2H_1H):\r\n if a > Top_lim_1_2H_1H or a < Low_lim_1_2H_1H:\r\n Fuel_D_A_2H_1H_outlier.append(f_d_a_2H_1H[v])\r\n plt.text(1, a, f_d_a_2H_1H[v])\r\n plt.text(0.75, -0.25, '2H / 1H', color= 'g')\r\n \r\n #3H to 1H\r\n quant_1_3H_1H = np.percentile(Fuel_per_day_per_adult_3H_1H, [25,50,75])\r\n Top_lim_1_3H_1H = quant_1_3H_1H[2] + 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])\r\n Low_lim_1_3H_1H = quant_1_3H_1H[0] - 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3H_1H, positions=[2], widths= 0.6)\r\n Fuel_D_A_3H_1H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3H_1H):\r\n if a > Top_lim_1_3H_1H or a < Low_lim_1_3H_1H:\r\n Fuel_D_A_3H_1H_outlier.append(f_d_a_3H_1H[v])\r\n plt.text(2, a, f_d_a_3H_1H[v])\r\n plt.text(1.75, -0.25, '3H / 1H', color= 'r')\r\n \r\n #3H to 2H\r\n quant_1_3H_2H = np.percentile(Fuel_per_day_per_adult_3H_2H, [25,50,75])\r\n Top_lim_1_3H_2H = quant_1_3H_2H[2] + 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])\r\n Low_lim_1_3H_2H = quant_1_3H_2H[0] - 
1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3H_2H, positions=[3], widths= 0.6)\r\n Fuel_D_A_3H_2H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3H_2H):\r\n if a > Top_lim_1_3H_2H or a < Low_lim_1_3H_2H:\r\n Fuel_D_A_3H_2H_outlier.append(f_d_a_3H_2H[v])\r\n plt.text(3, a, f_d_a_3H_2H[v])\r\n plt.text(2.75, -0.25, '2H / 1H', color= 'm')\r\n \r\n plt.xlim(-0,4)\r\n plt.ylim(-0.25,6)\r\n print('Fuel/Day/Adult 2H/1H had these values as outliers ', Fuel_D_A_2H_1H_outlier)\r\n print('Fuel/Day/Adult 3H/1H had these values as outliers ', Fuel_D_A_3H_1H_outlier)\r\n print('Fuel/Day/Adult 3H/2H had these values as outliers ', Fuel_D_A_3H_2H_outlier)\r\n plt.show()\r\n \r\n quant_1_1H = np.append(quant_1_1H, np.average(Fuel_per_day_per_adult_1H))\r\n quant_1_2H = np.append(quant_1_2H, np.average(Fuel_per_day_per_adult_2H))\r\n quant_1_3H = np.append(quant_1_3H, np.average(Fuel_per_day_per_adult_3H))\r\n\r\n \r\n D_50_quant_phase_f_d_a_hood = {'Percentile %': ['25','50','75', 'Avg'], '1H': quant_1_1H, '2H': quant_1_2H,'3H' : quant_1_3H}\r\n F_D_A_50_phase_hood = pd.DataFrame(data=D_50_quant_phase_f_d_a_hood, columns=['Percentile %','1H', '2H','3H'] )\r\n \r\n quant_1_2H_1H = np.append(quant_1_2H_1H , np.average(Fuel_per_day_per_adult_2H_1H))\r\n quant_1_3H_1H = np.append(quant_1_3H_1H , np.average(Fuel_per_day_per_adult_3H_1H))\r\n quant_1_3H_2H = np.append(quant_1_3H_2H , np.average(Fuel_per_day_per_adult_3H_2H))\r\n \r\n D_50_quant_percent_f_d_a_hood ={'Percentile %': ['25','50','75', 'Avg'],'2H / 1H': quant_1_2H_1H,'3H / 1H': quant_1_3H_1H,'3H / 2H': quant_1_3H_2H}\r\n F_D_A_50_percent_change_hood = pd.DataFrame(data=D_50_quant_percent_f_d_a_hood, columns=['Percentile %','2H / 1H','3H / 1H','3H / 2H'])\r\n\r\n print(F_D_A_50_phase_hood)\r\n print(F_D_A_50_percent_change_hood)\r\nprint('----------------------- Kitchen PM per Day -----------------------------')\r\nif Hood_or_no == 'no_hood':\r\n Kit_PM_per_day_1N = []\r\n K_PM_D_1N = []\r\n Kit_PM_per_day_2N = []\r\n K_PM_D_2N = []\r\n Kit_PM_per_day_3N = []\r\n K_PM_D_3N = []\r\n Kit_PM_per_day_4N = []\r\n K_PM_D_4N = []\r\n count_t = 0\r\n count_pm = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_NO_Hood_PM[count_pm] - C_Place_holder):\r\n count_pm = count_pm + 1\r\n if count_pm == len(Household_removal_NO_Hood_PM):\r\n count_pm = 0\r\n continue\r\n # if Day_1N.iloc[c,7] != -1.00:\r\n # Kit_PM_per_day_1N.append(Day_1N.iloc[c,7]/Day_1N.iloc[c,1])\r\n # K_PM_D_1N.append(Day_1N.iloc[c,0])\r\n if Kit_PM_1N_24hr.iloc[c,6] != -1.00:\r\n Kit_PM_per_day_1N.append(Kit_PM_1N_24hr.iloc[c,6])\r\n K_PM_D_1N.append(Kit_PM_1N_24hr.iloc[c, 0])\r\n #if Day_2N.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_2N.append(Day_2N.iloc[c,7]/Day_2N.iloc[c,1])\r\n # K_PM_D_2N.append(Day_2N.iloc[c,0])\r\n if Kit_PM_2N_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_2N.append(Kit_PM_2N_24hr.iloc[c, 6])\r\n K_PM_D_2N.append(Kit_PM_2N_24hr.iloc[c, 0])\r\n # if Day_3N.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_3N.append(Day_3N.iloc[c,7]/Day_3N.iloc[c,1])\r\n # K_PM_D_3N.append(Day_3N.iloc[c, 0])\r\n if Kit_PM_3N_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_3N.append(Kit_PM_3N_24hr.iloc[c, 6])\r\n K_PM_D_3N.append(Kit_PM_3N_24hr.iloc[c, 0])\r\n # if Day_4N.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_4N.append(Day_4N.iloc[c,7]/Day_4N.iloc[c,1])\r\n # 
K_PM_D_4N.append(Day_4N.iloc[c, 0])\r\n if Kit_PM_4N_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_4N.append(Kit_PM_4N_24hr.iloc[c, 6])\r\n K_PM_D_4N.append(Kit_PM_4N_24hr.iloc[c, 0])\r\n \r\n # percentages Between Phases of kitchen PM per day\r\n Kit_per_day_2N_1N = []\r\n K_PM_D_2N_1N = []\r\n Kit_per_day_3N_1N = []\r\n K_PM_D_3N_1N = []\r\n Kit_per_day_4N_1N = []\r\n K_PM_D_4N_1N = []\r\n \r\n Kit_per_day_3N_2N = []\r\n K_PM_D_3N_2N = []\r\n Kit_per_day_4N_3N = []\r\n K_PM_D_4N_3N = []\r\n Kit_per_day_4N_2N = []\r\n K_PM_D_4N_2N = []\r\n\r\n count_t = 0\r\n count_pm = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_NO_Hood_PM[count_pm] - C_Place_holder):\r\n count_pm = count_pm + 1\r\n if count_pm == len(Household_removal_NO_Hood_PM):\r\n count_pm = 0\r\n continue\r\n if (len(Kit_PM_per_day_2N)-1) >= c and (len(Kit_PM_per_day_1N)-1) >= c:\r\n #if Day_1N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_1N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n if Kit_PM_1N_24hr.iloc[c,6] > 0 and Kit_PM_2N_24hr.iloc[c,6] > 0 and Kit_PM_1N_24hr.iloc[c,0] == Kit_PM_2N_24hr.iloc[c,0]:\r\n Kit_per_day_2N_1N.append(Kit_PM_per_day_2N[c]/Kit_PM_per_day_1N[c])\r\n K_PM_D_2N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Kit_PM_per_day_3N)-1) >= c and (len(Kit_PM_per_day_1N)-1) >= c:\r\n #if Day_3N.iloc[c,7] > 0 and Day_1N.iloc[c,7] > 0 and Day_3N.iloc[c,0] == Day_1N.iloc[c,0]:\r\n if Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_1N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \\\r\n Kit_PM_1N_24hr.iloc[c, 0]:\r\n Kit_per_day_3N_1N.append(Kit_PM_per_day_3N[c]/Kit_PM_per_day_1N[c])\r\n K_PM_D_3N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_1N)-1) >= c:\r\n #if Day_4N.iloc[c,7] > 0 and Day_1N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_1N.iloc[c,0]:\r\n if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_1N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 0] == \\\r\n Kit_PM_1N_24hr.iloc[c, 0]:\r\n Kit_per_day_4N_1N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_1N[c])\r\n K_PM_D_4N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Kit_PM_per_day_3N)-1) >= c and (len(Kit_PM_per_day_2N)-1) >= c:\r\n #if Day_3N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_3N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n if Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_2N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \\\r\n Kit_PM_2N_24hr.iloc[c, 0]:\r\n Kit_per_day_3N_2N.append(Kit_PM_per_day_3N[c]/Kit_PM_per_day_2N[c])\r\n K_PM_D_3N_2N.append(Day_2N.iloc[c,0])\r\n if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_3N)-1) >= c:\r\n #if Day_4N.iloc[c,7] > 0 and Day_3N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_3N.iloc[c,0]:\r\n if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \\\r\n Kit_PM_4N_24hr.iloc[c, 0]:\r\n Kit_per_day_4N_3N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_3N[c])\r\n K_PM_D_4N_3N.append(Day_3N.iloc[c,0])\r\n if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_2N)-1) >= c:\r\n #if Day_4N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 0] == \\\r\n Kit_PM_2N_24hr.iloc[c, 0]:\r\n Kit_per_day_4N_2N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_2N[c])\r\n K_PM_D_4N_2N.append(Day_4N.iloc[c,0])\r\n \r\n # now for box plotting for Kitchen PM per day percent 
changes\r\n \r\n #2N to 1N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_2N_1N, ax=ax_box, color='g')\r\n sns.distplot(Kit_per_day_2N_1N, ax=ax_hist, color='g')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 2N/1N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #3N to 1N \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_3N_1N, ax=ax_box, color='r')\r\n sns.distplot(Kit_per_day_3N_1N, ax=ax_hist, color='r')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 3N/1N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #4N to 1N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_4N_1N, ax=ax_box, color='y')\r\n sns.distplot(Kit_per_day_4N_1N, ax=ax_hist, color='y')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 4N/1N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #3N to 2N \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_3N_2N, ax=ax_box, color='m')\r\n sns.distplot(Kit_per_day_3N_2N, ax=ax_hist, color='m')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 3N/2N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #4N to 3N \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_4N_3N, ax=ax_box, color='k')\r\n sns.distplot(Kit_per_day_4N_3N, ax=ax_hist, color='k')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 4N/3N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #4N to 2N \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_4N_2N, ax=ax_box, color='tab:orange')\r\n sns.distplot(Kit_per_day_4N_2N, ax=ax_hist, color='tab:orange')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 4N/2N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n \r\n #Plotting on the same graph\r\n fig, ax = plt.subplots()\r\n plt.title('No-Hood Kitchen PM per day')\r\n #plt.hold()\r\n #1N\r\n quant_1_1N = np.percentile(Kit_PM_per_day_1N, [25,50,75])\r\n Top_lim_1_1N = quant_1_1N[2] + 1.5*(quant_1_1N[2] - quant_1_1N[0])\r\n Low_lim_1_1N = quant_1_1N[0] - 1.5*(quant_1_1N[2] - quant_1_1N[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_1N, positions = [1], widths = 0.6)\r\n kitchen_pm_1N_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_1N):\r\n if a > Top_lim_1_1N or a < Low_lim_1_1N:\r\n kitchen_pm_1N_outlier.append(K_PM_D_1N[v])\r\n plt.text(1,a,K_PM_D_1N[v])\r\n plt.text(1,0.1,'1N',color='b')\r\n\r\n #2N \r\n quant_1_2N = np.percentile(Kit_PM_per_day_2N, [25,50,75])\r\n Top_lim_1_2N = quant_1_2N[2] + 1.5*(quant_1_2N[2] - quant_1_2N[0])\r\n Low_lim_1_2N = quant_1_2N[0] - 
1.5*(quant_1_2N[2] - quant_1_2N[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_2N,positions = [2], widths = 0.6)\r\n kitchen_pm_2N_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_2N):\r\n if a > Top_lim_1_2N or a < Low_lim_1_2N:\r\n kitchen_pm_2N_outlier.append(K_PM_D_2N[v])\r\n plt.text(2,a,K_PM_D_2N[v])\r\n plt.text(2,0.1,'2N', color= 'g')\r\n #3N\r\n quant_1_3N = np.percentile(Kit_PM_per_day_3N, [25,50,75])\r\n Top_lim_1_3N = quant_1_3N[2] + 1.5*(quant_1_3N[2] - quant_1_3N[0])\r\n Low_lim_1_3N = quant_1_3N[0] - 1.5*(quant_1_3N[2] - quant_1_3N[0])\r\n kitchen_pm_3N_outlier = []\r\n bp_1 = plt.boxplot(Kit_PM_per_day_3N,positions = [3], widths = 0.6)\r\n count = 0\r\n for v,a in enumerate(Kit_PM_per_day_3N):\r\n if a > Top_lim_1_3N or a < Low_lim_1_3N:\r\n kitchen_pm_3N_outlier.append(K_PM_D_3N[v])\r\n count = count + 1\r\n if count == (3):\r\n plt.text(3,a,K_PM_D_3N[v],ha='left', va='bottom')\r\n if count == (1):\r\n plt.text(3,a,K_PM_D_3N[v],ha='left', va='top')\r\n else:\r\n plt.text(3,a,K_PM_D_3N[v],ha='right', va='bottom')\r\n plt.text(3,0.1,'3N', color='r') \r\n \r\n #4N\r\n quant_1_4N = np.percentile(Kit_PM_per_day_4N, [25,50,75])\r\n Top_lim_1_4N = quant_1_4N[2] + 1.5*(quant_1_4N[2] - quant_1_4N[0])\r\n Low_lim_1_4N = quant_1_4N[0] - 1.5*(quant_1_4N[2] - quant_1_4N[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_4N,positions = [4], widths = 0.6)\r\n kitchen_pm_4N_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_4N):\r\n if a > Top_lim_1_4N or a < Low_lim_1_4N:\r\n kitchen_pm_4N_outlier.append(K_PM_D_4N[v])\r\n plt.text(4,a,K_PM_D_4N[v])\r\n plt.text(4,0.1,'4N', color='y') \r\n \r\n plt.xlim(0,5)\r\n plt.ylim(0,1200)\r\n print('Kitchen PM 1N had these values as outliers ', kitchen_pm_1N_outlier)\r\n print('Kitchen PM 2N had these values as outliers ', kitchen_pm_2N_outlier)\r\n print('Kitchen PM 3N had these values as outliers ', kitchen_pm_3N_outlier)\r\n print('Kitchen PM 4N had these values as outliers ', kitchen_pm_4N_outlier)\r\n plt.show()\r\n\r\n\r\n # % change of PM per day \r\n\r\n fig_2, ax2 = plt.subplots()\r\n plt.title('% No_hood PM per Day Change' )\r\n #plt.hold(True)\r\n #2N to 1N\r\n quant_1_2N_1N = np.percentile(Kit_per_day_2N_1N, [25,50,75])\r\n Top_lim_1_2N_1N = quant_1_2N_1N[2] + 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])\r\n Low_lim_1_2N_1N = quant_1_2N_1N[0] - 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_2N_1N, positions=[1], widths= 0.6)\r\n kitchen_pm_2N_1N_outlier = []\r\n for v,a in enumerate(Kit_per_day_2N_1N):\r\n if a > Top_lim_1_2N_1N or a < Low_lim_1_2N_1N:\r\n kitchen_pm_2N_1N_outlier.append(K_PM_D_2N_1N[v])\r\n plt.text(1, a, K_PM_D_2N_1N[v])\r\n plt.text(0.5, -0.25, '2N / 1N', color= 'g')\r\n \r\n #3N to 1N\r\n quant_1_3N_1N = np.percentile(Kit_per_day_3N_1N, [25,50,75])\r\n Top_lim_1_3N_1N = quant_1_3N_1N[2] + 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])\r\n Low_lim_1_3N_1N = quant_1_3N_1N[0] - 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_3N_1N, positions=[2], widths= 0.6)\r\n kitchen_pm_3N_1N_outlier = []\r\n for v,a in enumerate(Kit_per_day_3N_1N):\r\n if a > Top_lim_1_3N_1N or a < Low_lim_1_3N_1N:\r\n kitchen_pm_3N_1N_outlier.append(K_PM_D_3N_1N[v])\r\n plt.text(2, a, K_PM_D_3N_1N[v])\r\n plt.text(1.5, -0.25, '3N / 1N', color= 'r')\r\n \r\n #4N to 1N\r\n quant_1_4N_1N = np.percentile(Kit_per_day_4N_1N, [25,50,75])\r\n Top_lim_1_4N_1N = quant_1_4N_1N[2] + 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])\r\n Low_lim_1_4N_1N = quant_1_4N_1N[0] - 
1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_4N_1N, positions=[3], widths= 0.6)\r\n kitchen_pm_4N_1N_outlier = []\r\n for v,a in enumerate(Kit_per_day_4N_1N):\r\n if a > Top_lim_1_4N_1N or a < Low_lim_1_4N_1N:\r\n kitchen_pm_4N_1N_outlier.append(K_PM_D_4N_1N[v])\r\n plt.text(3, a, K_PM_D_4N_1N[v])\r\n plt.text(2.5, -0.25, '4N / 1N', color= 'y')\r\n \r\n #3N to 2N\r\n quant_1_3N_2N = np.percentile(Kit_per_day_3N_2N, [25,50,75])\r\n Top_lim_1_3N_2N = quant_1_3N_2N[2] + 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])\r\n Low_lim_1_3N_2N = quant_1_3N_2N[0] - 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_3N_2N, positions=[4], widths= 0.6)\r\n kitchen_pm_3N_2N_outlier = []\r\n for v,a in enumerate(Kit_per_day_3N_2N):\r\n if a > Top_lim_1_3N_2N or a < Low_lim_1_3N_2N:\r\n kitchen_pm_3N_2N_outlier.append(K_PM_D_3N_2N[v])\r\n plt.text(4, a, K_PM_D_3N_2N[v])\r\n plt.text(3.5, -0.25, '3N / 2N', color= 'm')\r\n \r\n #4N to 3N\r\n quant_1_4N_3N = np.percentile(Kit_per_day_4N_3N, [25,50,75])\r\n Top_lim_1_4N_3N = quant_1_4N_3N[2] + 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])\r\n Low_lim_1_4N_3N = quant_1_4N_3N[0] - 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_4N_3N, positions=[5], widths= 0.6)\r\n kitchen_pm_4N_3N_outlier = []\r\n for v,a in enumerate(Kit_per_day_4N_3N):\r\n if a > Top_lim_1_4N_3N or a < Low_lim_1_4N_3N:\r\n kitchen_pm_4N_3N_outlier.append(K_PM_D_4N_3N[v])\r\n plt.text(5, a, K_PM_D_4N_3N[v])\r\n plt.text(4.5, -0.25, '4N / 3N', color= 'k')\r\n \r\n #4N to 2N\r\n quant_1_4N_2N = np.percentile(Kit_per_day_4N_2N, [25,50,75])\r\n Top_lim_1_4N_2N = quant_1_4N_2N[2] + 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])\r\n Low_lim_1_4N_2N = quant_1_4N_2N[0] - 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_4N_2N, positions=[6], widths= 0.6)\r\n kitchen_pm_4N_2N_outlier = []\r\n for v,a in enumerate(Kit_per_day_4N_2N):\r\n if a > Top_lim_1_4N_2N or a < Low_lim_1_4N_2N:\r\n kitchen_pm_4N_2N_outlier.append(K_PM_D_4N_2N[v])\r\n plt.text(6, a, K_PM_D_4N_2N[v])\r\n plt.text(5.5, -0.25, '4N / 2N', color= 'tab:orange')\r\n \r\n\r\n plt.xlim(0,7)\r\n plt.ylim(-0.5,5)\r\n\r\n print('Kitchen PM 2N/1N had these values as outliers ', kitchen_pm_2N_1N_outlier)\r\n print('Kitchen PM 3N/1N had these values as outliers ', kitchen_pm_3N_1N_outlier)\r\n print('Kitchen PM 4N/1N had these values as outliers ', kitchen_pm_4N_1N_outlier)\r\n print('Kitchen PM 3N/2N had these values as outliers ', kitchen_pm_3N_2N_outlier)\r\n print('Kitchen PM 4N/3N had these values as outliers ', kitchen_pm_4N_3N_outlier)\r\n print('Kitchen PM 4N/2N had these values as outliers ', kitchen_pm_4N_2N_outlier)\r\n plt.show()\r\n \r\n #adding averages to the tables\r\n quant_1_1N = np.append(quant_1_1N, np.average(Kit_PM_per_day_1N))\r\n quant_1_2N = np.append(quant_1_2N, np.average(Kit_PM_per_day_2N))\r\n quant_1_3N = np.append(quant_1_3N, np.average(Kit_PM_per_day_3N))\r\n quant_1_4N = np.append(quant_1_4N, np.average(Kit_PM_per_day_4N))\r\n \r\n D_50_quant_phase_PM_d = {'Percentile %': ['25','50','75', 'Avg'], '1N': quant_1_1N, '2N': quant_1_2N,'3N' : quant_1_3N,'4N': quant_1_4N}\r\n PM_D_50_phase_no_hood = pd.DataFrame(data=D_50_quant_phase_PM_d,columns=['Percentile %','1N', '2N', '3N','4N'])\r\n \r\n quant_1_2N_1N = np.append(quant_1_2N_1N , np.average(Kit_per_day_2N_1N))\r\n quant_1_3N_1N = np.append(quant_1_3N_1N , np.average(Kit_per_day_3N_1N))\r\n quant_1_4N_1N = np.append(quant_1_4N_1N , 
np.average(Kit_per_day_4N_1N))\r\n quant_1_3N_2N = np.append(quant_1_3N_2N , np.average(Kit_per_day_3N_2N))\r\n quant_1_4N_3N = np.append(quant_1_4N_3N , np.average(Kit_per_day_4N_3N))\r\n quant_1_4N_2N = np.append(quant_1_4N_2N , np.average(Kit_per_day_4N_2N))\r\n \r\n D_50_quant_percent_PM_d ={'Percentile %': ['25','50','75', 'Avg'],'2N / 1N': quant_1_2N_1N,'3N / 1N': quant_1_3N_1N,'4N / 1N': quant_1_4N_1N,\r\n '3N / 2N': quant_1_3N_2N,'4N / 3N': quant_1_4N_3N,'4N / 2N': quant_1_4N_2N}\r\n PM_D_50_percent_change_no_hood = pd.DataFrame(data=D_50_quant_percent_PM_d, columns=['Percentile %','2N / 1N','3N / 1N', '4N / 1N'\r\n ,'3N / 2N','4N / 3N','4N / 2N'])\r\n\r\n \r\n print(PM_D_50_phase_no_hood)\r\n print(PM_D_50_percent_change_no_hood)\r\n \r\n# hood Pm per day\r\nif Hood_or_no == 'hood':\r\n Kit_PM_per_day_1H = []\r\n K_PM_D_1H = []\r\n Kit_PM_per_day_2H = []\r\n K_PM_D_2H = []\r\n Kit_PM_per_day_3H = []\r\n K_PM_D_3H = []\r\n\r\n count_t = 0\r\n count_pm = 0\r\n for c in hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_Hood_PM[count_pm] - C_Place_holder):\r\n count_pm = count_pm + 1\r\n if count_pm == len(Household_removal_Hood_PM):\r\n count_pm = 0\r\n continue\r\n # if Day_1H.iloc[c,7] != -1.00:\r\n # Kit_PM_per_day_1H.append(Day_1H.iloc[c,7]/Day_1H.iloc[c,1])\r\n # K_PM_D_1H.append(Day_1H.iloc[c,0])\r\n if Kit_PM_1H_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_1H.append(Kit_PM_1H_24hr.iloc[c,6])\r\n K_PM_D_1H.append(Kit_PM_1H_24hr.iloc[c,0])\r\n # if Day_2H.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_2H.append(Day_2H.iloc[c,7]/Day_2H.iloc[c,1])\r\n # K_PM_D_2H.append(Day_2H.iloc[c,0])\r\n if Kit_PM_2H_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_2H.append(Kit_PM_2H_24hr.iloc[c,6])\r\n K_PM_D_2H.append(Kit_PM_2H_24hr.iloc[c,0])\r\n # if Day_3H.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_3H.append(Day_3H.iloc[c,7]/Day_3H.iloc[c,1])\r\n # K_PM_D_3H.append(Day_3H.iloc[c, 0])\r\n if Kit_PM_3H_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_3H.append(Kit_PM_3H_24hr.iloc[c,6])\r\n K_PM_D_3H.append(Kit_PM_3H_24hr.iloc[c,0])\r\n \r\n \r\n # percentages Between Phases of kitchen PM per day\r\n Kit_per_day_2H_1H = []\r\n K_PM_D_2H_1H = []\r\n Kit_per_day_3H_1H = []\r\n K_PM_D_3H_1H = []\r\n Kit_per_day_3H_2H = []\r\n K_PM_D_3H_2H = []\r\n\r\n\r\n count_t = 0\r\n count_pm = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_Hood_PM[count_pm] - C_Place_holder):\r\n count_pm = count_pm + 1\r\n if count_pm == len(Household_removal_Hood_PM):\r\n count_pm = 0\r\n continue\r\n if (len(Kit_PM_per_day_2H)-1) >= c and (len(Kit_PM_per_day_1H)-1) >= c:\r\n #if Day_1H.iloc[c,7] > 0 and Day_2H.iloc[c,7] > 0 and Day_1H.iloc[c,0] == Day_2H.iloc[c,0]:\r\n if Kit_PM_1H_24hr.iloc[c, 6] > 0 and Kit_PM_2H_24hr.iloc[c, 6] > 0 and Kit_PM_1H_24hr.iloc[c, 0] == Kit_PM_2H_24hr.iloc[c, 0]:\r\n Kit_per_day_2H_1H.append(Kit_PM_per_day_2H[c]/Kit_PM_per_day_1H[c])\r\n K_PM_D_2H_1H.append(Day_1H.iloc[c,0])\r\n if (len(Kit_PM_per_day_3H)-1) >= c and (len(Kit_PM_per_day_1H)-1) >= c:\r\n #if Day_3H.iloc[c,7] > 0 and Day_1H.iloc[c,7] > 0 and Day_3H.iloc[c,0] == Day_1H.iloc[c,0]:\r\n if Kit_PM_3H_24hr.iloc[c, 6] > 0 and Kit_PM_1H_24hr.iloc[c, 6] > 0 and Kit_PM_1H_24hr.iloc[c, 0] == \\\r\n Kit_PM_3H_24hr.iloc[c, 0]:\r\n 
Kit_per_day_3H_1H.append(Kit_PM_per_day_3H[c]/Kit_PM_per_day_1H[c])\r\n K_PM_D_3H_1H.append(Day_1H.iloc[c,0])\r\n if (len(Kit_PM_per_day_3H)-1) >= c and (len(Kit_PM_per_day_2H)-1) >= c:\r\n #if Day_3H.iloc[c,7] > 0 and Day_2H.iloc[c,7] > 0 and Day_3H.iloc[c,0] == Day_2H.iloc[c,0]:\r\n if Kit_PM_3H_24hr.iloc[c, 6] > 0 and Kit_PM_2H_24hr.iloc[c, 6] > 0 and Kit_PM_3H_24hr.iloc[c, 0] == \\\r\n Kit_PM_2H_24hr.iloc[c, 0]:\r\n Kit_per_day_3H_2H.append(Kit_PM_per_day_3H[c]/Kit_PM_per_day_2H[c])\r\n K_PM_D_3H_2H.append(Day_2H.iloc[c,0])\r\n\r\n \r\n # now for box plotting for Kitchen PM per day percent changes\r\n \r\n #2H to 1H\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_2H_1H, ax=ax_box, color='g')\r\n sns.distplot(Kit_per_day_2H_1H, ax=ax_hist, color='g')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 2H/1H (Kitchen PM per Day)')\r\n plt.ylim(top=1.5)\r\n plt.ylim(bottom = 0)\r\n \r\n #3H to 1H \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_3H_1H, ax=ax_box, color='r')\r\n sns.distplot(Kit_per_day_3H_1H, ax=ax_hist, color='r')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 3H/1H (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #3H to 2H \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_3H_2H, ax=ax_box, color='m')\r\n sns.distplot(Kit_per_day_3H_2H, ax=ax_hist, color='m')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 3H/2H (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n \r\n #Plotting on the same graph\r\n fig, ax = plt.subplots()\r\n plt.title('Hood Kitchen PM per day')\r\n #1H\r\n quant_1_1H = np.percentile(Kit_PM_per_day_1H, [25,50,75])\r\n Top_lim_1_1H = quant_1_1H[2] + 1.5*(quant_1_1H[2] - quant_1_1H[0])\r\n Low_lim_1_1H = quant_1_1H[0] - 1.5*(quant_1_1H[2] - quant_1_1H[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_1H, positions = [1], widths = 0.6)\r\n kitchen_pm_1H_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_1H):\r\n if a > Top_lim_1_1H or a < Low_lim_1_1H:\r\n kitchen_pm_1H_outlier.append(K_PM_D_1H[v])\r\n plt.text(1,a,K_PM_D_1H[v])\r\n plt.text(0.5,0.1,'1H',color='b')\r\n\r\n #2N \r\n quant_1_2H = np.percentile(Kit_PM_per_day_2H, [25,50,75])\r\n Top_lim_1_2N = quant_1_2H[2] + 1.5*(quant_1_2H[2] - quant_1_2H[0])\r\n Low_lim_1_2N = quant_1_2H[0] - 1.5*(quant_1_2H[2] - quant_1_2H[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_2H,positions = [2], widths = 0.6)\r\n kitchen_pm_2H_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_2H):\r\n if a > Top_lim_1_2N or a < Low_lim_1_2N:\r\n kitchen_pm_2H_outlier.append(K_PM_D_2H[v])\r\n plt.text(2,a,K_PM_D_2H[v])\r\n plt.text(1.5,0.1,'2H', color= 'g')\r\n #3H\r\n quant_1_3H = np.percentile(Kit_PM_per_day_3H, [25,50,75])\r\n Top_lim_1_3N = quant_1_3H[2] + 1.5*(quant_1_3H[2] - quant_1_3H[0])\r\n Low_lim_1_3N = quant_1_3H[0] - 1.5*(quant_1_3H[2] - quant_1_3H[0])\r\n kitchen_3H_outlier = []\r\n bp_1 = plt.boxplot(Kit_PM_per_day_3H,positions = [3], widths = 0.6)\r\n count = 0\r\n kitchen_pm_3H_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_3H):\r\n if a > 
Top_lim_1_3N or a < Low_lim_1_3N:\r\n kitchen_pm_3H_outlier.append(K_PM_D_3H[v])\r\n plt.text(3,a,K_PM_D_3H[v])\r\n# kitchen_3N_outlier.append(K_PM_D_3N[v])\r\n# count = count + 1\r\n# if count == (3):\r\n# plt.text(3,a,K_PM_D_3N[v],ha='left', va='bottom')\r\n# if count == (1):\r\n# plt.text(3,a,K_PM_D_3N[v],ha='left', va='top')\r\n# else:\r\n# plt.text(3,a,K_PM_D_3N[v],ha='right', va='bottom')\r\n plt.text(2.5,0.1,'3H', color='r') \r\n plt.xlim(0,4)\r\n plt.ylim(0,1200)\r\n print('Kitchen PM 1H had these values as outliers ', kitchen_pm_1H_outlier)\r\n print('Kitchen PM 2H had these values as outliers ', kitchen_pm_2H_outlier)\r\n print('Kitchen PM 3H had these values as outliers ', kitchen_pm_3H_outlier)\r\n plt.show()\r\n #print('3N had these values as outliers ' , kitchen_3N_outlier)\r\n\r\n # % change of PM per day \r\n\r\n fig_2, ax2 = plt.subplots()\r\n plt.title('% hood PM per Day Change' )\r\n #plt.hold(True)\r\n #2H to 1H\r\n quant_1_2H_1H = np.percentile(Kit_per_day_2H_1H, [25,50,75])\r\n Top_lim_1_2N_1N = quant_1_2H_1H[2] + 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])\r\n Low_lim_1_2N_1N = quant_1_2H_1H[0] - 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_2H_1H, positions=[1], widths= 0.6)\r\n kitchen_pm_2H_1H_outlier = []\r\n for v,a in enumerate(Kit_per_day_2H_1H):\r\n if a > Top_lim_1_2N_1N or a < Low_lim_1_2N_1N:\r\n kitchen_pm_2H_1H_outlier.append(K_PM_D_2H_1H[v])\r\n plt.text(1, a, K_PM_D_2H_1H[v])\r\n plt.text(0.75, -0.25, '2H / 1H', color= 'g')\r\n \r\n #3H to 1H\r\n quant_1_3H_1H = np.percentile(Kit_per_day_3H_1H, [25,50,75])\r\n Top_lim_1_3N_1N = quant_1_3H_1H[2] + 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])\r\n Low_lim_1_3N_1N = quant_1_3H_1H[0] - 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_3H_1H, positions=[2], widths= 0.6)\r\n kitchen_pm_3H_1H_outlier = []\r\n for v,a in enumerate(Kit_per_day_3H_1H):\r\n if a > Top_lim_1_3N_1N or a < Low_lim_1_3N_1N:\r\n kitchen_pm_3H_1H_outlier.append(K_PM_D_3H_1H[v])\r\n plt.text(2, a, K_PM_D_3H_1H[v])\r\n plt.text(1.75, -0.25, '3H / 1H', color= 'r')\r\n\r\n #3H to 2H\r\n quant_1_3H_2H = np.percentile(Kit_per_day_3H_2H, [25,50,75])\r\n Top_lim_1_3N_2N = quant_1_3H_2H[2] + 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])\r\n Low_lim_1_3N_2N = quant_1_3H_2H[0] - 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_3H_2H, positions=[3], widths= 0.6)\r\n kitchen_pm_3H_2H_outlier = []\r\n for v,a in enumerate(Kit_per_day_3H_2H):\r\n if a > Top_lim_1_3N_2N or a < Low_lim_1_3N_2N:\r\n kitchen_pm_3H_2H_outlier.append(K_PM_D_3H_2H[v])\r\n plt.text(3, a, K_PM_D_3H_2H[v])\r\n plt.text(2.75, -0.25, '3H / 2H', color= 'm')\r\n\r\n plt.xlim(0,4)\r\n plt.ylim(-0.5,5)\r\n print('Kitchen PM 2H/1H had these values as outliers ', kitchen_pm_2H_1H_outlier)\r\n print('Kitchen PM 3H/1H had these values as outliers ', kitchen_pm_3H_1H_outlier)\r\n print('Kitchen PM 3H/2H had these values as outliers ', kitchen_pm_3H_2H_outlier)\r\n plt.show()\r\n \r\n quant_1_1H = np.append(quant_1_1H, np.average(Kit_PM_per_day_1H))\r\n quant_1_2H = np.append(quant_1_2H, np.average(Kit_PM_per_day_2H))\r\n quant_1_3H = np.append(quant_1_3H, np.average(Kit_PM_per_day_3H))\r\n \r\n D_50_quant_phase_PM_D_hood = {'Percentile %': ['25','50','75', 'Avg'], '1H': quant_1_1H, '2H': quant_1_2H,'3H' : quant_1_3H}\r\n PM_D_50_phase_hood = pd.DataFrame(data=D_50_quant_phase_PM_D_hood, columns= ['Percentile %','1H','2H','3H' ])\r\n \r\n quant_1_2H_1H = np.append(quant_1_2H_1H , 
np.average(Kit_per_day_2H_1H))\r\n quant_1_3H_1H = np.append(quant_1_3H_1H , np.average(Kit_per_day_3H_1H))\r\n quant_1_3H_2H = np.append(quant_1_3H_2H , np.average(Kit_per_day_3H_2H))\r\n \r\n \r\n D_50_quant_percent_PM_D_hood ={'Percentile %': ['25','50','75', 'Avg'],'2H / 1H': quant_1_2H_1H,'3H / 1H': quant_1_3H_1H,'3H / 2H': quant_1_3H_2H}\r\n PM_D_50_percent_change_hood = pd.DataFrame(data=D_50_quant_percent_PM_D_hood, columns=['Percentile %','2H / 1H','3H / 1H','3H / 2H'])\r\n\r\n print(PM_D_50_phase_hood)\r\n print(PM_D_50_percent_change_hood)\r\n# when i am ready to transfer to a data frame and get the differences\r\n\r\n#histograms for the comparison\r\nif Hood_or_no == 'no_hood':\r\n plt.title('Histogram of Fuel per 24 Hours per Person - No Hood' )\r\n plt.hist([Fuel_per_day_per_adult_1N],\r\n color=['b'], alpha=0.5, label='1N')\r\n plt.hist([Fuel_per_day_per_adult_2N],\r\n color=['g'], alpha=0.5, label='2N')\r\n plt.hist([Fuel_per_day_per_adult_3N],\r\n color=['r'], alpha=0.5, label='3N')\r\n plt.hist([Fuel_per_day_per_adult_4N],\r\n color=['y'], alpha=0.5, label='4N')\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n\r\n\r\n plt.title('Histogram of Kitchen PM 24 Hours - No Hood' )\r\n plt.hist([Kit_PM_per_day_1N],\r\n color=['b'], alpha=0.5, label='1N')\r\n plt.hist([Kit_PM_per_day_2N],\r\n color=['g'], alpha=0.5, label='2N')\r\n plt.hist([Kit_PM_per_day_3N],\r\n color=['r'], alpha=0.5, label='3N')\r\n plt.hist([Kit_PM_per_day_4N],\r\n color=['y'], alpha=0.5, label='4N')\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n\r\nif Hood_or_no == 'hood':\r\n plt.title('Histogram of Fuel per 24 Hours per Person - Hood' )\r\n plt.hist([Fuel_per_day_per_adult_1H],\r\n color=['b'], alpha=0.5, label='1H')\r\n plt.hist([Fuel_per_day_per_adult_2H],\r\n color=['g'], alpha=0.5, label='2H')\r\n plt.hist([Fuel_per_day_per_adult_3H],\r\n color=['r'], alpha=0.5, label='3H')\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n\r\n plt.title('Histogram of Kitchen PM 24 Hours - Hood' )\r\n plt.hist([Kit_PM_per_day_1H],\r\n color=['b'], alpha=0.5, label='1H')\r\n plt.hist([Kit_PM_per_day_2H],\r\n color=['g'], alpha=0.5, label='2H')\r\n plt.hist([Kit_PM_per_day_3H],\r\n color=['r'], alpha=0.5, label='3H')\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n", "import itertools\nimport os\nfrom types import prepare_class\nimport pandas as pd\nimport numpy as np\nimport csv\nimport glob\nfrom decimal import *\nfrom itertools import chain\n#import statistics as stat\nimport datetime\nfrom datetime import datetime\nfrom itertools import islice, cycle\nfrom io import StringIO\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nPhase = \"1N\"\nComputer = \"Personal\"\n\nif Computer == \"work\":\n USB = \"D\"\n os.chdir(\"D:/Sensitivity Fuel Threshold/\"+Phase)\nelse:\n USB = \"E\"\n os.chdir(\"E:/Sensitivity Fuel Threshold/\"+Phase)\n\nif Phase == (\"2N\") or Phase == \"3N\" or Phase == \"1N\" or Phase == \"4N\":\n HH_1_fuel_sense = []\n HH_2_fuel_sense = []\n HH_3_fuel_sense = []\n HH_4_fuel_sense = []\n HH_5_fuel_sense = []\n HH_6_fuel_sense = []\n HH_7_fuel_sense = []\n\nelse:\n HH_11_fuel_sense = []\n HH_22_fuel_sense = []\n HH_33_fuel_sense = []\n\nDay_met_path = os.getcwd()\ncsv_R_m = glob.glob(os.path.join(Day_met_path, \"*.csv\"))\n#Thresh_step = np.arange(0,0.22, 0.005)\nhoushold_count = 1\nfor file in csv_R_m:\n with open(file, 'r') as f:\n csv_reader = csv.reader(f)\n for idx, 
row in enumerate(csv_reader):\n if 'Household Number' in row:\n id_number = (row[1])\n print('------- ID Number------', id_number)\n print('houshold_count ------',houshold_count)\n #print('thressh', Thresh_step)\n elif 'Fuel Raw Data':\n Start_data = idx\n #print('row where everything starts', Start_data)\n break\n\n sensor_data = pd.read_csv(file, skiprows=Start_data)\n\n Thresh_step = np.arange(0,0.22, 0.005)\n #Thresh_step = 0,0.02 \n Fuel_KG_nf = sensor_data.iloc[:,0]\n #print('length of Fuel_KG_nf',Fuel_KG_nf)\n Fuel_consumed = []\n for thres_fuel in Thresh_step:\n n = 0\n fuel_kg = []\n insert = []\n remove = []\n for v, kg in enumerate(Fuel_KG_nf):\n \n if v+1 == len(Fuel_KG_nf):\n fuel_kg.append(fuel_kg[-1])\n break\n else:\n change = Fuel_KG_nf.iloc[v+1] - (kg)\n next_weight = Fuel_KG_nf[v+1]\n \n if v == 0:\n previous = Fuel_KG_nf.iloc[0]\n up_thresh = previous + thres_fuel\n low_thresh = previous - thres_fuel\n else:\n previous = fuel_kg[-1]\n up_thresh = previous + thres_fuel\n low_thresh = previous - thres_fuel\n\n if abs(change) < thres_fuel:\n fuel_kg.append(previous)\n \n elif abs(change) > thres_fuel:\n \n if (next_weight <= previous) or (low_thresh <= next_weight <= up_thresh):\n fuel_kg.append(next_weight)\n elif (next_weight<kg) and (previous< next_weight):\n fuel_kg.append(next_weight)\n else:\n fuel_kg.append(next_weight)\n else:\n if (next_weight > previous) or (low_thresh <= next_weight <= up_thresh):\n fuel_kg.append(previous)\n elif (next_weight>kg) and (previous>next_weight):\n fuel_kg.append(next_weight)\n else:\n fuel_kg.append(kg)\n #previous = Fuel_KG_nf.iloc[(0)]\n #for kg in Fuel_KG_nf:\n # n = n + 1\n # if n+2 == (len(Fuel_KG_nf)):\n # fuel_kg.append(Fuel_KG_nf.iloc[(n)])\n # fuel_kg.append(Fuel_KG_nf.iloc[(n+1)])\n # break\n\n # elif (previous < Fuel_KG_nf.iloc[n+2]) and (Fuel_KG_nf.iloc[(n+2)] < Fuel_KG_nf.iloc[(n)]):\n # fuel_kg.append(Fuel_KG_nf.iloc[n+2])\n\n # elif Fuel_KG_nf.iloc[n+2] >= previous:\n # fuel_kg.append(previous)\n\n # elif (previous > Fuel_KG_nf.iloc[n+2]) and (Fuel_KG_nf.iloc[(n)] < Fuel_KG_nf.iloc[(n+2)]):\n # fuel_kg.append(Fuel_KG_nf.iloc[n+2])\n\n # else:\n # fuel_kg.append(Fuel_KG_nf.iloc[(n)])\n\n # previous = fuel_kg[-1]\n fuel_kg.insert(0, Fuel_KG_nf.iloc[0])\n #count = count + 1\n #fuel_kg.insert(0, Fuel_KG_nf.iloc[0])\n\n remove = []\n remove_kg = []\n insert = []\n insert_kg = []\n v = 0\n for tv, weight in enumerate(fuel_kg):\n if tv + 1 == len(fuel_kg):\n break \n elif weight > fuel_kg[tv+1]:\n remove.append(tv+1)\n remove_kg.append((int((abs(fuel_kg[tv +1 ] - weight))*1000)/1000))\n elif fuel_kg[tv +1] > weight:\n insert.append(tv)\n insert_kg.append(fuel_kg[tv+1] - weight)\n v = 0\n\n\n kg = np.arange(0, len(Fuel_KG_nf),1)\n count = 0\n kg_burned = []\n for wei in kg:\n if (wei) == (len(Fuel_KG_nf)-1):\n kg_burned.append(kg_burned[-1])\n break\n elif remove[-1] == len(kg_burned)-2:\n kg_burned.append(kg_burned[-1])\n pass\n elif wei == remove[count]:\n kg_burned.append(remove_kg[count])\n if remove[-1] == wei:\n end_bit = np.arange(wei, len(Fuel_KG_nf),1)\n for a in end_bit:\n kg_burned.append(kg_burned[-1])\n break\n count = count + 1\n elif wei == 0 and remove_kg[wei] != 0:\n kg_burned.append(0)\n else:\n kg_burned.append(kg_burned[-1])\n#seting the kg burned out of array time values to sum and set\n set_kg_burned = []\n for fiber, wood in enumerate(kg_burned):\n if len(kg_burned) == fiber +1:\n if wood != set_kg_burned[-1]:\n set_kg_burned.append(wood)\n break\n elif kg_burned[fiber+1] != wood:\n 
set_kg_burned.append(wood)\n # count = count + 1\n #print('here is the fuel used', sum(set_kg_burned))\n Fuel_consumed.append((int(sum(set_kg_burned)*100))/100)\n\n if Phase == (\"1N\") or Phase == (\"2N\") or Phase == \"3N\" or Phase == \"4N\":\n if houshold_count == 1:\n HH_1_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_1 = id_number\n min_1 = kg\n HH_1_raw = Fuel_KG_nf\n HH_1_filter = fuel_kg\n elif houshold_count == 2:\n HH_2_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_2 = id_number\n min_2 = kg\n HH_2_raw = Fuel_KG_nf\n HH_2_filter = fuel_kg\n elif houshold_count == 3:\n HH_3_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_3 = id_number\n min_3 = kg\n HH_3_raw = Fuel_KG_nf\n HH_3_filter = fuel_kg\n elif houshold_count == 4:\n HH_4_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_4 = id_number\n min_4 = kg\n HH_4_raw = Fuel_KG_nf\n HH_4_filter = fuel_kg\n elif houshold_count == 5:\n HH_5_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_5 = id_number\n elif houshold_count == 6:\n HH_6_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_6 = id_number\n elif houshold_count == 7:\n HH_7_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_7 = id_number\n else:\n if houshold_count == 1:\n HH_11_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_11 = id_number\n elif houshold_count == 2:\n HH_22_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_22 = id_number\n elif houshold_count == 3:\n HH_33_fuel_sense.append((int(sum(set_kg_burned)*100))/100)\n HH_33 = id_number\n houshold_count = houshold_count +1\n#print('----------------------------------',Thresh_step, HH_1_fuel_sense, type(Thresh_step), type(HH_1_fuel_sense))\n\n#if Phase == (\"2N\") or Phase == \"3N\" or Phase == \"1N\" or Phase == \"4N\":\n #fig = go.Figure()\n #fig.add_trace(go.Scatter(x=Thresh_step, y=HH_1_fuel_sense,\n # mode='lines+markers',\n # name=HH_1))\n #fig.add_trace(go.Scatter(x=Thresh_step, y=HH_2_fuel_sense,\n # mode='lines+markers',\n # name=HH_2))\n #fig.add_trace(go.Scatter(x=Thresh_step, y=HH_3_fuel_sense,\n # mode='lines+markers', name=HH_3))\n #fig.add_trace(go.Scatter(x=Thresh_step, y=HH_4_fuel_sense,\n # mode='lines+markers',\n # name=HH_4))\n #fig.add_trace(go.Scatter(x=Thresh_step, y=HH_5_fuel_sense,\n # mode='lines+markers',\n # name=HH_5))\n \n #fig.add_trace(go.Scatter(x=Thresh_step, y=HH_6_fuel_sense,\n # mode='lines+markers', name=HH_6))\n #if houshold_count > 6:\n # fig.add_trace(go.Scatter(x=Thresh_step, y=HH_7_fuel_sense,\n # mode='lines+markers', name=HH_7))\n #fig.update_layout(title=Phase+\" Sensitivity for Fuel Threshold\",\n # xaxis_title='Threshold',\n # yaxis_title='Fuel Removed')\n #fig.show()\n\n#else:\n# fig = go.Figure()\n# fig.add_trace(go.Scatter(x=Thresh_step, y=HH_11_fuel_sense,\n# mode='lines+markers',\n# name=HH_11))\n# fig.add_trace(go.Scatter(x=Thresh_step, y=HH_22_fuel_sense,\n# mode='lines+markers',\n# name=HH_22))\n# fig.add_trace(go.Scatter(x=Thresh_step, y=HH_33_fuel_sense,\n# mode='lines+markers', name=HH_33))\n# fig.update_layout(title=Phase+\" Sensitivity for Fuel Threshold\",\n# xaxis_title='Threshold',\n# yaxis_title='Fuel Removed')\n# fig.show()\n\nfig = go.Figure()\nfig.add_trace(go.Scatter(x=min_1, y=HH_1_filter,\n mode='lines+markers',\n name=HH_1))\n#fig.add_trace(go.Scatter(x=min_2, y=HH_2_filter,\n# mode='lines+markers',\n# name=HH_2))\n#fig.add_trace(go.Scatter(x=min_3, y=HH_3_filter,\n# mode='lines+markers', name=HH_3))\nfig.add_trace(go.Scatter(x=min_4, y=HH_4_filter,\n 
mode='lines+markers',\n name=HH_4))\nfig.add_trace(go.Scatter(x=min_1, y=HH_1_raw,\n mode='lines+markers',\n name=HH_1+'raw'))\n#fig.add_trace(go.Scatter(x=min_2, y=HH_2_raw,\n# mode='lines+markers',\n# name=HH_2+'raw'))\n#fig.add_trace(go.Scatter(x=min_3, y=HH_3_raw,\n# mode='lines+markers', name=HH_3+'raw'))\nfig.add_trace(go.Scatter(x=min_4, y=HH_4_raw,\n mode='lines+markers',\n name=HH_4+'raw'))\nfig.update_layout(title=Phase+\" Sensitivity for Fuel Threshold\",\n xaxis_title='Threshold',\n yaxis_title='Fuel Removed')\nfig.show()\n" ]
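The fuel-sensitivity script that ends just above sweeps thresholds with np.arange(0, 0.22, 0.005) and, for each threshold, smooths the raw scale readings before summing the weight that was removed. A minimal sketch of that thresholding idea follows; the function name, the toy series, and the simplified branch logic are illustrative only and deliberately omit the extra cases handled in the original loop.

import numpy as np

def fuel_consumed(raw_kg, threshold):
    """Total weight removed from a threshold-filtered scale series.

    Changes smaller than `threshold` are treated as sensor jitter and the
    previous filtered value is carried forward; larger changes are accepted.
    Every accepted decrease counts as fuel taken off the scale.
    """
    filtered = [raw_kg[0]]
    for next_weight in raw_kg[1:]:
        if abs(next_weight - filtered[-1]) < threshold:
            filtered.append(filtered[-1])   # ignore jitter below the threshold
        else:
            filtered.append(next_weight)    # accept a real addition or removal
    drops = np.diff(filtered)
    return float(-drops[drops < 0].sum())   # kg removed over the whole series

# Illustrative series with two genuine removals (0.5 kg and 1.0 kg):
raw = [5.00, 5.01, 4.99, 4.50, 4.51, 4.49, 3.50, 3.51]
print(fuel_consumed(raw, threshold=0.05))   # -> 1.5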
[ [ "matplotlib.pyplot.text", "matplotlib.pyplot.boxplot", "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "numpy.percentile", "pandas.DataFrame", "matplotlib.pyplot.xlim", "numpy.average", "pandas.set_option", "matplotlib.pyplot.show", "matplotlib.pyplot.hist" ], [ "numpy.arange", "pandas.read_csv" ] ]
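Among the APIs listed above for the fuel-sensitivity file, pandas.read_csv is only reached after the script has scanned each CSV with the csv module to find where the per-household metadata ends and the sensor table begins. A hedged sketch of that two-pass pattern is given below; the helper name and the assumption that the column headers sit on the row after the 'Fuel Raw Data' marker are mine, not taken from the dataset.

import csv
import pandas as pd

def read_sensor_block(path, marker='Fuel Raw Data'):
    """Load only the sensor table from a CSV whose metadata rows precede a marker row."""
    with open(path, newline='') as f:
        for idx, row in enumerate(csv.reader(f)):
            if marker in row:
                # assume the column headers follow directly after the marker row
                return pd.read_csv(path, skiprows=idx + 1)
    raise ValueError(f'marker row {marker!r} not found in {path}')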
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
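The kitchen-PM plotting script further above flags box-plot outliers with the usual 1.5*IQR whisker fences: np.percentile(data, [25, 50, 75]) gives Q1 and Q3, and any point above Q3 + 1.5*(Q3 - Q1) or below Q1 - 1.5*(Q3 - Q1) is labeled on the figure with plt.text and printed afterwards. The standalone helper below restates that rule; the helper name and the sample numbers are illustrative, not values from the study.

import numpy as np

def iqr_outliers(values, labels):
    """Labels of the points lying outside the 1.5*IQR whisker fences."""
    q1, _, q3 = np.percentile(values, [25, 50, 75])
    fence = 1.5 * (q3 - q1)
    low, high = q1 - fence, q3 + fence
    return [lab for val, lab in zip(values, labels) if val < low or val > high]

# One point far above the rest gets flagged:
pm = [110, 130, 125, 140, 118, 900]
ids = ['HH01', 'HH02', 'HH03', 'HH04', 'HH05', 'HH07']
print(iqr_outliers(pm, ids))   # -> ['HH07']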
icml2020submission6857/metarl
[ "9b66cefa2b6bcb6a38096d629ce8853b47c7171d", "9b66cefa2b6bcb6a38096d629ce8853b47c7171d", "9b66cefa2b6bcb6a38096d629ce8853b47c7171d", "9b66cefa2b6bcb6a38096d629ce8853b47c7171d", "9b66cefa2b6bcb6a38096d629ce8853b47c7171d", "ae4825d21478fa1fd0aa6b116941ea40caa152a5", "9b66cefa2b6bcb6a38096d629ce8853b47c7171d", "9b66cefa2b6bcb6a38096d629ce8853b47c7171d", "ae4825d21478fa1fd0aa6b116941ea40caa152a5" ]
[ "tests/metarl/torch/algos/test_torch_algo_utils.py", "tests/metarl/tf/baselines/test_gaussian_cnn_baseline.py", "src/metarl/tf/policies/categorical_mlp_policy.py", "src/metarl/tf/q_functions/discrete_cnn_q_function.py", "src/metarl/envs/dm_control/dm_control_env.py", "tests/metarl/envs/wrappers/test_noop.py", "src/metarl/torch/modules/tanh_gaussian_mlp_module_2.py", "src/metarl/torch/embeddings/recurrent_encoder.py", "src/metarl/torch/q_functions/continuous_mlp_q_function.py" ]
[ "\"\"\"Test torch algo utility functions.\"\"\"\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nimport torch\nimport torch.nn.functional as F\n\nimport metarl.tf.misc.tensor_utils as tf_utils\nimport metarl.torch.algos._utils as torch_algo_utils\nfrom tests.fixtures import TfGraphTestCase\n\n\ndef stack(d, arr):\n \"\"\"Stack 'arr' 'd' times.\"\"\"\n return np.repeat(np.expand_dims(arr, axis=0), repeats=d, axis=0)\n\n\nONES = np.ones((4, 6))\nZEROS = np.zeros((4, 6))\nARRANGE = stack(4, np.arange(6))\nPI_DIGITS = stack(4, [3, 1, 4, 1, 5, 9])\nE_DIGITS = stack(4, [2, 7, 1, 8, 2, 8])\nFIBS = stack(4, [1, 1, 2, 3, 5, 8])\n\nnums_1d = np.arange(0, 4).astype(float)\nnums_2d = np.arange(0, 4).astype(float).reshape(2, 2)\nnums_3d = np.arange(0, 8).astype(float).reshape(2, 2, 2)\n\n\nclass TestTorchAlgoUtils(TfGraphTestCase):\n \"\"\"Test class for torch algo utility functions.\"\"\"\n # yapf: disable\n @pytest.mark.parametrize('gae_lambda, rewards_val, baselines_val', [\n (0.4, ONES, ZEROS),\n (0.8, PI_DIGITS, ARRANGE),\n (1.2, ONES, FIBS),\n (1.7, E_DIGITS, PI_DIGITS),\n ])\n # yapf: enable\n def testcompute_advantages(self, gae_lambda, rewards_val, baselines_val):\n \"\"\"Test compute_advantage function.\"\"\"\n discount = 0.99\n max_len = rewards_val.shape[-1]\n\n torch_advs = torch_algo_utils.compute_advantages(\n discount, gae_lambda, max_len, torch.Tensor(baselines_val),\n torch.Tensor(rewards_val))\n\n rewards = tf.compat.v1.placeholder(dtype=tf.float32,\n name='reward',\n shape=[None, None])\n baselines = tf.compat.v1.placeholder(dtype=tf.float32,\n name='baseline',\n shape=[None, None])\n adv = tf_utils.compute_advantages(discount, gae_lambda, max_len,\n baselines, rewards)\n tf_advs = self.sess.run(adv,\n feed_dict={\n rewards: rewards_val,\n baselines: baselines_val,\n })\n\n assert np.allclose(torch_advs.numpy(),\n tf_advs.reshape(torch_advs.shape),\n atol=1e-5)\n\n def test_add_padding_last_1d(self):\n \"\"\"Test pad_to_last function for 1d.\"\"\"\n max_length = 10\n\n expected = F.pad(torch.Tensor(nums_1d),\n (0, max_length - nums_1d.shape[-1]))\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_1d,\n total_length=max_length)\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_1d,\n total_length=10,\n axis=0)\n assert expected.eq(tensor_padding).all()\n\n def test_add_padding_last_2d(self):\n \"\"\"Test pad_to_last function for 2d.\"\"\"\n max_length = 10\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_2d, total_length=10)\n expected = F.pad(torch.Tensor(nums_2d),\n (0, max_length - nums_2d.shape[-1]))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_2d,\n total_length=10,\n axis=0)\n expected = F.pad(torch.Tensor(nums_2d),\n (0, 0, 0, max_length - nums_2d.shape[0]))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_2d,\n total_length=10,\n axis=1)\n expected = F.pad(torch.Tensor(nums_2d),\n (0, max_length - nums_2d.shape[-1], 0, 0))\n assert expected.eq(tensor_padding).all()\n\n def test_add_padding_last_3d(self):\n \"\"\"Test pad_to_last function for 3d.\"\"\"\n max_length = 10\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_3d, total_length=10)\n expected = F.pad(torch.Tensor(nums_3d),\n (0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_3d,\n total_length=10,\n axis=0)\n expected = F.pad(torch.Tensor(nums_3d),\n 
(0, 0, 0, 0, 0, max_length - nums_3d.shape[0]))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_3d,\n total_length=10,\n axis=1)\n expected = F.pad(torch.Tensor(nums_3d),\n (0, 0, 0, max_length - nums_3d.shape[-1], 0, 0))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_3d,\n total_length=10,\n axis=2)\n expected = F.pad(torch.Tensor(nums_3d),\n (0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))\n assert expected.eq(tensor_padding).all()\n\n @pytest.mark.parametrize('nums', [nums_1d, nums_2d, nums_3d])\n def test_out_of_index_error(self, nums):\n \"\"\"Test pad_to_last raises IndexError.\"\"\"\n with pytest.raises(IndexError):\n torch_algo_utils.pad_to_last(nums,\n total_length=10,\n axis=len(nums.shape))\n\n def testmake_optimizer_with_type(self):\n \"\"\"Test make_optimizer function with type as first argument.\"\"\"\n optimizer_type = torch.optim.Adam\n module = torch.nn.Linear(2, 1)\n lr = 0.123\n optimizer = torch_algo_utils.make_optimizer(optimizer_type,\n module,\n lr=lr)\n assert isinstance(optimizer, optimizer_type)\n assert optimizer.defaults['lr'] == lr\n\n def testmake_optimizer_with_tuple(self):\n \"\"\"Test make_optimizer function with tuple as first argument.\"\"\"\n optimizer_type = (torch.optim.Adam, {'lr': 0.1})\n module = torch.nn.Linear(2, 1)\n optimizer = torch_algo_utils.make_optimizer(optimizer_type, module)\n assert isinstance(optimizer, optimizer_type)\n assert optimizer.defaults['lr'] == optimizer_type[1]['lr']\n\n def testmake_optimizer_raise_value_error(self):\n \"\"\"Test make_optimizer raises value error.\"\"\"\n optimizer_type = (torch.optim.Adam, {'lr': 0.1})\n module = torch.nn.Linear(2, 1)\n with pytest.raises(ValueError):\n _ = torch_algo_utils.make_optimizer(optimizer_type,\n module,\n lr=0.123)\n", "import pickle\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nfrom metarl.tf.baselines import GaussianCNNBaseline\nfrom metarl.tf.envs import TfEnv\nfrom tests.fixtures import TfGraphTestCase\nfrom tests.fixtures.envs.dummy import DummyBoxEnv\nfrom tests.fixtures.regressors import SimpleGaussianCNNRegressor\n\n\nclass TestGaussianCNNBaseline(TfGraphTestCase):\n\n @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])\n def test_fit(self, obs_dim):\n box_env = TfEnv(DummyBoxEnv(obs_dim=obs_dim))\n with mock.patch(('metarl.tf.baselines.'\n 'gaussian_cnn_baseline.'\n 'GaussianCNNRegressor'),\n new=SimpleGaussianCNNRegressor):\n gcb = GaussianCNNBaseline(env_spec=box_env.spec)\n paths = [{\n 'observations': [np.full(obs_dim, 1)],\n 'returns': [1]\n }, {\n 'observations': [np.full(obs_dim, 2)],\n 'returns': [2]\n }]\n gcb.fit(paths)\n\n obs = {'observations': [np.full(obs_dim, 1), np.full(obs_dim, 2)]}\n prediction = gcb.predict(obs)\n assert np.array_equal(prediction, [1, 2])\n\n @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])\n def test_param_values(self, obs_dim):\n box_env = TfEnv(DummyBoxEnv(obs_dim=obs_dim))\n with mock.patch(('metarl.tf.baselines.'\n 'gaussian_cnn_baseline.'\n 'GaussianCNNRegressor'),\n new=SimpleGaussianCNNRegressor):\n gcb = GaussianCNNBaseline(env_spec=box_env.spec)\n new_gcb = GaussianCNNBaseline(env_spec=box_env.spec,\n name='GaussianCNNBaseline2')\n\n # Manual change the parameter of GaussianCNNBaseline\n with tf.compat.v1.variable_scope('GaussianCNNBaseline', reuse=True):\n return_var = tf.compat.v1.get_variable(\n 'SimpleGaussianCNNModel/return_var')\n 
return_var.load(1.0)\n\n old_param_values = gcb.get_param_values()\n new_param_values = new_gcb.get_param_values()\n assert not np.array_equal(old_param_values, new_param_values)\n new_gcb.set_param_values(old_param_values)\n new_param_values = new_gcb.get_param_values()\n assert np.array_equal(old_param_values, new_param_values)\n\n @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])\n def test_get_params_internal(self, obs_dim):\n box_env = TfEnv(DummyBoxEnv(obs_dim=obs_dim))\n with mock.patch(('metarl.tf.baselines.'\n 'gaussian_cnn_baseline.'\n 'GaussianCNNRegressor'),\n new=SimpleGaussianCNNRegressor):\n gcb = GaussianCNNBaseline(env_spec=box_env.spec,\n regressor_args=dict())\n params_interal = gcb.get_params_internal()\n trainable_params = tf.compat.v1.trainable_variables(\n scope='GaussianCNNBaseline')\n assert np.array_equal(params_interal, trainable_params)\n\n def test_is_pickleable(self):\n box_env = TfEnv(DummyBoxEnv(obs_dim=(1, )))\n with mock.patch(('metarl.tf.baselines.'\n 'gaussian_cnn_baseline.'\n 'GaussianCNNRegressor'),\n new=SimpleGaussianCNNRegressor):\n gcb = GaussianCNNBaseline(env_spec=box_env.spec)\n obs = {'observations': [np.full(1, 1), np.full(1, 1)]}\n\n with tf.compat.v1.variable_scope('GaussianCNNBaseline', reuse=True):\n return_var = tf.compat.v1.get_variable(\n 'SimpleGaussianCNNModel/return_var')\n return_var.load(1.0)\n\n prediction = gcb.predict(obs)\n\n h = pickle.dumps(gcb)\n\n with tf.compat.v1.Session(graph=tf.Graph()):\n gcb_pickled = pickle.loads(h)\n prediction2 = gcb_pickled.predict(obs)\n\n assert np.array_equal(prediction, prediction2)\n", "\"\"\"CategoricalMLPPolicy.\"\"\"\nimport akro\nimport tensorflow as tf\n\nfrom metarl.tf.distributions import Categorical\nfrom metarl.tf.models import MLPModel\nfrom metarl.tf.policies import StochasticPolicy\n\n\nclass CategoricalMLPPolicy(StochasticPolicy):\n \"\"\"CategoricalMLPPolicy\n\n A policy that contains a MLP to make prediction based on\n a categorical distribution.\n\n It only works with akro.Discrete action space.\n\n Args:\n env_spec (metarl.envs.env_spec.EnvSpec): Environment specification.\n name (str): Policy name, also the variable scope.\n hidden_sizes (list[int]): Output dimension of dense layer(s).\n For example, (32, 32) means the MLP of this policy consists of two\n hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s). It should return a tf.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a tf.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n tf.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). 
The function should return a\n tf.Tensor.\n layer_normalization (bool): Bool for using layer normalization or not.\n\n \"\"\"\n\n def __init__(self,\n env_spec,\n name='CategoricalMLPPolicy',\n hidden_sizes=(32, 32),\n hidden_nonlinearity=tf.nn.tanh,\n hidden_w_init=tf.glorot_uniform_initializer(),\n hidden_b_init=tf.zeros_initializer(),\n output_nonlinearity=tf.nn.softmax,\n output_w_init=tf.glorot_uniform_initializer(),\n output_b_init=tf.zeros_initializer(),\n layer_normalization=False):\n assert isinstance(env_spec.action_space, akro.Discrete), (\n 'CategoricalMLPPolicy only works with akro.Discrete action '\n 'space.')\n super().__init__(name, env_spec)\n self.obs_dim = env_spec.observation_space.flat_dim\n self.action_dim = env_spec.action_space.n\n\n self.model = MLPModel(output_dim=self.action_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n layer_normalization=layer_normalization,\n name='MLPModel')\n\n self._initialize()\n\n def _initialize(self):\n state_input = tf.compat.v1.placeholder(tf.float32,\n shape=(None, self.obs_dim))\n\n with tf.compat.v1.variable_scope(self.name) as vs:\n self._variable_scope = vs\n self.model.build(state_input)\n\n self._f_prob = tf.compat.v1.get_default_session().make_callable(\n self.model.networks['default'].outputs,\n feed_list=[self.model.networks['default'].input])\n\n @property\n def vectorized(self):\n \"\"\"Vectorized or not.\"\"\"\n return True\n\n def dist_info_sym(self, obs_var, state_info_vars=None, name=None):\n \"\"\"Symbolic graph of the distribution.\"\"\"\n with tf.compat.v1.variable_scope(self._variable_scope):\n prob = self.model.build(obs_var, name=name)\n return dict(prob=prob)\n\n def dist_info(self, obs, state_infos=None):\n \"\"\"Distribution info.\"\"\"\n prob = self._f_prob(obs)\n return dict(prob=prob)\n\n def get_action(self, observation):\n \"\"\"Return a single action.\"\"\"\n flat_obs = self.observation_space.flatten(observation)\n prob = self._f_prob([flat_obs])[0]\n action = self.action_space.weighted_sample(prob)\n return action, dict(prob=prob)\n\n def get_actions(self, observations):\n \"\"\"Return multiple actions.\"\"\"\n flat_obs = self.observation_space.flatten_n(observations)\n probs = self._f_prob(flat_obs)\n actions = list(map(self.action_space.weighted_sample, probs))\n return actions, dict(prob=probs)\n\n def get_regularizable_vars(self):\n \"\"\"Get regularizable weight variables under the Policy scope.\"\"\"\n trainable = self.get_trainable_vars()\n return [\n var for var in trainable\n if 'hidden' in var.name and 'kernel' in var.name\n ]\n\n @property\n def distribution(self):\n \"\"\"Policy distribution.\"\"\"\n return Categorical(self.action_dim)\n\n def __getstate__(self):\n \"\"\"Object.__getstate__.\"\"\"\n new_dict = super().__getstate__()\n del new_dict['_f_prob']\n return new_dict\n\n def __setstate__(self, state):\n \"\"\"Object.__setstate__.\"\"\"\n super().__setstate__(state)\n self._initialize()\n", "\"\"\"Discrete CNN QFunction with CNN-MLP structure.\"\"\"\nimport tensorflow as tf\n\nfrom metarl.tf.models import CNNModel\nfrom metarl.tf.models import CNNModelWithMaxPooling\nfrom metarl.tf.models import MLPDuelingModel\nfrom metarl.tf.models import MLPModel\nfrom metarl.tf.models import Sequential\nfrom metarl.tf.q_functions import QFunction\n\n\nclass DiscreteCNNQFunction(QFunction):\n \"\"\"Q 
function based on a CNN-MLP structure for discrete action space.\n\n This class implements a Q value network to predict Q based on the\n input state and action. It uses an CNN and a MLP to fit the function\n of Q(s, a).\n\n Args:\n env_spec (metarl.envs.env_spec.EnvSpec): Environment specification.\n filter_dims (tuple[int]): Dimension of the filters. For example,\n (3, 5) means there are two convolutional layers. The filter for\n first layer is of dimension (3 x 3) and the second one is of\n dimension (5 x 5).\n num_filters (tuple[int]): Number of filters. For example, (3, 32) means\n there are two convolutional layers. The filter for the first layer\n has 3 channels and the second one with 32 channels.\n strides (tuple[int]): The stride of the sliding window. For example,\n (1, 2) means there are two convolutional layers. The stride of the\n filter for first layer is 1 and that of the second layer is 2.\n hidden_sizes (list[int]): Output dimension of dense layer(s).\n For example, (32, 32) means the MLP of this q-function consists of\n two hidden layers, each with 32 hidden units.\n name (str): Variable scope of the cnn.\n padding (str): The type of padding algorithm to use,\n either 'SAME' or 'VALID'.\n max_pooling (bool): Boolean for using max pooling layer or not.\n pool_shapes (tuple[int]): Dimension of the pooling layer(s). For\n example, (2, 2) means that all the pooling layers have\n shape (2, 2).\n pool_strides (tuple[int]): The strides of the pooling layer(s). For\n example, (2, 2) means that all the pooling layers have\n strides (2, 2).\n cnn_hidden_nonlinearity (callable): Activation function for\n intermediate dense layer(s) in the CNN. It should return a\n tf.Tensor. Set it to None to maintain a linear activation.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s) in the MLP. It should return a tf.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s) in the MLP. The function should\n return a tf.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s) in the MLP. The function should\n return a tf.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer in the MLP. It should return a tf.Tensor. Set it to None\n to maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s) in the MLP. The function should return\n a tf.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s) in the MLP. 
The function should return\n a tf.Tensor.\n dueling (bool): Bool for using dueling network or not.\n layer_normalization (bool): Bool for using layer normalization or not.\n \"\"\"\n\n def __init__(self,\n env_spec,\n filter_dims,\n num_filters,\n strides,\n hidden_sizes=[256],\n name=None,\n padding='SAME',\n max_pooling=False,\n pool_strides=(2, 2),\n pool_shapes=(2, 2),\n cnn_hidden_nonlinearity=tf.nn.relu,\n hidden_nonlinearity=tf.nn.relu,\n hidden_w_init=tf.glorot_uniform_initializer(),\n hidden_b_init=tf.zeros_initializer(),\n output_nonlinearity=None,\n output_w_init=tf.glorot_uniform_initializer(),\n output_b_init=tf.zeros_initializer(),\n dueling=False,\n layer_normalization=False):\n super().__init__(name)\n self._env_spec = env_spec\n self._action_dim = env_spec.action_space.n\n self._filter_dims = filter_dims\n self._num_filters = num_filters\n self._strides = strides\n self._hidden_sizes = hidden_sizes\n self._padding = padding\n self._max_pooling = max_pooling\n self._pool_strides = pool_strides\n self._pool_shapes = pool_shapes\n self._cnn_hidden_nonlinearity = cnn_hidden_nonlinearity\n self._hidden_nonlinearity = hidden_nonlinearity\n self._hidden_w_init = hidden_w_init\n self._hidden_b_init = hidden_b_init\n self._output_nonlinearity = output_nonlinearity\n self._output_w_init = output_w_init\n self._output_b_init = output_b_init\n self._layer_normalization = layer_normalization\n self._dueling = dueling\n\n self.obs_dim = self._env_spec.observation_space.shape\n action_dim = self._env_spec.action_space.flat_dim\n\n if not max_pooling:\n cnn_model = CNNModel(filter_dims=filter_dims,\n num_filters=num_filters,\n strides=strides,\n padding=padding,\n hidden_nonlinearity=cnn_hidden_nonlinearity)\n else:\n cnn_model = CNNModelWithMaxPooling(\n filter_dims=filter_dims,\n num_filters=num_filters,\n strides=strides,\n padding=padding,\n pool_strides=pool_strides,\n pool_shapes=pool_shapes,\n hidden_nonlinearity=cnn_hidden_nonlinearity)\n if not dueling:\n output_model = MLPModel(output_dim=action_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n layer_normalization=layer_normalization)\n else:\n output_model = MLPDuelingModel(\n output_dim=action_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n layer_normalization=layer_normalization)\n\n self.model = Sequential(cnn_model, output_model)\n\n self._initialize()\n\n def _initialize(self):\n obs_ph = tf.compat.v1.placeholder(tf.float32, (None, ) + self.obs_dim,\n name='obs')\n with tf.compat.v1.variable_scope(self.name) as vs:\n self._variable_scope = vs\n self.model.build(obs_ph)\n\n @property\n def q_vals(self):\n \"\"\"Q values.\"\"\"\n return self.model.networks['default'].outputs\n\n @property\n def input(self):\n \"\"\"Input tf.Tensor of the Q-function.\"\"\"\n return self.model.networks['default'].input\n\n def get_qval_sym(self, state_input, name):\n \"\"\"Symbolic graph for q-network.\n\n Args:\n state_input (tf.Tensor): The state input tf.Tensor to the network.\n name (str): Network variable scope.\n\n Return:\n The tf.Tensor output of Discrete CNN QFunction.\n \"\"\"\n with tf.compat.v1.variable_scope(self._variable_scope):\n return 
self.model.build(state_input, name=name)\n\n def clone(self, name):\n \"\"\"Return a clone of the Q-function.\n\n It only copies the configuration of the Q-function,\n not the parameters.\n\n Args:\n name: Name of the newly created q-function.\n \"\"\"\n return self.__class__(name=name,\n env_spec=self._env_spec,\n filter_dims=self._filter_dims,\n num_filters=self._num_filters,\n strides=self._strides,\n hidden_sizes=self._hidden_sizes,\n padding=self._padding,\n max_pooling=self._max_pooling,\n pool_shapes=self._pool_shapes,\n pool_strides=self._pool_strides,\n hidden_nonlinearity=self._hidden_nonlinearity,\n hidden_w_init=self._hidden_w_init,\n hidden_b_init=self._hidden_b_init,\n output_nonlinearity=self._output_nonlinearity,\n output_w_init=self._output_w_init,\n output_b_init=self._output_b_init,\n dueling=self._dueling,\n layer_normalization=self._layer_normalization)\n\n def __setstate__(self, state):\n \"\"\"Object.__setstate__.\"\"\"\n self.__dict__.update(state)\n self._initialize()\n", "from dm_control import suite\nfrom dm_control.rl.control import flatten_observation\nfrom dm_control.rl.environment import StepType\nimport gym\nimport numpy as np\n\nfrom metarl.envs import Step\nfrom metarl.envs.dm_control.dm_control_viewer import DmControlViewer\n\n\nclass DmControlEnv(gym.Env):\n \"\"\"\n Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_\n \"\"\"\n\n def __init__(self, env, name=None):\n self._name = name or type(env.task).__name__\n self._env = env\n self._viewer = None\n\n @classmethod\n def from_suite(cls, domain_name, task_name):\n return cls(suite.load(domain_name, task_name),\n name='{}.{}'.format(domain_name, task_name))\n\n def step(self, action):\n time_step = self._env.step(action)\n return Step(\n flatten_observation(time_step.observation)['observations'],\n time_step.reward, time_step.step_type == StepType.LAST,\n **time_step.observation)\n\n def reset(self):\n time_step = self._env.reset()\n return flatten_observation(time_step.observation)['observations']\n\n def render(self, mode='human'):\n # pylint: disable=inconsistent-return-statements\n if mode == 'human':\n if not self._viewer:\n title = 'dm_control {}'.format(self._name)\n self._viewer = DmControlViewer(title=title)\n self._viewer.launch(self._env)\n self._viewer.render()\n return None\n elif mode == 'rgb_array':\n return self._env.physics.render()\n else:\n raise NotImplementedError\n\n def close(self):\n if self._viewer:\n self._viewer.close()\n self._env.close()\n self._viewer = None\n self._env = None\n\n def _flat_shape(self, observation):\n return np.sum(int(np.prod(v.shape)) for k, v in observation.items())\n\n @property\n def action_space(self):\n action_spec = self._env.action_spec()\n if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or\n np.inf in action_spec.maximum):\n return gym.spaces.Discrete(np.prod(action_spec.shape))\n else:\n return gym.spaces.Box(action_spec.minimum,\n action_spec.maximum,\n dtype=np.float32)\n\n @property\n def observation_space(self):\n flat_dim = self._flat_shape(self._env.observation_spec())\n return gym.spaces.Box(low=-np.inf,\n high=np.inf,\n shape=[flat_dim],\n dtype=np.float32)\n\n def __getstate__(self):\n d = self.__dict__.copy()\n d['_viewer'] = None\n return d\n", "import numpy as np\n\nfrom metarl.envs.wrappers import Noop\nfrom tests.fixtures.envs.dummy import DummyDiscretePixelEnv\n\n\nclass TestNoop:\n def test_noop(self):\n env = Noop(DummyDiscretePixelEnv(), noop_max=3)\n\n for _ in range(1000):\n env.reset()\n 
assert 1 <= env.env.step_called <= 3\n\n env = Noop(DummyDiscretePixelEnv(), noop_max=10)\n for _ in range(1000):\n obs = env.reset()\n if env.env.step_called % 5 == 0:\n \"\"\"\n There are only 5 lives in the environment, so if number of\n steps are multiple of 5, env will call reset at last.\n \"\"\"\n assert np.array_equal(obs,\n np.ones(env.observation_space.shape))\n else:\n assert not np.array_equal(obs,\n np.ones(env.observation_space.shape))\n", "\"\"\"GaussianMLPModule.\"\"\"\nimport abc\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom metarl.torch.distributions import TanhNormal\nfrom metarl.torch.modules.mlp_module import MLPModule\nfrom metarl.torch.modules.multi_headed_mlp_module import MultiHeadedMLPModule\n\n\nclass TanhGaussianMLPBaseModule2(nn.Module):\n \"\"\"\n GaussianMLPModel.\n\n Args:\n input_dim (int): Input dimension of the model.\n output_dim (int): Output dimension of the model.\n hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for mean. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s). It should return a torch.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a torch.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n torch.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). The function should return a\n torch.Tensor.\n learn_std (bool): Is std trainable.\n init_std (float): Initial value for std.\n (plain value - not log or exponentiated).\n adaptive_std (bool): Is std a neural network. If False, it will be a\n parameter.\n std_share_network (bool): Boolean for whether mean and std share\n the same network.\n std_hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for std. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n min_std (float): If not None, the std is at least the value of min_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n max_std (float): If not None, the std is at most the value of max_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n std_hidden_nonlinearity: Nonlinearity for each hidden layer in\n the std network.\n std_output_w_init (callable): Initializer function for the weight\n of output dense layer(s) in the std network.\n std_parametrization (str): How the std should be parametrized. 
There\n are two options:\n - exp: the logarithm of the std will be stored, and applied a\n exponential transformation\n - softplus: the std will be computed as log(1+exp(x))\n layer_normalization (bool): Bool for using layer normalization or not.\n \"\"\"\n\n def __init__(self,\n input_dim,\n output_dim,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=torch.tanh,\n hidden_w_init=nn.init.xavier_uniform_,\n hidden_b_init=nn.init.zeros_,\n output_nonlinearity=None,\n output_w_init=nn.init.xavier_uniform_,\n output_b_init=nn.init.zeros_,\n learn_std=True,\n init_std=1.0,\n min_std=1e-6,\n max_std=None,\n std_hidden_sizes=(32, 32),\n std_hidden_nonlinearity=nn.ReLU,\n std_hidden_w_init=nn.init.xavier_uniform_,\n std_hidden_b_init=nn.init.zeros_,\n std_output_w_init=nn.init.xavier_uniform_,\n std_parameterization='exp',\n layer_normalization=False):\n super().__init__()\n\n self._input_dim = input_dim\n self._hidden_sizes = hidden_sizes\n self._action_dim = output_dim\n self._learn_std = learn_std\n self._std_hidden_sizes = std_hidden_sizes\n self._min_std = min_std\n self._max_std = max_std\n self._std_hidden_nonlinearity = std_hidden_nonlinearity\n self._std_hidden_w_init = std_hidden_w_init\n self._std_hidden_b_init = std_hidden_b_init\n self._std_output_nonlinearity = torch.tanh\n self._std_output_w_init = std_output_w_init\n self._std_parameterization = std_parameterization\n self._hidden_nonlinearity = hidden_nonlinearity\n self._hidden_w_init = hidden_w_init\n self._hidden_b_init = hidden_b_init\n self._output_nonlinearity = output_nonlinearity\n self._output_w_init = output_w_init\n self._output_b_init = output_b_init\n self._layer_normalization = layer_normalization\n\n if self._std_parameterization not in ('exp', 'softplus'):\n raise NotImplementedError\n\n init_std_param = torch.Tensor([init_std]).log()\n if self._learn_std:\n self._init_std = torch.nn.Parameter(init_std_param)\n else:\n self._init_std = init_std_param\n self.register_buffer('init_std', self._init_std)\n\n self._min_std_param = self._max_std_param = None\n if min_std is not None:\n self._min_std_param = torch.Tensor([min_std]).log()\n self.register_buffer('min_std_param', self._min_std_param)\n if max_std is not None:\n self._max_std_param = torch.Tensor([max_std]).log()\n self.register_buffer('max_std_param', self._max_std_param)\n \n def to(self, *args, **kwargs):\n super().to(*args, **kwargs)\n buffers = dict(self.named_buffers())\n if not isinstance(self._init_std, torch.nn.Parameter):\n self._init_std = buffers['init_std']\n self._min_std_param = buffers['min_std_param']\n self._max_std_param = buffers['max_std_param']\n\n @abc.abstractmethod\n def _get_mean_and_log_std(self, inputs):\n pass\n\n def forward(self, inputs):\n \"\"\"Forward method.\"\"\"\n mean, log_std_uncentered = self._get_mean_and_log_std(inputs)\n # requires that std nonlinearity is tanh\n log_std_uncentered = self._min_std_param + 0.5 * (self._max_std_param - self._min_std_param) * (log_std_uncentered + 1.)\n if self._std_parameterization == 'exp':\n std = log_std_uncentered.exp()\n else:\n std = log_std_uncentered.exp().exp().add(1.).log()\n dist = TanhNormal(mean, std)\n return dist\n\n def _to_scalar_if_not_none(self, tensor):\n return None if tensor is None else tensor.item()\n\n\nclass TanhGaussianMLPTwoHeadedModule2(TanhGaussianMLPBaseModule2):\n \"\"\"GaussianMLPModule which has only one mean network.\"\"\"\n\n def __init__(self,\n input_dim,\n output_dim,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=nn.ReLU,\n 
hidden_w_init=nn.init.xavier_uniform_,\n hidden_b_init=nn.init.zeros_,\n output_nonlinearity=nn.ReLU,\n output_w_init=nn.init.xavier_uniform_,\n output_b_init=nn.init.zeros_,\n learn_std=True,\n init_std=1.0,\n min_std=np.exp(-20.),\n max_std=np.exp(2.),\n std_parameterization='exp',\n layer_normalization=False):\n\n super(TanhGaussianMLPTwoHeadedModule2,\n self).__init__(input_dim=input_dim,\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n learn_std=learn_std,\n init_std=init_std,\n min_std=min_std,\n max_std=max_std,\n std_parameterization=std_parameterization,\n layer_normalization=layer_normalization)\n self._shared_mean_log_std_network = MultiHeadedMLPModule(\n n_heads=2,\n input_dim=self._input_dim,\n output_dims=self._action_dim,\n hidden_sizes=self._hidden_sizes,\n hidden_nonlinearity=self._hidden_nonlinearity,\n hidden_w_init=self._hidden_w_init,\n hidden_b_init=self._hidden_b_init,\n output_nonlinearities=[None, nn.Tanh],\n output_w_inits=self._output_w_init,\n output_b_inits=[\n nn.init.zeros_,\n lambda x: nn.init.constant_(x, self._init_std.item())\n ],\n layer_normalization=self._layer_normalization)\n\n def _get_mean_and_log_std(self, inputs):\n return self._shared_mean_log_std_network(inputs)\n", "# pylint: disable=attribute-defined-outside-init\n\"\"\"A recurrent network with LSTM for encoding context of RL tasks.\"\"\"\n\nimport torch\nfrom torch import nn\n\nfrom metarl.torch.modules import MLPModule\n\n\nclass RecurrentEncoder(MLPModule):\n \"\"\"This recurrent network encodes context of RL tasks.\n\n Context is stored in the terms of observation, action, and reward, and this\n network uses an MLP module followed by an LSTM model for encoding it.\n\n Args:\n *args: MLPModule arguments.\n **kwargs: MLPModule arguments including:\n input_dim (int) : Dimension of the network input.\n output_dim (int): Dimension of the network output.\n hidden_sizes (list[int]): Output dimension of dense layer(s).\n For example, (32, 32) means this MLP consists of two\n hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable or torch.nn.Module): Activation\n function for intermediate dense layer(s). It should return a\n torch.Tensor.Set it to None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n output_nonlinearity (callable or torch.nn.Module): Activation\n function for output dense layer. It should return a\n torch.Tensor. Set it to None to maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n torch.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). 
The function should return a\n torch.Tensor.\n layer_normalization (bool): Bool for using layer normalization or\n not.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._hidden_dim = self._hidden_sizes[-1]\n # hidden dimension should be (task, 1, feat)\n self.register_buffer('hidden', torch.zeros(1, 1, self._hidden_dim))\n self._lstm = nn.LSTM(self._hidden_dim,\n self._hidden_dim,\n num_layers=1,\n batch_first=True)\n\n # pylint: disable=arguments-differ\n def forward(self, input_val):\n \"\"\"Forward method with LSTM.\n\n Args:\n input_val (torch.Tensor): Input values with shape\n (task, seq, feat).\n\n Returns:\n torch.Tensor: Output values.\n\n \"\"\"\n task, seq, feat = input_val.size()\n out = input_val.view(task * seq, feat)\n\n # embed with MLP\n for layer in self._layers:\n out = layer(out)\n if self._hidden_nonlinearity is not None:\n out = self._hidden_nonlinearity(out)\n out = out.view(task, seq, -1)\n\n # add LSTM before output layer\n # step through the entire sequence of LSTM all at once\n # out = all hidden states in the sequence\n # hn = last hidden state with gradients\n out, (hn,\n _) = self._lstm(out,\n (self.hidden, torch.zeros(self.hidden.size())))\n self.hidden = hn\n # take the last hidden state to predict z\n out = out[:, -1, :]\n\n # output layer\n output = self._output_layers[-1](out)\n if self._output_nonlinearity is not None:\n output = self._output_nonlinearity(output)\n\n output = output.view(task, -1, self._output_dim)\n\n return output\n\n def reset(self, num_tasks=1):\n \"\"\"Reset task size in hidden dimensions.\n\n Args:\n num_tasks (int): Size of tasks.\n\n \"\"\"\n self.hidden = self.hidden.new_full((1, num_tasks, self._hidden_dim), 0)\n\n def detach_hidden(self):\n \"\"\"Disable backprop through hidden.\"\"\"\n self.hidden = self.hidden.detach()\n", "\"\"\"This modules creates a continuous Q-function network.\"\"\"\n\nimport torch\n\nfrom metarl.torch.modules import MLPModule\n\n\nclass ContinuousMLPQFunction(MLPModule):\n \"\"\"\n Implements a continuous MLP Q-value network.\n\n It predicts the Q-value for all actions based on the input state. It uses\n a PyTorch neural network module to fit the function of Q(s, a).\n \"\"\"\n\n def __init__(self, env_spec, **kwargs):\n \"\"\"\n Initialize class with multiple attributes.\n\n Args:\n env_spec (metarl.envs.env_spec.EnvSpec): Environment specification.\n nn_module (nn.Module): Neural network module in PyTorch.\n \"\"\"\n self._env_spec = env_spec\n self._obs_dim = env_spec.observation_space.flat_dim\n self._action_dim = env_spec.action_space.flat_dim\n\n MLPModule.__init__(self,\n input_dim=self._obs_dim + self._action_dim,\n output_dim=1,\n **kwargs)\n\n def forward(self, observations, actions):\n \"\"\"Return Q-value(s).\"\"\"\n return super().forward(torch.cat([observations, actions], 1))\n" ]
[ [ "numpy.expand_dims", "torch.Tensor", "numpy.arange", "numpy.ones", "torch.nn.Linear", "tensorflow.compat.v1.placeholder", "numpy.zeros" ], [ "tensorflow.Graph", "numpy.array_equal", "tensorflow.compat.v1.get_variable", "tensorflow.compat.v1.trainable_variables", "numpy.full", "tensorflow.compat.v1.variable_scope" ], [ "tensorflow.zeros_initializer", "tensorflow.compat.v1.get_default_session", "tensorflow.compat.v1.placeholder", "tensorflow.glorot_uniform_initializer", "tensorflow.compat.v1.variable_scope" ], [ "tensorflow.zeros_initializer", "tensorflow.compat.v1.variable_scope", "tensorflow.glorot_uniform_initializer", "tensorflow.compat.v1.placeholder" ], [ "numpy.prod" ], [ "numpy.ones" ], [ "torch.nn.Parameter", "numpy.exp", "torch.Tensor" ], [ "torch.zeros", "torch.nn.LSTM" ], [ "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jhbrito/HelloWorlds
[ "7e2247ca7f312a516ce6a5054913d59e2f1de0f9" ]
[ "HelloWorldOpenCV.py" ]
[ "# Demo with a few examples of using OpenCV functions and UI\n# packages: opencv-python\n# uses lena: https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png\n\nimport numpy as np\nimport cv2\n\nprint(\"Hello World OpenCV\")\nprint(\"OpenCV Version:\", cv2.__version__)\n\nimage = np.ones((256, 256), dtype=\"uint8\")\nimage = image * 127\nimage[0:128, 0:128] = 0\nimage[128:, 128:] = 255\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)\n\n# Opening and Viewing an Image\nimport os.path\n\nif os.path.isfile('lena.png'):\n print(\"Test Image File exist\")\nelse:\n print(\"Test Image File does not exist; downloading...\")\n import urllib.request as urllib_request\n\n urllib_request.urlretrieve(\"https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png\", \"lena.png\")\n\nimage = cv2.imread(\"./lena.png\")\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nrgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\ncv2.imshow(\"Image RGB\", rgb_image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\ndef viewImage(image, name_of_window):\n cv2.namedWindow(name_of_window, cv2.WINDOW_AUTOSIZE)\n cv2.imshow(name_of_window, image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nviewImage(image, \"Lena\")\n\n# Edit pixels\nedited = image.copy()\nedited[200:390, 200:360, 0] = 255\nviewImage(edited, \"Lena edited\")\n\n# Cropping\ncropped = image[200:390, 200:360]\nviewImage(cropped, \"Lena cropped\")\n\n# Resizing\nscale_percent = 10 # percent of original size\nwidth = int(image.shape[1] * scale_percent / 100)\nheight = int(image.shape[0] * scale_percent / 100)\ndim = (width, height)\nresized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\nviewImage(resized, \"Lena resized to {}%\".format(scale_percent))\n\n# Drawing a Rectangle\noutput = image.copy()\ncv2.rectangle(output, (200, 200), (360, 390), (255, 0, 0), 10)\nviewImage(output, \"Lena with a rectangle\")\n\n# Drawing a line\ncv2.line(output, (256, 390), (256, 512), (0, 0, 255), 5)\nviewImage(output, \"Lena with a line\")\n\n# Writing on an image\ncv2.putText(output, \"Lena\", (360, 390), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)\nviewImage(output, \"Lena with text\")\n\n# Saving an image\ncv2.imwrite(\"./output.jpg\", output)\n\n# Blurring/Smoothing\nblurred = cv2.GaussianBlur(image, (15, 15), 0)\nviewImage(blurred, \"Lena blurred\")\n\n# Rotating\n(h, w, d) = image.shape\ncenter = (w // 2, h // 2)\nrot = 45\nM = cv2.getRotationMatrix2D(center, rot, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\nviewImage(rotated, \"Lena rotated by {} degrees\".format(rot))\n\n# Blend\nalpha_slider_max = 100\n\n\ndef on_trackbar_weight(val):\n alpha = val / alpha_slider_max\n beta = (1.0 - alpha)\n blend = cv2.addWeighted(image, alpha, rotated, beta, 0.0)\n cv2.imshow('Lena blended', blend)\n\n\ncv2.namedWindow('Lena blended')\ntrackbar_name = 'Alpha 0 - {}'.format(alpha_slider_max)\ncv2.createTrackbar(trackbar_name, 'Lena blended', 50, alpha_slider_max, on_trackbar_weight)\non_trackbar_weight(50)\ncv2.waitKey()\ncv2.destroyWindow('Lena blended')\n\n# Grayscaling\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nviewImage(gray_image, \"Lena gray-scale\")\n\n# Thresholding\nthreshold_slider_max = 255\nthreshold = 200\nret, threshold_image = cv2.threshold(gray_image, threshold, 255, 0)\n\n\ndef on_trackbar_threshold(val):\n threshold = val\n ret, threshold_image = cv2.threshold(gray_image, threshold, 255, 0)\n cv2.imshow(\"Lena thresholded\", threshold_image)\n\n\ncv2.namedWindow(\"Lena 
thresholded\")\ntrackbar_name = \"Threshold 0 - {}\".format(threshold_slider_max)\ncv2.createTrackbar(trackbar_name, \"Lena thresholded\", threshold, threshold_slider_max, on_trackbar_threshold)\non_trackbar_threshold(threshold)\ncv2.waitKey()\ncv2.destroyWindow(\"Lena thresholded\")\n\n# Contours\ncontours, hierarchy = cv2.findContours(threshold_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\nimage_with_contours = image.copy()\ncv2.drawContours(image_with_contours, contours, -1, (255, 0, 0), 1)\nviewImage(image_with_contours, \"Lena contours\")\n\n# Face Detection\nface_cascade = cv2.CascadeClassifier('venv\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml')\nfaces = face_cascade.detectMultiScale(gray_image)\nprint(\"Lena with {} faces detected\".format(len(faces)))\nimage_faces = image.copy()\nfor (x, y, w, h) in faces:\n cv2.rectangle(image_faces, (x, y), (x + w, y + h), (0, 255, 0), 2)\nviewImage(image_faces, \"Lena with {} faces detected\".format(len(faces)))\n\n\ndef display_box(im, bbox):\n n_boxes = len(bbox)\n for j_box in range(n_boxes):\n for j in range(4):\n cv2.line(im,\n (int(bbox[j_box][j][0]), int(bbox[j_box][j][1])),\n (int(bbox[j_box][(j + 1) % 4][0]), int(bbox[j_box][(j + 1) % 4][1])),\n (255, 0, 0), 3)\n # Display results\n cv2.imshow(\"Results\", im)\n\n\ninputImage = cv2.imread(\"qrcode.jpg\")\nqrDecoder = cv2.QRCodeDetector()\ndata, bbox, rectifiedImage = qrDecoder.detectAndDecode(inputImage)\nif len(data) > 0:\n print(\"Decoded Data : {}\".format(data))\n display_box(inputImage, bbox)\n rectifiedImage = np.uint8(rectifiedImage)\n cv2.imshow(\"Rectified QRCode\", rectifiedImage)\nelse:\n print(\"QR Code not detected\")\n cv2.imshow(\"Results\", inputImage)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.uint8", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rish-16/gym-navmaze
[ "cc21d730ec6ab1e96a4a1a8f602a5bbb951d2929" ]
[ "src/cartpole.py" ]
[ "import numpy as np\nfrom collections import deque\nimport pickle\nimport torch\nfrom utils import collect_trajectories, random_sample\nfrom PPO import PPO\nimport matplotlib.pyplot as plt\nfrom parallelEnv import *\nimport gym\n\nenv = gym.make(\"CartPole-v0\")\nenv.reset()\nenv.seed(2)\n\nobs_dim = env.observation_space.shape[0]\nn_actions = env.action_space.n\nact_dist = [0 for i in range(n_actions)]\n\ndef train(episode, env_name):\n gamma = .99\n gae_lambda = 0.95\n use_gae = True\n beta = .01\n cliprange = 0.1\n best_score = -np.inf\n goal_score = 195.0\n ep_length = []\n\n nenvs = 1\n rollout_length = 200\n minibatches = 10*8\n nbatch = nenvs * rollout_length\n optimization_epochs = 4\n \n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n envs = parallelEnv(env_name, nenvs, seed=1234)\n agent = PPO(state_size=obs_dim,\n action_size=n_actions,\n seed=0,\n hidden_layers=[64,64],\n lr_policy=1e-4, \n use_reset=True,\n device=device)\n\n print(agent.policy)\n\n # keep track of progress\n mean_rewards = []\n scores_window = deque(maxlen=100)\n loss_storage = []\n\n for i_episode in range(episode+1):\n log_probs_old, states, actions, rewards, values, dones, vals_last, infos, ep_length = collect_trajectories(envs, act_dist, ep_length, agent.policy, rollout_length)\n\n returns = np.zeros_like(rewards)\n advantages = np.zeros_like(rewards)\n \n if not use_gae:\n for t in reversed(range(rollout_length)):\n if t == rollout_length - 1:\n returns[t] = rewards[t] + gamma * (1-dones[t]) * vals_last\n else:\n returns[t] = rewards[t] + gamma * (1-dones[t]) * returns[t+1]\n advantages[t] = returns[t] - values[t]\n else:\n for t in reversed(range(rollout_length)):\n if t == rollout_length - 1:\n returns[t] = rewards[t] + gamma * (1-dones[t]) * vals_last\n td_error = returns[t] - values[t]\n else:\n returns[t] = rewards[t] + gamma * (1-dones[t]) * returns[t+1]\n td_error = rewards[t] + gamma * (1-dones[t]) * values[t+1] - values[t]\n advantages[t] = advantages[t] * gae_lambda * gamma * (1-dones[t]) + td_error\n \n # convert to pytorch tensors and move to gpu if available\n returns = torch.from_numpy(returns).float().to(device).view(-1,)\n advantages = torch.from_numpy(advantages).float().to(device).view(-1,)\n advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n \n for _ in range(optimization_epochs):\n sampler = random_sample(nbatch, minibatches)\n for inds in sampler:\n mb_log_probs_old = log_probs_old[inds]\n mb_states = states[inds]\n mb_actions = actions[inds]\n mb_returns = returns[inds]\n mb_advantages = advantages[inds]\n loss_p, loss_v, loss_ent = agent.update(mb_log_probs_old, mb_states, mb_actions, mb_returns, mb_advantages, cliprange=cliprange, beta=beta)\n loss_storage.append([loss_p, loss_v, loss_ent])\n \n total_rewards = np.sum(rewards, axis=0)\n scores_window.append(np.mean(total_rewards)) # last 100 scores\n mean_rewards.append(np.mean(total_rewards)) # get the average reward of the parallel environments\n cliprange *= 0.999 # the clipping parameter reduces as time goes on\n beta *= 0.999 # the regulation term reduces\n \n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n print(total_rewards)\n if np.mean(scores_window)>=goal_score and np.mean(scores_window)>=best_score: \n torch.save(agent.policy.state_dict(), \"policy_cartpole.pth\")\n best_score = np.mean(scores_window)\n \n return mean_rewards, loss_storage, act_dist, ep_length\n\nmean_rewards, loss, 
new_act_dist, ep_length = train(10000, 'CartPole-v0')\n\nprint (new_act_dist[-1])\nprint (ep_length)\n\nplt.rcParams['xtick.direction'] = 'in'\nplt.rcParams['ytick.direction'] = 'in'\nplt.rcParams['font.size'] = 10\n\nplt.title(\"PPO + MLP + GAE for 10000 episodes\")\n\nplt.subplot(131)\nplt.plot(mean_rewards)\nplt.ylabel('Average score')\nplt.xlabel('Episode')\n\nplt.subplot(132)\nplt.plot(list(range(len(ep_length))), ep_length, color=\"red\")\nplt.ylabel('Episode Length')\nplt.xlabel('Episode')\n\nplt.subplot(133)\nplt.ylabel('Frequency')\nplt.xlabel('Actions')\nplt.bar(['Action {}'.format(i) for i in range(len(new_act_dist))], new_act_dist[-1])\n\nplt.show()" ]
[ [ "matplotlib.pyplot.title", "torch.from_numpy", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "numpy.zeros_like", "numpy.mean", "torch.cuda.is_available", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
williamberrios/lofo-importance
[ "34967cf47dc1c2797d3a77f8926918ae91e4197a" ]
[ "lofo/infer_defaults.py" ]
[ "import numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom lightgbm import LGBMClassifier, LGBMRegressor\n\n\ndef infer_model(df, features, y, n_jobs):\n model_class = LGBMRegressor\n if len(np.unique(y)) == 2:\n y = LabelEncoder().fit_transform(y)\n model_class = LGBMClassifier\n\n categoricals = df[features].select_dtypes(exclude=[np.number]).columns.tolist()\n for f in categoricals:\n df[f] = LabelEncoder().fit_transform(df[f].apply(str))\n\n min_child_samples = int(0.01*df.shape[0])\n\n model = model_class(min_child_samples=min_child_samples, n_jobs=n_jobs)\n\n return model, df, categoricals, y\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kolibril13/napari
[ "b39647d94e587f0255b0d4cc3087855e160a8929", "b39647d94e587f0255b0d4cc3087855e160a8929" ]
[ "napari/_vispy/overlays/axes.py", "napari/components/viewer_model.py" ]
[ "import numpy as np\nfrom vispy.scene.visuals import Compound, Line, Mesh, Text\nfrom vispy.visuals.transforms import STTransform\n\nfrom ...layers.shapes._shapes_utils import triangulate_ellipse\nfrom ...utils.colormaps.standardize_color import transform_color\nfrom ...utils.theme import get_theme\nfrom ...utils.translations import trans\n\n\ndef make_dashed_line(num_dashes, axis):\n \"\"\"Make a dashed line.\n\n Parameters\n ----------\n num_dashes : int\n Number of dashes in the line.\n axis : int\n Axis which is dashed.\n\n Returns\n -------\n np.ndarray\n Dashed line, of shape (num_dashes, 3) with zeros in\n the non dashed axes and line segments in the dashed\n axis.\n \"\"\"\n dashes = np.linspace(0, 1, num_dashes * 2)\n dashed_line_ends = np.concatenate(\n [[dashes[2 * i], dashes[2 * i + 1]] for i in range(num_dashes)], axis=0\n )\n dashed_line = np.zeros((2 * num_dashes, 3))\n dashed_line[:, axis] = np.array(dashed_line_ends)\n return dashed_line\n\n\ndef make_arrow_head(num_segments, axis):\n \"\"\"Make an arrowhead line.\n\n Parameters\n ----------\n num_segments : int\n Number of segments in the arrowhead.\n axis\n Arrowhead direction.\n\n Returns\n -------\n np.ndarray, np.ndarray\n Vertices and faces of the arrowhead.\n \"\"\"\n corners = np.array([[-1, -1], [-1, 1], [1, 1], [1, -1]]) * 0.1\n vertices, faces = triangulate_ellipse(corners, num_segments)\n full_vertices = np.zeros((num_segments + 1, 3))\n inds = list(range(3))\n inds.pop(axis)\n full_vertices[:, inds] = vertices\n full_vertices[:, axis] = 0.9\n full_vertices[0, axis] = 1.02\n return full_vertices, faces\n\n\ndef color_lines(colors):\n if len(colors) == 2:\n return np.concatenate(\n [[colors[0]] * 2, [colors[1]] * 2],\n axis=0,\n )\n elif len(colors) == 3:\n return np.concatenate(\n [[colors[0]] * 2, [colors[1]] * 2, [colors[2]] * 2],\n axis=0,\n )\n else:\n return ValueError(\n trans._(\n 'Either 2 or 3 colors must be provided, got {number}.',\n deferred=True,\n number=len(colors),\n )\n )\n\n\ndef color_dashed_lines(colors):\n if len(colors) == 2:\n return np.concatenate(\n [[colors[0]] * 2, [colors[1]] * 4 * 2],\n axis=0,\n )\n elif len(colors) == 3:\n return np.concatenate(\n [[colors[0]] * 2, [colors[1]] * 4 * 2, [colors[2]] * 8 * 2],\n axis=0,\n )\n else:\n return ValueError(\n trans._(\n 'Either 2 or 3 colors must be provided, got {number}.',\n deferred=True,\n number=len(colors),\n )\n )\n\n\ndef color_arrowheads(colors, num_segments):\n if len(colors) == 2:\n return np.concatenate(\n [[colors[0]] * num_segments, [colors[1]] * num_segments],\n axis=0,\n )\n elif len(colors) == 3:\n return np.concatenate(\n [\n [colors[0]] * num_segments,\n [colors[1]] * num_segments,\n [colors[2]] * num_segments,\n ],\n axis=0,\n )\n else:\n return ValueError(\n trans._(\n 'Either 2 or 3 colors must be provided, got {number}.',\n deferred=True,\n number=len(colors),\n )\n )\n\n\nclass VispyAxesOverlay:\n \"\"\"Axes indicating world coordinate origin and orientation.\"\"\"\n\n _NUM_SEGMENTS_ARROWHEAD = 100\n\n def __init__(self, viewer, parent=None, order=0):\n self._viewer = viewer\n self._scale = 1\n\n # Target axes length in canvas pixels\n self._target_length = 80\n # CMYRGB for 6 axes data in x, y, z, ... 
ordering\n self._default_color = [\n [0, 1, 1, 1],\n [1, 0, 1, 1],\n [1, 1, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n ]\n # Text offset from line end position\n self._text_offsets = 0.1 * np.array([1, 1, 1])\n\n # note order is x, y, z for VisPy\n self._line_data2D = np.array(\n [[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0]]\n )\n self._line_data3D = np.array(\n [[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]]\n )\n\n # note order is x, y, z for VisPy\n self._dashed_line_data2D = np.concatenate(\n [[[1, 0, 0], [0, 0, 0]], make_dashed_line(4, axis=1)],\n axis=0,\n )\n self._dashed_line_data3D = np.concatenate(\n [\n [[1, 0, 0], [0, 0, 0]],\n make_dashed_line(4, axis=1),\n make_dashed_line(8, axis=2),\n ],\n axis=0,\n )\n\n # note order is x, y, z for VisPy\n vertices = np.empty((0, 3))\n faces = np.empty((0, 3))\n for axis in range(2):\n v, f = make_arrow_head(self._NUM_SEGMENTS_ARROWHEAD, axis)\n faces = np.concatenate([faces, f + len(vertices)], axis=0)\n vertices = np.concatenate([vertices, v], axis=0)\n self._default_arrow_vertices2D = vertices\n self._default_arrow_faces2D = faces.astype(int)\n\n vertices = np.empty((0, 3))\n faces = np.empty((0, 3))\n for axis in range(3):\n v, f = make_arrow_head(self._NUM_SEGMENTS_ARROWHEAD, axis)\n faces = np.concatenate([faces, f + len(vertices)], axis=0)\n vertices = np.concatenate([vertices, v], axis=0)\n self._default_arrow_vertices3D = vertices\n self._default_arrow_faces3D = faces.astype(int)\n\n self.node = Compound(\n [Line(connect='segments', method='gl', width=3), Mesh(), Text()],\n parent=parent,\n )\n self.node.transform = STTransform()\n self.node.order = order\n\n # Add a text node to display axes labels\n self.text_node = self.node._subvisuals[2]\n self.text_node.font_size = 10\n self.text_node.anchors = ('center', 'center')\n self.text_node.text = f'{1}'\n\n self.node.canvas._backend.destroyed.connect(self._set_canvas_none)\n # End Note\n\n self._viewer.events.theme.connect(self._on_data_change)\n self._viewer.axes.events.visible.connect(self._on_visible_change)\n self._viewer.axes.events.colored.connect(self._on_data_change)\n self._viewer.axes.events.dashed.connect(self._on_data_change)\n self._viewer.axes.events.labels.connect(self._on_data_change)\n self._viewer.axes.events.arrows.connect(self._on_data_change)\n self._viewer.dims.events.order.connect(self._on_data_change)\n self._viewer.dims.events.range.connect(self._on_data_change)\n self._viewer.dims.events.ndisplay.connect(self._on_data_change)\n self._viewer.dims.events.axis_labels.connect(self._on_data_change)\n self._viewer.camera.events.zoom.connect(self._on_zoom_change)\n\n self._on_visible_change(None)\n self._on_data_change(None)\n\n def _set_canvas_none(self):\n self.node._set_canvas(None)\n self.text_node._set_canvas(None)\n\n def _on_visible_change(self, event):\n \"\"\"Change visibiliy of axes.\"\"\"\n self.node.visible = self._viewer.axes.visible\n self._on_zoom_change(event)\n self._on_data_change(event)\n\n def _on_data_change(self, event):\n \"\"\"Change style of axes.\"\"\"\n if not self._viewer.axes.visible:\n return\n\n # Determine which axes are displayed\n axes = self._viewer.dims.displayed\n\n # Actual number of displayed dims\n ndisplay = len(self._viewer.dims.displayed)\n\n # Determine the labels of those axes\n axes_labels = [self._viewer.dims.axis_labels[a] for a in axes[::-1]]\n # Counting backwards from total number of dimensions\n # determine axes positions. 
This is done as by default\n # the last NumPy axis corresponds to the first Vispy axis\n reversed_axes = [self._viewer.dims.ndim - 1 - a for a in axes[::-1]]\n\n # Determine colors of axes based on reverse position\n if self._viewer.axes.colored:\n axes_colors = [\n self._default_color[ra % len(self._default_color)]\n for ra in reversed_axes\n ]\n else:\n # the reason for using the `as_hex` here is to avoid\n # `UserWarning` which is emitted when RGB values are above 1\n background_color = get_theme(\n self._viewer.theme, False\n ).canvas.as_hex()\n background_color = transform_color(background_color)[0]\n color = np.subtract(1, background_color)\n color[-1] = background_color[-1]\n axes_colors = [color] * ndisplay\n\n # Determine data based on number of displayed dimensions and\n # axes visualization parameters\n if self._viewer.axes.dashed and ndisplay == 2:\n data = self._dashed_line_data2D\n color = color_dashed_lines(axes_colors)\n text_data = self._line_data2D[1::2]\n elif self._viewer.axes.dashed and ndisplay == 3:\n data = self._dashed_line_data3D\n color = color_dashed_lines(axes_colors)\n text_data = self._line_data3D[1::2]\n elif not self._viewer.axes.dashed and ndisplay == 2:\n data = self._line_data2D\n color = color_lines(axes_colors)\n text_data = self._line_data2D[1::2]\n elif not self._viewer.axes.dashed and ndisplay == 3:\n data = self._line_data3D\n color = color_lines(axes_colors)\n text_data = self._line_data3D[1::2]\n else:\n raise ValueError(\n trans._(\n 'Axes dash status and ndisplay combination not supported',\n deferred=True,\n )\n )\n\n if self._viewer.axes.arrows and ndisplay == 2:\n arrow_vertices = self._default_arrow_vertices2D\n arrow_faces = self._default_arrow_faces2D\n arrow_color = color_arrowheads(\n axes_colors, self._NUM_SEGMENTS_ARROWHEAD\n )\n elif self._viewer.axes.arrows and ndisplay == 3:\n arrow_vertices = self._default_arrow_vertices3D\n arrow_faces = self._default_arrow_faces3D\n arrow_color = color_arrowheads(\n axes_colors, self._NUM_SEGMENTS_ARROWHEAD\n )\n else:\n arrow_vertices = np.zeros((3, 3))\n arrow_faces = np.array([[0, 1, 2]])\n arrow_color = [[0, 0, 0, 0]]\n\n self.node._subvisuals[0].set_data(data, color)\n self.node._subvisuals[1].set_data(\n vertices=arrow_vertices,\n faces=arrow_faces,\n face_colors=arrow_color,\n )\n\n # Set visibility status of text\n self.text_node.visible = (\n self._viewer.axes.visible and self._viewer.axes.labels\n )\n self.text_node.text = axes_labels\n self.text_node.color = axes_colors\n self.text_node.pos = text_data + self._text_offsets\n\n def _on_zoom_change(self, event):\n \"\"\"Update axes length based on zoom scale.\"\"\"\n if not self._viewer.axes.visible:\n return\n\n scale = 1 / self._viewer.camera.zoom\n\n # If scale has not changed, do not redraw\n if abs(np.log10(self._scale) - np.log10(scale)) < 1e-4:\n return\n self._scale = scale\n scale_canvas2world = self._scale\n target_canvas_pixels = self._target_length\n scale = target_canvas_pixels * scale_canvas2world\n # Update axes scale\n self.node.transform.scale = [scale, scale, scale, 1]\n", "from __future__ import annotations\n\nimport inspect\nimport itertools\nimport os\nimport warnings\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Union,\n)\n\nimport numpy as np\nfrom pydantic import Extra, Field, validator\n\nfrom .. 
import layers\nfrom ..layers import Image, Layer\nfrom ..layers._source import layer_source\nfrom ..layers.image._image_utils import guess_labels\nfrom ..layers.utils.stack_utils import split_channels\nfrom ..settings import get_settings\nfrom ..utils._register import create_func as create_add_method\nfrom ..utils.colormaps import ensure_colormap\nfrom ..utils.events import Event, EventedModel, disconnect_events\nfrom ..utils.key_bindings import KeymapProvider\nfrom ..utils.misc import is_sequence\nfrom ..utils.mouse_bindings import MousemapProvider\nfrom ..utils.progress import progress\nfrom ..utils.theme import available_themes\nfrom ..utils.translations import trans\nfrom ._viewer_mouse_bindings import dims_scroll\nfrom .axes import Axes\nfrom .camera import Camera\nfrom .cursor import Cursor\nfrom .dims import Dims\nfrom .grid import GridCanvas\nfrom .layerlist import LayerList\nfrom .scale_bar import ScaleBar\nfrom .text_overlay import TextOverlay\nfrom .tooltip import Tooltip\n\nDEFAULT_THEME = 'dark'\nEXCLUDE_DICT = {\n 'keymap',\n '_mouse_wheel_gen',\n '_mouse_drag_gen',\n '_persisted_mouse_event',\n 'mouse_move_callbacks',\n 'mouse_drag_callbacks',\n 'mouse_wheel_callbacks',\n}\nEXCLUDE_JSON = EXCLUDE_DICT.union({'layers', 'active_layer'})\n\nif TYPE_CHECKING:\n from ..types import FullLayerData, LayerData\n\nPathLike = Union[str, Path]\nPathOrPaths = Union[PathLike, Sequence[PathLike]]\n\n__all__ = ['ViewerModel', 'valid_add_kwargs']\n\n\ndef _current_theme() -> str:\n return get_settings().appearance.theme\n\n\n# KeymapProvider & MousemapProvider should eventually be moved off the ViewerModel\nclass ViewerModel(KeymapProvider, MousemapProvider, EventedModel):\n \"\"\"Viewer containing the rendered scene, layers, and controlling elements\n including dimension sliders, and control bars for color limits.\n\n Parameters\n ----------\n title : string\n The title of the viewer window.\n ndisplay : {2, 3}\n Number of displayed dimensions.\n order : tuple of int\n Order in which dimensions are displayed where the last two or last\n three dimensions correspond to row x column or plane x row x column if\n ndisplay is 2 or 3.\n axis_labels : list of str\n Dimension names.\n\n Attributes\n ----------\n window : Window\n Parent window.\n layers : LayerList\n List of contained layers.\n dims : Dimensions\n Contains axes, indices, dimensions and sliders.\n \"\"\"\n\n # Using allow_mutation=False means these attributes aren't settable and don't\n # have an event emitter associated with them\n axes: Axes = Field(default_factory=Axes, allow_mutation=False)\n camera: Camera = Field(default_factory=Camera, allow_mutation=False)\n cursor: Cursor = Field(default_factory=Cursor, allow_mutation=False)\n dims: Dims = Field(default_factory=Dims, allow_mutation=False)\n grid: GridCanvas = Field(default_factory=GridCanvas, allow_mutation=False)\n layers: LayerList = Field(\n default_factory=LayerList, allow_mutation=False\n ) # Need to create custom JSON encoder for layer!\n scale_bar: ScaleBar = Field(default_factory=ScaleBar, allow_mutation=False)\n text_overlay: TextOverlay = Field(\n default_factory=TextOverlay, allow_mutation=False\n )\n\n help: str = ''\n status: str = 'Ready'\n tooltip: Tooltip = Field(default_factory=Tooltip, allow_mutation=False)\n theme: str = Field(default_factory=_current_theme)\n title: str = 'napari'\n\n # 2-tuple indicating height and width\n _canvas_size: Tuple[int, int] = (600, 800)\n\n def __init__(self, title='napari', ndisplay=2, order=(), axis_labels=()):\n # allow 
extra attributes during model initialization, useful for mixins\n self.__config__.extra = Extra.allow\n super().__init__(\n title=title,\n dims={\n 'axis_labels': axis_labels,\n 'ndisplay': ndisplay,\n 'order': order,\n },\n )\n self.__config__.extra = Extra.ignore\n\n settings = get_settings()\n self.tooltip.visible = settings.appearance.layer_tooltip_visibility\n settings.appearance.events.layer_tooltip_visibility.connect(\n self._tooltip_visible_update\n )\n\n self._update_viewer_grid()\n settings.application.events.grid_stride.connect(\n self._update_viewer_grid\n )\n settings.application.events.grid_width.connect(\n self._update_viewer_grid\n )\n settings.application.events.grid_height.connect(\n self._update_viewer_grid\n )\n\n # Add extra events - ideally these will be removed too!\n self.events.add(layers_change=Event, reset_view=Event)\n\n # Connect events\n self.grid.events.connect(self.reset_view)\n self.grid.events.connect(self._on_grid_change)\n self.dims.events.ndisplay.connect(self._update_layers)\n self.dims.events.ndisplay.connect(self.reset_view)\n self.dims.events.order.connect(self._update_layers)\n self.dims.events.order.connect(self.reset_view)\n self.dims.events.current_step.connect(self._update_layers)\n self.cursor.events.position.connect(self._on_cursor_position_change)\n self.layers.events.inserted.connect(self._on_add_layer)\n self.layers.events.removed.connect(self._on_remove_layer)\n self.layers.events.reordered.connect(self._on_grid_change)\n self.layers.events.reordered.connect(self._on_layers_change)\n self.layers.selection.events.active.connect(self._on_active_layer)\n\n # Add mouse callback\n self.mouse_wheel_callbacks.append(dims_scroll)\n\n def _tooltip_visible_update(self, event):\n self.tooltip.visible = event.value\n\n def _update_viewer_grid(self, e=None):\n \"\"\"Keep viewer grid settings up to date with settings values.\"\"\"\n\n settings = get_settings()\n\n self.grid.stride = settings.application.grid_stride\n self.grid.shape = (\n settings.application.grid_height,\n settings.application.grid_width,\n )\n\n @validator('theme')\n def _valid_theme(cls, v):\n themes = available_themes()\n if v not in available_themes():\n raise ValueError(\n trans._(\n \"Theme '{theme_name}' not found; options are {themes}.\",\n deferred=True,\n theme_name=v,\n themes=themes,\n )\n )\n\n return v\n\n def json(self, **kwargs):\n \"\"\"Serialize to json.\"\"\"\n # Manually exclude the layer list and active layer which cannot be serialized at this point\n # and mouse and keybindings don't belong on model\n # https://github.com/samuelcolvin/pydantic/pull/2231\n # https://github.com/samuelcolvin/pydantic/issues/660#issuecomment-642211017\n exclude = kwargs.pop('exclude', set())\n exclude = exclude.union(EXCLUDE_JSON)\n return super().json(exclude=exclude, **kwargs)\n\n def dict(self, **kwargs):\n \"\"\"Convert to a dictionary.\"\"\"\n # Manually exclude the layer list and active layer which cannot be serialized at this point\n # and mouse and keybindings don't belong on model\n # https://github.com/samuelcolvin/pydantic/pull/2231\n # https://github.com/samuelcolvin/pydantic/issues/660#issuecomment-642211017\n exclude = kwargs.pop('exclude', set())\n exclude = exclude.union(EXCLUDE_DICT)\n return super().dict(exclude=exclude, **kwargs)\n\n def __hash__(self):\n return id(self)\n\n def __str__(self):\n \"\"\"Simple string representation\"\"\"\n return f'napari.Viewer: {self.title}'\n\n @property\n def _sliced_extent_world(self) -> np.ndarray:\n \"\"\"Extent of layers in 
world coordinates after slicing.\n\n D is either 2 or 3 depending on if the displayed data is 2D or 3D.\n\n Returns\n -------\n sliced_extent_world : array, shape (2, D)\n \"\"\"\n if len(self.layers) == 0 and self.dims.ndim != 2:\n # If no data is present and dims model has not been reset to 0\n # than someone has passed more than two axis labels which are\n # being saved and so default values are used.\n return np.vstack(\n [np.zeros(self.dims.ndim), np.repeat(512, self.dims.ndim)]\n )\n else:\n return self.layers.extent.world[:, self.dims.displayed]\n\n def reset_view(self, event=None):\n \"\"\"Reset the camera view.\"\"\"\n\n extent = self._sliced_extent_world\n scene_size = extent[1] - extent[0]\n corner = extent[0]\n grid_size = list(self.grid.actual_shape(len(self.layers)))\n if len(scene_size) > len(grid_size):\n grid_size = [1] * (len(scene_size) - len(grid_size)) + grid_size\n size = np.multiply(scene_size, grid_size)\n center = np.add(corner, np.divide(size, 2))[-self.dims.ndisplay :]\n center = [0] * (self.dims.ndisplay - len(center)) + list(center)\n self.camera.center = center\n # zoom is definied as the number of canvas pixels per world pixel\n # The default value used below will zoom such that the whole field\n # of view will occupy 95% of the canvas on the most filled axis\n if np.max(size) == 0:\n self.camera.zoom = 0.95 * np.min(self._canvas_size)\n else:\n scale = np.array(size[-2:])\n scale[np.isclose(scale, 0)] = 1\n self.camera.zoom = 0.95 * np.min(\n np.array(self._canvas_size) / scale\n )\n self.camera.angles = (0, 0, 90)\n\n # Emit a reset view event, which is no longer used internally, but\n # which maybe useful for building on napari.\n self.events.reset_view(\n center=self.camera.center,\n zoom=self.camera.zoom,\n angles=self.camera.angles,\n )\n\n def _new_labels(self):\n \"\"\"Create new labels layer filling full world coordinates space.\"\"\"\n extent = self.layers.extent.world\n scale = self.layers.extent.step\n scene_size = extent[1] - extent[0]\n corner = extent[0] + 0.5 * self.layers.extent.step\n shape = [\n np.round(s / sc).astype('int') if s > 0 else 1\n for s, sc in zip(scene_size, scale)\n ]\n empty_labels = np.zeros(shape, dtype=int)\n self.add_labels(empty_labels, translate=np.array(corner), scale=scale)\n\n def _update_layers(self, event=None, layers=None):\n \"\"\"Updates the contained layers.\n\n Parameters\n ----------\n layers : list of napari.layers.Layer, optional\n List of layers to update. 
If none provided updates all.\n \"\"\"\n layers = layers or self.layers\n for layer in layers:\n layer._slice_dims(\n self.dims.point, self.dims.ndisplay, self.dims.order\n )\n\n def _on_active_layer(self, event):\n \"\"\"Update viewer state for a new active layer.\"\"\"\n active_layer = event.value\n if active_layer is None:\n self.help = ''\n self.cursor.style = 'standard'\n self.camera.interactive = True\n else:\n self.help = active_layer.help\n self.cursor.style = active_layer.cursor\n self.cursor.size = active_layer.cursor_size\n self.camera.interactive = active_layer.interactive\n\n def _on_layers_change(self, event):\n if len(self.layers) == 0:\n self.dims.ndim = 2\n self.dims.reset()\n else:\n extent = self.layers.extent\n world = extent.world\n ss = extent.step\n ndim = world.shape[1]\n self.dims.ndim = ndim\n for i in range(ndim):\n self.dims.set_range(\n i,\n (\n world[0, i] + 0.5 * ss[i],\n world[1, i] + 0.5 * ss[i],\n ss[i],\n ),\n )\n\n new_dim = self.dims.ndim\n dim_diff = new_dim - len(self.cursor.position)\n if dim_diff < 0:\n self.cursor.position = self.cursor.position[:new_dim]\n elif dim_diff > 0:\n self.cursor.position = tuple(\n list(self.cursor.position) + [0] * dim_diff\n )\n self.events.layers_change()\n\n def _update_interactive(self, event):\n \"\"\"Set the viewer interactivity with the `event.interactive` bool.\"\"\"\n self.camera.interactive = event.interactive\n\n def _update_cursor(self, event):\n \"\"\"Set the viewer cursor with the `event.cursor` string.\"\"\"\n self.cursor.style = event.cursor\n\n def _update_cursor_size(self, event):\n \"\"\"Set the viewer cursor_size with the `event.cursor_size` int.\"\"\"\n self.cursor.size = event.cursor_size\n\n def _on_cursor_position_change(self, event):\n \"\"\"Set the layer cursor position.\"\"\"\n with warnings.catch_warnings():\n # Catch the deprecation warning on layer.position\n warnings.filterwarnings(\n 'ignore',\n message=str(\n trans._('layer.position is deprecated', deferred=True)\n ),\n )\n for layer in self.layers:\n layer.position = self.cursor.position\n\n # Update status and help bar based on active layer\n active = self.layers.selection.active\n if active is not None:\n self.status = active.get_status(\n self.cursor.position,\n view_direction=self.cursor._view_direction,\n dims_displayed=list(self.dims.displayed),\n world=True,\n )\n self.help = active.help\n if self.tooltip.visible:\n self.tooltip.text = active._get_tooltip_text(\n self.cursor.position, world=True\n )\n\n def _on_grid_change(self, event):\n \"\"\"Arrange the current layers is a 2D grid.\"\"\"\n extent = self._sliced_extent_world\n n_layers = len(self.layers)\n for i, layer in enumerate(self.layers):\n i_row, i_column = self.grid.position(n_layers - 1 - i, n_layers)\n self._subplot(layer, (i_row, i_column), extent)\n\n def _subplot(self, layer, position, extent):\n \"\"\"Shift a layer to a specified position in a 2D grid.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n Layer that is to be moved.\n position : 2-tuple of int\n New position of layer in grid.\n extent : array, shape (2, D)\n Extent of the world.\n \"\"\"\n scene_shift = extent[1] - extent[0]\n translate_2d = np.multiply(scene_shift[-2:], position)\n translate = [0] * layer.ndim\n translate[-2:] = translate_2d\n layer.translate_grid = translate\n\n @property\n def experimental(self):\n \"\"\"Experimental commands for IPython console.\n\n For example run \"viewer.experimental.cmds.loader.help\".\n \"\"\"\n from .experimental.commands import 
ExperimentalNamespace\n\n return ExperimentalNamespace(self.layers)\n\n def _on_add_layer(self, event):\n \"\"\"Connect new layer events.\n\n Parameters\n ----------\n event : :class:`napari.layers.Layer`\n Layer to add.\n \"\"\"\n layer = event.value\n\n # Connect individual layer events to viewer events\n # TODO: in a future PR, we should now be able to connect viewer *only*\n # to viewer.layers.events... and avoid direct viewer->layer connections\n layer.events.interactive.connect(self._update_interactive)\n layer.events.cursor.connect(self._update_cursor)\n layer.events.cursor_size.connect(self._update_cursor_size)\n layer.events.data.connect(self._on_layers_change)\n layer.events.scale.connect(self._on_layers_change)\n layer.events.translate.connect(self._on_layers_change)\n layer.events.rotate.connect(self._on_layers_change)\n layer.events.shear.connect(self._on_layers_change)\n layer.events.affine.connect(self._on_layers_change)\n layer.events.name.connect(self.layers._update_name)\n\n # Update dims and grid model\n self._on_layers_change(None)\n self._on_grid_change(None)\n # Slice current layer based on dims\n self._update_layers(layers=[layer])\n\n if len(self.layers) == 1:\n self.reset_view()\n\n def _on_remove_layer(self, event):\n \"\"\"Disconnect old layer events.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n Event which will remove a layer.\n\n Returns\n -------\n layer : :class:`napari.layers.Layer` or list\n The layer that was added (same as input).\n \"\"\"\n layer = event.value\n\n # Disconnect all connections from layer\n disconnect_events(layer.events, self)\n disconnect_events(layer.events, self.layers)\n\n self._on_layers_change(None)\n self._on_grid_change(None)\n\n def add_layer(self, layer: Layer) -> Layer:\n \"\"\"Add a layer to the viewer.\n\n Parameters\n ----------\n layer : :class:`napari.layers.Layer`\n Layer to add.\n\n Returns\n -------\n layer : :class:`napari.layers.Layer` or list\n The layer that was added (same as input).\n \"\"\"\n # Adding additional functionality inside `add_layer`\n # should be avoided to keep full functionality\n # from adding a layer through the `layers.append`\n # method\n self.layers.append(layer)\n return layer\n\n def add_image(\n self,\n data=None,\n *,\n channel_axis=None,\n rgb=None,\n colormap=None,\n contrast_limits=None,\n gamma=1,\n interpolation='nearest',\n rendering='mip',\n iso_threshold=0.5,\n attenuation=0.05,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n rotate=None,\n shear=None,\n affine=None,\n opacity=1,\n blending=None,\n visible=True,\n multiscale=None,\n cache=True,\n experimental_slicing_plane=None,\n experimental_clipping_planes=None,\n ) -> Union[Image, List[Image]]:\n \"\"\"Add an image layer to the layer list.\n\n Parameters\n ----------\n data : array or list of array\n Image data. Can be N >= 2 dimensional. If the last dimension has length\n 3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a\n list and arrays are decreasing in shape then the data is treated as\n a multiscale image. Please note multiscale rendering is only\n supported in 2D. In 3D, only the lowest resolution scale is\n displayed.\n channel_axis : int, optional\n Axis to expand image along. If provided, each channel in the data\n will be added as an individual image layer. In channel_axis mode,\n all other parameters MAY be provided as lists, and the Nth value\n will be applied to the Nth channel in the data. 
If a single value\n is provided, it will be broadcast to all Layers.\n rgb : bool or list\n Whether the image is rgb RGB or RGBA. If not specified by user and\n the last dimension of the data has length 3 or 4 it will be set as\n `True`. If `False` the image is interpreted as a luminance image.\n If a list then must be same length as the axis that is being\n expanded as channels.\n colormap : str, napari.utils.Colormap, tuple, dict, list\n Colormaps to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap. If a list then must be same length as the axis that is\n being expanded as channels, and each colormap is applied to each\n new image layer.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image. If list of lists then must be same length as the axis\n that is being expanded and then each colormap is applied to each\n image.\n gamma : list, float\n Gamma correction for determining colormap linearity. Defaults to 1.\n If a list then must be same length as the axis that is being\n expanded as channels.\n interpolation : str or list\n Interpolation mode used by vispy. Must be one of our supported\n modes. If a list then must be same length as the axis that is being\n expanded as channels.\n rendering : str or list\n Rendering mode used by vispy. Must be one of our supported\n modes. If a list then must be same length as the axis that is being\n expanded as channels.\n iso_threshold : float or list\n Threshold for isosurface. If a list then must be same length as the\n axis that is being expanded as channels.\n attenuation : float or list\n Attenuation rate for attenuated maximum intensity projection. If a\n list then must be same length as the axis that is being expanded as\n channels.\n name : str or list of str\n Name of the layer. If a list then must be same length as the axis\n that is being expanded as channels.\n metadata : dict or list of dict\n Layer metadata. If a list then must be a list of dicts with the\n same length as the axis that is being expanded as channels.\n scale : tuple of float or list\n Scale factors for the layer. If a list then must be a list of\n tuples of float with the same length as the axis that is being\n expanded as channels.\n translate : tuple of float or list\n Translation values for the layer. If a list then must be a list of\n tuples of float with the same length as the axis that is being\n expanded as channels.\n rotate : float, 3-tuple of float, n-D array or list.\n If a float convert into a 2D rotation matrix using that value as an\n angle. If 3-tuple convert into a 3D rotation matrix, using a yaw,\n pitch, roll convention. Otherwise assume an nD rotation. Angles are\n assumed to be in degrees. They can be converted from radians with\n np.degrees if needed. 
If a list then must have same length as\n the axis that is being expanded as channels.\n shear : 1-D array or list.\n A vector of shear values for an upper triangular n-D shear matrix.\n If a list then must have same length as the axis that is being\n expanded as channels.\n affine : n-D array or napari.utils.transforms.Affine\n (N+1, N+1) affine transformation matrix in homogeneous coordinates.\n The first (N, N) entries correspond to a linear transform and\n the final column is a length N translation vector and a 1 or a\n napari `Affine` transform object. Applied as an extra transform on\n top of the provided scale, rotate, and shear values.\n opacity : float or list\n Opacity of the layer visual, between 0.0 and 1.0. If a list then\n must be same length as the axis that is being expanded as channels.\n blending : str or list\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}. If a list then\n must be same length as the axis that is being expanded as channels.\n visible : bool or list of bool\n Whether the layer visual is currently being displayed.\n If a list then must be same length as the axis that is\n being expanded as channels.\n multiscale : bool\n Whether the data is a multiscale image or not. Multiscale data is\n represented by a list of array like image data. If not specified by\n the user and if the data is a list of arrays that decrease in shape\n then it will be taken to be multiscale. The first image in the list\n should be the largest. Please note multiscale rendering is only\n supported in 2D. In 3D, only the lowest resolution scale is\n displayed.\n cache : bool\n Whether slices of out-of-core datasets should be cached upon\n retrieval. Currently, this only applies to dask arrays.\n experimental_slicing_plane : dict or SlicingPlane\n Properties defining plane rendering in 3D. Properties are defined in\n data coordinates. Valid dictionary keys are\n {'position', 'normal', 'thickness', and 'enabled'}.\n experimental_clipping_planes : list of dicts, list of ClippingPlane, or ClippingPlaneList\n Each dict defines a clipping plane in 3D in data coordinates.\n Valid dictionary keys are {'position', 'normal', and 'enabled'}.\n Values on the negative side of the normal are discarded if the plane is enabled.\n\n Returns\n -------\n layer : :class:`napari.layers.Image` or list\n The newly-created image layer or list of image layers.\n \"\"\"\n\n if colormap is not None:\n # standardize colormap argument(s) to Colormaps, and make sure they\n # are in AVAILABLE_COLORMAPS. This will raise one of many various\n # errors if the colormap argument is invalid. 
See\n # ensure_colormap for details\n if isinstance(colormap, list):\n colormap = [ensure_colormap(c) for c in colormap]\n else:\n colormap = ensure_colormap(colormap)\n\n # doing this here for IDE/console autocompletion in add_image function.\n kwargs = {\n 'rgb': rgb,\n 'colormap': colormap,\n 'contrast_limits': contrast_limits,\n 'gamma': gamma,\n 'interpolation': interpolation,\n 'rendering': rendering,\n 'iso_threshold': iso_threshold,\n 'attenuation': attenuation,\n 'name': name,\n 'metadata': metadata,\n 'scale': scale,\n 'translate': translate,\n 'rotate': rotate,\n 'shear': shear,\n 'affine': affine,\n 'opacity': opacity,\n 'blending': blending,\n 'visible': visible,\n 'multiscale': multiscale,\n 'cache': cache,\n 'experimental_slicing_plane': experimental_slicing_plane,\n 'experimental_clipping_planes': experimental_clipping_planes,\n }\n\n # these arguments are *already* iterables in the single-channel case.\n iterable_kwargs = {\n 'scale',\n 'translate',\n 'rotate',\n 'shear',\n 'affine',\n 'contrast_limits',\n 'metadata',\n 'experimental_clipping_planes',\n }\n\n if channel_axis is None:\n kwargs['colormap'] = kwargs['colormap'] or 'gray'\n kwargs['blending'] = kwargs['blending'] or 'translucent'\n # Helpful message if someone tries to add mulit-channel kwargs,\n # but forget the channel_axis arg\n for k, v in kwargs.items():\n if k not in iterable_kwargs and is_sequence(v):\n raise TypeError(\n trans._(\n \"Received sequence for argument '{argument}', did you mean to specify a 'channel_axis'? \",\n deferred=True,\n argument=k,\n )\n )\n layer = Image(data, **kwargs)\n self.layers.append(layer)\n\n return layer\n else:\n layerdata_list = split_channels(data, channel_axis, **kwargs)\n\n layer_list = list()\n for image, i_kwargs, _ in layerdata_list:\n layer = Image(image, **i_kwargs)\n self.layers.append(layer)\n layer_list.append(layer)\n\n return layer_list\n\n def open_sample(\n self,\n plugin: str,\n sample: str,\n reader_plugin: Optional[str] = None,\n **kwargs,\n ) -> List[Layer]:\n \"\"\"Open `sample` from `plugin` and add it to the viewer.\n\n To see all available samples registered by plugins, use\n :func:`napari.plugins.available_samples`\n\n Parameters\n ----------\n plugin : str\n name of a plugin providing a sample\n sample : str\n name of the sample\n reader_plugin : str, optional\n reader plugin to pass to viewer.open (only used if the sample data\n is a string). by default None.\n **kwargs\n additional kwargs will be passed to the sample data loader provided\n by `plugin`. Use of **kwargs may raise an error if the kwargs do\n not match the sample data loader.\n\n Returns\n -------\n layers : list\n A list of any layers that were added to the viewer.\n\n Raises\n ------\n KeyError\n If `plugin` does not provide a sample named `sample`.\n \"\"\"\n from ..plugins import plugin_manager\n\n try:\n data = plugin_manager._sample_data[plugin][sample]['data']\n except KeyError:\n samples = plugin_manager.available_samples()\n msg = trans._(\n \"Plugin {plugin!r} does not provide sample data named {sample!r}. \",\n plugin=plugin,\n sample=sample,\n deferred=True,\n )\n if samples:\n msg = trans._(\n \"Plugin {plugin!r} does not provide sample data named {sample!r}. Available samples include: {samples}.\",\n deferred=True,\n plugin=plugin,\n sample=sample,\n samples=samples,\n )\n else:\n msg = trans._(\n \"Plugin {plugin!r} does not provide sample data named {sample!r}. 
No plugin samples have been registered.\",\n deferred=True,\n plugin=plugin,\n sample=sample,\n )\n\n raise KeyError(msg)\n\n with layer_source(sample=(plugin, sample)):\n if callable(data):\n added = []\n for datum in data(**kwargs):\n added.extend(self._add_layer_from_data(*datum))\n return added\n elif isinstance(data, (str, Path)):\n return self.open(data, plugin=reader_plugin)\n else:\n raise TypeError(\n trans._(\n 'Got unexpected type for sample ({plugin!r}, {sample!r}): {data_type}',\n deferred=True,\n plugin=plugin,\n sample=sample,\n data_type=type(data),\n )\n )\n\n def open(\n self,\n path: PathOrPaths,\n *,\n stack: bool = False,\n plugin: Optional[str] = None,\n layer_type: Optional[str] = None,\n **kwargs,\n ) -> List[Layer]:\n \"\"\"Open a path or list of paths with plugins, and add layers to viewer.\n\n A list of paths will be handed one-by-one to the napari_get_reader hook\n if stack is False, otherwise the full list is passed to each plugin\n hook.\n\n Parameters\n ----------\n path : str or list of str\n A filepath, directory, or URL (or a list of any) to open.\n stack : bool, optional\n If a list of strings is passed and ``stack`` is ``True``, then the\n entire list will be passed to plugins. It is then up to individual\n plugins to know how to handle a list of paths. If ``stack`` is\n ``False``, then the ``path`` list is broken up and passed to plugin\n readers one by one. by default False.\n plugin : str, optional\n Name of a plugin to use. If provided, will force ``path`` to be\n read with the specified ``plugin``. If the requested plugin cannot\n read ``path``, an exception will be raised.\n layer_type : str, optional\n If provided, will force data read from ``path`` to be passed to the\n corresponding ``add_<layer_type>`` method (along with any\n additional) ``kwargs`` provided to this function. This *may*\n result in exceptions if the data returned from the path is not\n compatible with the layer_type.\n **kwargs\n All other keyword arguments will be passed on to the respective\n ``add_layer`` method.\n\n Returns\n -------\n layers : list\n A list of any layers that were added to the viewer.\n \"\"\"\n paths = [path] if isinstance(path, (Path, str)) else path\n paths = [os.fspath(path) for path in paths] # PathObjects -> str\n if not isinstance(paths, (tuple, list)):\n raise ValueError(\n trans._(\n \"'path' argument must be a string, list, or tuple\",\n deferred=True,\n )\n )\n\n if stack:\n return self._add_layers_with_plugins(\n paths, kwargs, plugin=plugin, layer_type=layer_type\n )\n\n added: List[Layer] = [] # for layers that get added\n with progress(\n paths,\n desc='Opening Files',\n total=0\n if len(paths) == 1\n else None, # indeterminate bar for 1 file\n ) as pbr:\n for _path in pbr:\n added.extend(\n self._add_layers_with_plugins(\n _path, kwargs, plugin=plugin, layer_type=layer_type\n )\n )\n return added\n\n def _add_layers_with_plugins(\n self,\n path_or_paths: Union[str, Sequence[str]],\n kwargs: Optional[dict] = None,\n plugin: Optional[str] = None,\n layer_type: Optional[str] = None,\n ) -> List[Layer]:\n \"\"\"Load a path or a list of paths into the viewer using plugins.\n\n This function is mostly called from self.open_path, where the ``stack``\n argument determines whether a list of strings is handed to plugins one\n at a time, or en-masse.\n\n Parameters\n ----------\n path_or_paths : str or list of str\n A filepath, directory, or URL (or a list of any) to open. 
If a\n list, the assumption is that the list is to be treated as a stack.\n kwargs : dict, optional\n keyword arguments that will be used to overwrite any of those that\n are returned in the meta dict from plugins.\n plugin : str, optional\n Name of a plugin to use. If provided, will force ``path`` to be\n read with the specified ``plugin``. If the requested plugin cannot\n read ``path``, an exception will be raised.\n layer_type : str, optional\n If provided, will force data read from ``path`` to be passed to the\n corresponding ``add_<layer_type>`` method (along with any\n additional) ``kwargs`` provided to this function. This *may*\n result in exceptions if the data returned from the path is not\n compatible with the layer_type.\n\n Returns\n -------\n List[Layer]\n A list of any layers that were added to the viewer.\n \"\"\"\n from ..plugins.io import read_data_with_plugins\n\n layer_data, hookimpl = read_data_with_plugins(\n path_or_paths, plugin=plugin\n )\n\n # glean layer names from filename. These will be used as *fallback*\n # names, if the plugin does not return a name kwarg in their meta dict.\n filenames = []\n if isinstance(path_or_paths, str):\n filenames = itertools.repeat(path_or_paths)\n elif is_sequence(path_or_paths):\n if len(path_or_paths) == len(layer_data):\n filenames = iter(path_or_paths)\n else:\n # if a list of paths has been returned as a list of layer data\n # without a 1:1 relationship between the two lists we iterate\n # over the first name\n filenames = itertools.repeat(path_or_paths[0])\n\n # add each layer to the viewer\n added: List[Layer] = [] # for layers that get added\n plugin = hookimpl.plugin_name if hookimpl else None\n for data, filename in zip(layer_data, filenames):\n basename, _ext = os.path.splitext(os.path.basename(filename))\n _data = _unify_data_and_user_kwargs(\n data, kwargs, layer_type, fallback_name=basename\n )\n # actually add the layer\n with layer_source(path=filename, reader_plugin=plugin):\n added.extend(self._add_layer_from_data(*_data))\n return added\n\n def _add_layer_from_data(\n self,\n data,\n meta: Dict[str, Any] = None,\n layer_type: Optional[str] = None,\n ) -> List[Layer]:\n \"\"\"Add arbitrary layer data to the viewer.\n\n Primarily intended for usage by reader plugin hooks.\n\n Parameters\n ----------\n data : Any\n Data in a format that is valid for the corresponding `add_*` method\n of the specified ``layer_type``.\n meta : dict, optional\n Dict of keyword arguments that will be passed to the corresponding\n `add_*` method. MUST NOT contain any keyword arguments that are\n not valid for the corresponding method.\n layer_type : str\n Type of layer to add. MUST have a corresponding add_* method on\n on the viewer instance. If not provided, the layer is assumed to\n be \"image\", unless data.dtype is one of (np.int32, np.uint32,\n np.int64, np.uint64), in which case it is assumed to be \"labels\".\n\n Returns\n -------\n layers : list of layers\n A list of layers added to the viewer.\n\n Raises\n ------\n ValueError\n If ``layer_type`` is not one of the recognized layer types.\n TypeError\n If any keyword arguments in ``meta`` are unexpected for the\n corresponding `add_*` method for this layer_type.\n\n Examples\n --------\n A typical use case might be to upack a tuple of layer data with a\n specified layer_type.\n\n >>> viewer = napari.Viewer()\n >>> data = (\n ... np.random.random((10, 2)) * 20,\n ... {'face_color': 'blue'},\n ... 'points',\n ... 
)\n >>> viewer._add_layer_from_data(*data)\n\n \"\"\"\n\n layer_type = (layer_type or '').lower()\n\n # assumes that big integer type arrays are likely labels.\n if not layer_type:\n layer_type = guess_labels(data)\n\n if layer_type not in layers.NAMES:\n raise ValueError(\n trans._(\n \"Unrecognized layer_type: '{layer_type}'. Must be one of: {layer_names}.\",\n deferred=True,\n layer_type=layer_type,\n layer_names=layers.NAMES,\n )\n )\n\n try:\n add_method = getattr(self, 'add_' + layer_type)\n layer = add_method(data, **(meta or {}))\n except TypeError as exc:\n if 'unexpected keyword argument' not in str(exc):\n raise exc\n bad_key = str(exc).split('keyword argument ')[-1]\n raise TypeError(\n trans._(\n \"_add_layer_from_data received an unexpected keyword argument ({bad_key}) for layer type {layer_type}\",\n deferred=True,\n bad_key=bad_key,\n layer_type=layer_type,\n )\n ) from exc\n return layer if isinstance(layer, list) else [layer]\n\n\ndef _normalize_layer_data(data: LayerData) -> FullLayerData:\n \"\"\"Accepts any layerdata tuple, and returns a fully qualified tuple.\n\n Parameters\n ----------\n data : LayerData\n 1-, 2-, or 3-tuple with (data, meta, layer_type).\n\n Returns\n -------\n FullLayerData\n 3-tuple with (data, meta, layer_type)\n\n Raises\n ------\n ValueError\n If data has len < 1 or len > 3, or if the second item in ``data`` is\n not a ``dict``, or the third item is not a valid layer_type ``str``\n \"\"\"\n if not isinstance(data, tuple) and 0 < len(data) < 4:\n raise ValueError(\n trans._(\n \"LayerData must be a 1-, 2-, or 3-tuple\",\n deferred=True,\n )\n )\n\n _data = list(data)\n if len(_data) > 1:\n if not isinstance(_data[1], dict):\n raise ValueError(\n trans._(\n \"The second item in a LayerData tuple must be a dict\",\n deferred=True,\n )\n )\n else:\n _data.append(dict())\n if len(_data) > 2:\n if _data[2] not in layers.NAMES:\n raise ValueError(\n trans._(\n \"The third item in a LayerData tuple must be one of: {layers!r}.\",\n deferred=True,\n layers=layers.NAMES,\n )\n )\n else:\n _data.append(guess_labels(_data[0]))\n return tuple(_data) # type: ignore\n\n\ndef _unify_data_and_user_kwargs(\n data: LayerData,\n kwargs: Optional[dict] = None,\n layer_type: Optional[str] = None,\n fallback_name: str = None,\n) -> FullLayerData:\n \"\"\"Merge data returned from plugins with options specified by user.\n\n If ``data == (_data, _meta, _type)``. Then:\n\n - ``kwargs`` will be used to update ``_meta``\n - ``layer_type`` will replace ``_type`` and, if provided, ``_meta`` keys\n will be pruned to layer_type-appropriate kwargs\n - ``fallback_name`` is used if ``not _meta.get('name')``\n\n .. 
note:\n\n If a user specified both layer_type and additional keyword arguments\n to viewer.open(), it is their responsibility to make sure the kwargs\n match the layer_type.\n\n Parameters\n ----------\n data : LayerData\n 1-, 2-, or 3-tuple with (data, meta, layer_type) returned from plugin.\n kwargs : dict, optional\n User-supplied keyword arguments, to override those in ``meta`` supplied\n by plugins.\n layer_type : str, optional\n A user-supplied layer_type string, to override the ``layer_type``\n declared by the plugin.\n fallback_name : str, optional\n A name for the layer, to override any name in ``meta`` supplied by the\n plugin.\n\n Returns\n -------\n FullLayerData\n Fully qualified LayerData tuple with user-provided overrides.\n \"\"\"\n _data, _meta, _type = _normalize_layer_data(data)\n\n if layer_type:\n # the user has explicitly requested this be a certain layer type\n # strip any kwargs from the plugin that are no longer relevant\n _meta = prune_kwargs(_meta, layer_type)\n _type = layer_type\n\n if kwargs:\n # if user provided kwargs, use to override any meta dict values that\n # were returned by the plugin. We only prune kwargs if the user did\n # *not* specify the layer_type. This means that if a user specified\n # both layer_type and additional keyword arguments to viewer.open(),\n # it is their responsibility to make sure the kwargs match the\n # layer_type.\n _meta.update(prune_kwargs(kwargs, _type) if not layer_type else kwargs)\n\n if not _meta.get('name') and fallback_name:\n _meta['name'] = fallback_name\n return (_data, _meta, _type)\n\n\ndef prune_kwargs(kwargs: Dict[str, Any], layer_type: str) -> Dict[str, Any]:\n \"\"\"Return copy of ``kwargs`` with only keys valid for ``add_<layer_type>``\n\n Parameters\n ----------\n kwargs : dict\n A key: value mapping where some or all of the keys are parameter names\n for the corresponding ``Viewer.add_<layer_type>`` method.\n layer_type : str\n The type of layer that is going to be added with these ``kwargs``.\n\n Returns\n -------\n pruned_kwargs : dict\n A key: value mapping where all of the keys are valid parameter names\n for the corresponding ``Viewer.add_<layer_type>`` method.\n\n Raises\n ------\n ValueError\n If ``ViewerModel`` does not provide an ``add_<layer_type>`` method\n for the provided ``layer_type``.\n\n Examples\n --------\n >>> test_kwargs = {\n ... 'scale': (0.75, 1),\n ... 'blending': 'additive',\n ... 'num_colors': 10,\n ... 
}\n >>> prune_kwargs(test_kwargs, 'image')\n {'scale': (0.75, 1), 'blending': 'additive'}\n\n >>> # only labels has the ``num_colors`` argument\n >>> prune_kwargs(test_kwargs, 'labels')\n {'scale': (0.75, 1), 'blending': 'additive', 'num_colors': 10}\n \"\"\"\n add_method = getattr(ViewerModel, 'add_' + layer_type, None)\n if not add_method or layer_type == 'layer':\n raise ValueError(\n trans._(\n \"Invalid layer_type: {layer_type}\",\n deferred=True,\n layer_type=layer_type,\n )\n )\n\n # get valid params for the corresponding add_<layer_type> method\n valid = valid_add_kwargs()[layer_type]\n return {k: v for k, v in kwargs.items() if k in valid}\n\n\n@lru_cache(maxsize=1)\ndef valid_add_kwargs() -> Dict[str, Set[str]]:\n \"\"\"Return a dict where keys are layer types & values are valid kwargs.\"\"\"\n valid = dict()\n for meth in dir(ViewerModel):\n if not meth.startswith('add_') or meth[4:] == 'layer':\n continue\n params = inspect.signature(getattr(ViewerModel, meth)).parameters\n valid[meth[4:]] = set(params) - {'self', 'kwargs'}\n return valid\n\n\nfor _layer in (\n layers.Labels,\n layers.Points,\n layers.Shapes,\n layers.Surface,\n layers.Tracks,\n layers.Vectors,\n):\n func = create_add_method(_layer)\n setattr(ViewerModel, func.__name__, func)\n" ]
[ [ "numpy.linspace", "numpy.subtract", "numpy.concatenate", "numpy.log10", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.multiply", "numpy.min", "numpy.round", "numpy.max", "numpy.repeat", "numpy.array", "numpy.zeros", "numpy.divide", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PaperCodeReview/MoCo-TF
[ "1ea01b2d005de3e030229f79a37135468fa1631e" ]
[ "dataloader.py" ]
[ "import os\r\nimport random\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\nfrom augment import Augment\r\n\r\n\r\nAUTO = tf.data.experimental.AUTOTUNE\r\n\r\n\r\ndef set_dataset(task, data_path):\r\n trainset = pd.read_csv(\r\n os.path.join(\r\n data_path, 'imagenet_trainset.csv'\r\n )).values.tolist()\r\n trainset = [[os.path.join(data_path, t[0]), t[1]] for t in trainset]\r\n\r\n if task == 'lincls':\r\n valset = pd.read_csv(\r\n os.path.join(\r\n data_path, 'imagenet_valset.csv'\r\n )).values.tolist()\r\n valset = [[os.path.join(data_path, t[0]), t[1]] for t in valset]\r\n return np.array(trainset, dtype='object'), np.array(valset, dtype='object')\r\n\r\n return np.array(trainset, dtype='object')\r\n\r\n\r\nclass DataLoader:\r\n def __init__(self, args, mode, datalist, batch_size, num_workers=1, shuffle=True):\r\n self.args = args\r\n self.mode = mode\r\n self.datalist = datalist\r\n self.batch_size = batch_size\r\n self.num_workers = num_workers\r\n self.shuffle = shuffle\r\n\r\n self.dataloader = self._dataloader()\r\n\r\n def __len__(self):\r\n return len(self.datalist)\r\n\r\n def fetch_dataset(self, path, y=None):\r\n x = tf.io.read_file(path)\r\n if y is not None:\r\n return tf.data.Dataset.from_tensors((x, y))\r\n return tf.data.Dataset.from_tensors(x)\r\n\r\n def augmentation(self, img, shape):\r\n augset = Augment(self.args, self.mode)\r\n if self.args.task in ['v1', 'v2']:\r\n img_list = []\r\n for _ in range(2): # query, key\r\n aug_img = tf.identity(img)\r\n if self.args.task == 'v1':\r\n aug_img = augset._augmentv1(aug_img, shape) # moco v1\r\n else:\r\n radius = np.random.choice([3, 5])\r\n aug_img = augset._augmentv2(aug_img, shape, (radius, radius)) # moco v2\r\n img_list.append(aug_img)\r\n return img_list\r\n else:\r\n return augset._augment_lincls(img, shape)\r\n\r\n def dataset_parser(self, value, label=None):\r\n shape = tf.image.extract_jpeg_shape(value)\r\n img = tf.io.decode_jpeg(value, channels=3)\r\n if label is None:\r\n # moco\r\n query, key = self.augmentation(img, shape)\r\n inputs = {'query': query, 'key': key}\r\n labels = tf.zeros([])\r\n else:\r\n # lincls\r\n inputs = self.augmentation(img, shape)\r\n labels = tf.one_hot(label, self.args.classes)\r\n return (inputs, labels)\r\n\r\n def shuffle_BN(self, value, labels):\r\n if self.num_workers > 1:\r\n pre_shuffle = [(i, value['key'][i]) for i in range(self.batch_size)]\r\n random.shuffle(pre_shuffle)\r\n shuffle_idx = []\r\n value_temp = []\r\n for vv in pre_shuffle:\r\n shuffle_idx.append(vv[0])\r\n value_temp.append(tf.expand_dims(vv[1], axis=0))\r\n value['key'] = tf.concat(value_temp, axis=0)\r\n unshuffle_idx = np.array(shuffle_idx).argsort().tolist()\r\n value.update({'unshuffle': unshuffle_idx})\r\n return (value, labels)\r\n \r\n def _dataloader(self):\r\n self.imglist = self.datalist[:,0].tolist()\r\n if self.args.task in ['v1', 'v2']:\r\n dataset = tf.data.Dataset.from_tensor_slices(self.imglist)\r\n else:\r\n self.labellist = self.datalist[:,1].tolist()\r\n dataset = tf.data.Dataset.from_tensor_slices((self.imglist, self.labellist))\r\n\r\n dataset = dataset.repeat()\r\n if self.shuffle:\r\n dataset = dataset.shuffle(len(self.datalist))\r\n\r\n dataset = dataset.interleave(self.fetch_dataset, num_parallel_calls=AUTO)\r\n dataset = dataset.map(self.dataset_parser, num_parallel_calls=AUTO)\r\n dataset = dataset.batch(self.batch_size)\r\n dataset = dataset.prefetch(AUTO)\r\n if self.args.shuffle_bn and self.args.task in ['v1', 'v2']:\r\n # only moco\r\n dataset 
= dataset.map(self.shuffle_BN, num_parallel_calls=AUTO)\r\n return dataset" ]
[ [ "tensorflow.data.Dataset.from_tensors", "tensorflow.concat", "tensorflow.zeros", "tensorflow.io.decode_jpeg", "numpy.random.choice", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.identity", "tensorflow.expand_dims", "tensorflow.image.extract_jpeg_shape", "tensorflow.one_hot", "tensorflow.io.read_file", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
leeamen/k_means
[ "dfa9cad22033c108e3988a99f4d58c685eb06921" ]
[ "user_modeling.py" ]
[ "#!/usr/bin/python\n#coding:utf-8\nimport numpy as np\nimport logging\nimport mylog\nimport mykmeans as ml\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.ERROR)\n\ndef str2num(s):\n a = ['very_low', 'Low', 'Middle', 'High']\n for i in range(0, len(a)):\n if a[i] == s:\n return float(i)\nif __name__ == '__main__':\n filename = './data/data_user_modeling.txt'\n train_data = np.loadtxt(filename, delimiter = ',', converters = {5:str2num})\n logger.debug(train_data)\n logger.debug(train_data.shape)\n\n train_x = train_data[:,0:-1]\n train_y = train_data[:,-1]\n logger.debug(train_x)\n logger.debug(train_y)\n\n param = {}\n param['use_random_for_k'] = 1\n param['k'] = [i for i in range(0, 258, 1)]\n param['n_clusters'] = 4\n param['max_iter'] = 100\n kmeans = ml.Kmeans(param)\n kmeans.Fit(train_x)\n# logger.debug(kmeans)\n pred = kmeans.Predict(train_x)\n logger.info('train_y:%s', train_y)\n logger.info(' pred:%s', pred)\n# logger.info('k-means准确率:%f', 1.0*sum(pred == train_y)/len(train_y))\n# ml.PickingRightK(train_x, param)\n import myplot\n myplot.Figure()\n ml.FitMulti(train_x, param, 100)\n ml.BisectingFitMulti(train_x, param, 100) \n myplot.Legend(['k-means','bisecting'])\n myplot.Title('user modeling')\n myplot.Show() \n" ]
[ [ "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
esnet/hps-rl
[ "8426652e622394a955a44c42201e2204f6bfa0f2" ]
[ "searchmethods/modularGA.py" ]
[ "\nimport numpy, random\n\nclass Individual:\n def __init__(self,genome, llimits =[], ulimits=[], type=[], LEN = 1,fitness_func = None):\n if genome is None:\n self.genome = numpy.zeros(LEN,dtype=float)\n for gene in range(LEN):\n if type[gene] == \"integer\":\n self.genome[gene] = numpy.random.randint(llimits[gene], ulimits[gene])\n else:\n self.genome[gene] = numpy.random.uniform(llimits[gene], ulimits[gene])\n else:\n self.genome = genome\n self.fitness = fitness_func(self.genome)\n\n def __str__(self):\n return \"\".join(str(int(i)) for i in self.genome)\n\n\ndef crossover(a, b, fitness):\n g, h = a.genome.copy(), b.genome.copy()\n for pt in range(len(g)):\n if numpy.random.random() < 0.5:\n g[pt], h[pt] = h[pt], g[pt]\n return (Individual(genome=g,fitness_func=fitness), Individual(genome=h,fitness_func=fitness))\n\ndef mutate(a, mut_prob,fitness):\n g = a.genome.copy()\n for pt in range(len(g)):\n if numpy.random.random() < mut_prob:\n g[pt] = not g[pt]\n return Individual(g,fitness_func=fitness)\n\n\ndef stats(pop, gen,threshold):\n best = max(pop, key=lambda x: x.fitness)\n print(\"{0} {1:.2f} {2} {3}\".format(gen, numpy.mean([i.fitness for i in pop]), best.fitness, str(best)))\n return (best.fitness >= threshold)\n\n\ndef roulette(items, n):\n total = float(sum(w.fitness for w in items))\n i = 0\n w, v = items[0].fitness, items[0]\n while n:\n x = total * (1 - numpy.random.random() ** (1.0 / n))\n total -= x\n while x > w:\n x -= w\n i += 1\n w, v = items[i].fitness, items[i]\n w -= x\n yield v\n n -= 1\n\n\ndef tournament(items, n, tsize=5):\n for i in range(n):\n candidates = random.sample(items, tsize)\n yield max(candidates, key=lambda x: x.fitness)\n\ndef step(pop,cross_prob,mut_prob,fitness):\n newpop = []\n parents = roulette(pop, len(pop) + 1) # one extra for final xover\n while len(newpop) < len(pop):\n if numpy.random.random() < cross_prob:\n newpop.extend(map(mutate, crossover(next(parents), next(parents),fitness=fitness),[mut_prob,mut_prob],[fitness,fitness]))\n else:\n newpop.append(mutate(next(parents),mut_prob=mut_prob,fitness=fitness))\n return newpop\n\n\ndef run(llimit, ulimit, type, GENERATIONS, CROSSOVER_PROB, POPSIZE, LEN, MUTATION_PROB,FITNESS,THRESHOLD):\n numpy.random.seed(100)\n pop = [Individual(None,llimit,ulimit,type,LEN,FITNESS) for i in range(POPSIZE)]\n print(pop)\n stats(pop, 0, THRESHOLD)\n for gen in range(1, GENERATIONS):\n pop = step(pop,CROSSOVER_PROB,MUTATION_PROB,FITNESS)\n if stats(pop, gen, THRESHOLD):\n print(\"Success\")\n\nllimit = [0.5,1e-6,1e-6,0]\nulimit = [1.5,0.1,0.1,3]\ntype = ['real','real','real','integer']\nLEN = 4\nFITNESS, SUCCESS_THRESHOLD = (numpy.sum, LEN)\nrun(llimit,ulimit,type,100,1,100,4,0.9,FITNESS,10)" ]
[ [ "numpy.random.random", "numpy.random.seed", "numpy.mean", "numpy.random.uniform", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Bruce-zxy/deep-study-lenet5
[ "bba6531c9234c077107f79ff852f141cfed58229" ]
[ "data_creation.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport random\nimport numpy as np\nimport pandas as pd\nimport h5py\nimport matplotlib.pyplot as plt\nfrom math import cos, sin, atan2, sqrt, pi, radians, degrees, ceil, isnan\nfrom skimage import io, transform\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\n\nTRAIN_CSV_PATH = './pointdata4/traindata/'\nTEST_CSV_PATH = './pointdata4/testdata/'\n\ndata_path = './h5/'\ntrain_file_path = data_path + 'initial_train_data.h5'\ntest_file_path = data_path + 'initial_test_data.h5'\n\n# 按旋转角度分类的子级目录\nlabel_dirs = [[16, 19], [43,71,129, 260], [95,128,129, 274]]\n# 按道路分类的父级目录\nlabel_set = [0, 1, 2]\n\n# 获取二维点集的中心点坐标\ndef get_centroid(point_set):\n c_x, c_y = zip(*point_set)\n centroid_x = sum(c_x)/len(c_x)\n centroid_y = sum(c_y)/len(c_y)\n return centroid_x, centroid_y\n\n# 逆时针旋转坐标点\n\n\ndef n_rotate(angle, valuex, valuey, centerx, centery):\n valuex = np.array(valuex)\n valuey = np.array(valuey)\n nRotatex = (valuex-centerx)*cos(angle) - \\\n (valuey-centery)*sin(angle) + centerx\n nRotatey = (valuex-centerx)*sin(angle) + \\\n (valuey-centery)*cos(angle) + centery\n return nRotatex, nRotatey\n\n# 获取csv文件的列表\n\n\ndef get_csv_list(path):\n csv_file_list = []\n file_list = os.listdir(path)\n for file_name in file_list:\n if file_name.endswith('csv'):\n csv_file_list.append(path + \"/\" + file_name)\n return csv_file_list\n\n# 获取csv文件中的点集数据\n\n\ndef get_csv_data(path_list):\n # 创建空的定维数组\n sum_data = np.empty([0, 1024, 2], dtype=np.float32)\n\n # 遍历每个csv文件\n for path in path_list:\n # 将每个csv文件读取为Numpy的数据\n data = np.genfromtxt(path, delimiter=',', dtype=np.float32)[:, :2]\n data_len = len(data)\n empty_len = 1024 - data_len\n\n # 完整的1024个元数据=csv文件数据+在csv文件中随机指定下标数据\n count = 0\n while count < empty_len:\n data = np.append(\n data, [data[random.randint(0, data_len-1)]], axis=0)\n count += 1\n sum_data = np.append(sum_data, [data], axis=0)\n print(sum_data.shape)\n return sum_data\n\n\n# 随机打乱点集数据\ndef exchange_data_index(sum_data, label_data):\n cursor_index = 0\n max_range = len(sum_data)\n while cursor_index < max_range:\n random_index = random.randint(0, max_range-1)\n temp_sum_data = sum_data[0]\n temp_label_data = label_data[0]\n\n sum_data = np.delete(sum_data, 0, axis=0)\n label_data = np.delete(label_data, 0, axis=0)\n sum_data = np.insert(sum_data, random_index, temp_sum_data, axis=0)\n label_data = np.insert(label_data, random_index,\n temp_label_data, axis=0)\n\n cursor_index += 1\n return sum_data, label_data\n\n\ndef get_label_and_data(root_path, label_dirs):\n sum_data = np.empty([0, 1024, 2], dtype=np.float32)\n typical_data = np.empty([0], dtype=np.int32)\n\n for data_type, label_dir_set in enumerate(label_dirs):\n print(\">> 现在进入【第%d类】数据\" % (data_type+1))\n for rotate_angle in label_dir_set:\n print(\"-- 需要旋转%d度的数据集:\" % (rotate_angle))\n # 获取csv文件列表\n csv_list = get_csv_list(\n root_path + str(data_type) + '/' + str(rotate_angle))\n # 获取csv文件点集数据\n csv_data = get_csv_data(csv_list)\n # 遍历样本数据\n for i, sample_data in enumerate(csv_data):\n # 求出点集的中心坐标点\n centroid_x, centroid_y = get_centroid(sample_data)\n # 根据中心坐标点旋转点集中的点\n \n for index, coordinate in enumerate(sample_data):\n x, y = coordinate\n n_x, n_y = n_rotate(\n radians(rotate_angle), x, y, centroid_x, centroid_y)\n # 旋转后的点集坐标中心化\n sample_data[index] = [n_x-centroid_x, n_y-centroid_y]\n # 旋转后的点集回归原列表\n csv_data[i] = sample_data\n # 归集点集标签\n typical_data = np.append(typical_data, [data_type], axis=0)\n # 
将每个不同数量的样本合并到主列表中(n,1024,2)=>(m,n,1024,2)\n sum_data = np.append(sum_data, csv_data, axis=0)\n\n return sum_data, typical_data\n\n\nif __name__ == \"__main__\":\n\n sum_train_data, train_typical_data = get_label_and_data(\n TRAIN_CSV_PATH, label_dirs)\n sum_test_data, test_typical_data = get_label_and_data(\n TEST_CSV_PATH, label_dirs)\n\n # 随机打乱点集数据\n rand_sum_train_data, rand_train_typical_data = exchange_data_index(\n sum_train_data, train_typical_data)\n rand_sum_test_data, rand_test_typical_data = exchange_data_index(\n sum_test_data, test_typical_data)\n\n if os.access(data_path, os.F_OK) == False:\n os.mkdir(data_path)\n\n if os.access(train_file_path, os.F_OK) == True:\n os.remove(train_file_path)\n open(train_file_path, 'w')\n with h5py.File(train_file_path, 'r+') as f:\n f.create_dataset('data', data=rand_sum_train_data)\n f.create_dataset('label', data=rand_train_typical_data)\n\n if os.access(test_file_path, os.F_OK) == True:\n os.remove(test_file_path)\n open(test_file_path, 'w')\n with h5py.File(test_file_path, 'r+') as f:\n f.create_dataset('data', data=rand_sum_test_data)\n f.create_dataset('label', data=rand_test_typical_data)\n" ]
[ [ "numpy.genfromtxt", "numpy.append", "numpy.delete", "numpy.insert", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
skinnider/low-data-generative-models
[ "6e743b6d1ba3265f58fcbd33f2c60e633cf25999", "6e743b6d1ba3265f58fcbd33f2c60e633cf25999" ]
[ "python/train_model.py", "python/clean-SMILES.py" ]
[ "\"\"\"\nTrain a language model to generate SMILES.\n\"\"\"\n\nimport argparse\nimport os\nimport numpy as np\nimport pandas as pd\nimport random\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n# suppress Chem.MolFromSmiles error output\nfrom rdkit import rdBase\nrdBase.DisableLog('rdApp.error')\n\n# set working directory\ngit_dir = os.path.expanduser(\"~/git/low-data-generative-models\")\npython_dir = git_dir + \"/python\"\nos.chdir(python_dir)\n\n# import classes\nfrom models import RNN, OneHotRNN, EarlyStopping\nfrom datasets import SmilesDataset, SelfiesDataset, SmilesCollate\nfrom functions import decrease_learning_rate, print_update, track_loss, \\\n sample_smiles, write_smiles\n\n### CLI\nparser = argparse.ArgumentParser(\n description='Chemical structure language model interface')\n# input file\nparser.add_argument('--smiles_file', type=str,\n help='location of the SMILES file to train on')\nparser.add_argument('--selfies', dest='selfies', action='store_true')\nparser.set_defaults(selfies=False)\n# output files\nparser.add_argument('--output_dir', type=str,\n help='directory to save trained models to')\n# RNN parameters\nparser.add_argument('--rnn_type', type=str, choices=['RNN', 'LSTM', 'GRU'],\n default='GRU', help='type of language model to train')\nparser.add_argument('--embedding_size', type=int, default=128,\n help='size of vocabulary embedding')\nparser.add_argument('--hidden_size', type=int, default=512,\n help='size of language model hidden layers')\nparser.add_argument('--n_layers', type=int, default=3,\n help='number of layers in language model')\nparser.add_argument('--dropout', type=float, default=0,\n help='amount of dropout (0-1) to apply to model')\nparser.add_argument('--bidirectional', type=bool, default=False,\n help='for LSTMs only, train a bidirectional model')\nparser.add_argument('--nonlinearity', type=str, choices=['tanh', 'relu'],\n default='tanh', help='for RNNs only, nonlinearity to use')\nparser.add_argument('--tie_weights', dest='tie_weights',\n help='require embedding/dense linear layers use the ' +\\\n 'same weights',\n action='store_true')\nparser.set_defaults(tie_weights=False)\n# optimization parameters\nparser.add_argument('--learning_rate', type=float, default=0.001,\n help='initial learning rate')\nparser.add_argument('--learning_rate_decay', default=None, # type=float,\n help='amount (0-1) to decrease learning rate by every ' +\\\n 'fixed number of steps')\nparser.add_argument('--learning_rate_decay_steps', default=10000, type=int,\n help='# of steps between learning rate decrements')\nparser.add_argument('--gradient_clip', default=None, # type=float,\n help='amount to which to clip the gradients')\n# training schedule\nparser.add_argument('--seed', type=int, default=0,\n help='seed for random number generator')\nparser.add_argument('--batch_size', type=int, default=128,\n help='batch size')\nparser.add_argument('--max_epochs', type=int, default=1000,\n help='maximum number of epochs to train for')\nparser.add_argument('--patience', type=int, default=100,\n help='patience for early stopping')\n# sampling from trained models\nparser.add_argument('--sample_idx', type=int, default=0,\n help='index of the model being trained (zero-indexed)')\nparser.add_argument('--sample_every_epochs', type=int,\n help='if set, sample SMILES from the trained model' +\n 'every n epochs')\nparser.add_argument('--sample_every_steps', type=int,\n help='if set, sample 
SMILES from the trained model' +\n 'every n steps')\nparser.add_argument('--log_every_epochs', type=int,\n help='log training/validation losses every n epochs')\nparser.add_argument('--log_every_steps', type=int,\n help='log training/validation losses every n steps')\nparser.add_argument('--sample_size', type=int, default=100000,\n help='size of each sample from the trained model')\n# start with pretrained model\nparser.add_argument('--pretrain_model', type=str, default=None,\n help='load parameters from a pretrained model')\n# enforce a larger vocabulary\nparser.add_argument('--vocab_file', type=str, default=None,\n help='file containing all tokens in vocabulary')\n# for use in grid\nparser.add_argument('--stop_if_exists', dest='stop_if_exists',\n action='store_true')\nparser.set_defaults(stop_if_exists=False)\n\n# parse arguments\nargs = parser.parse_args()\n\n# manually deal with gradient clipping\ntry:\n args.gradient_clip = float(args.gradient_clip)\nexcept (ValueError, TypeError):\n args.gradient_clip = None\n\n# manually deal with learning rate decay\ntry:\n args.learning_rate_decay = float(args.learning_rate_decay)\nexcept (ValueError, TypeError):\n args.learning_rate_decay = None\n\n# log args (make searching through logging directory easier)\nfor arg in vars(args):\n print(arg, \": \", getattr(args, arg), \"(\", type(getattr(args, arg)), \")\")\n\n# optionally stop if output file already exists\nif args.selfies:\n smiles_filename = \"sample-\" + str(args.sample_idx + 1) + \"-SELFIES.smi\"\nelse:\n smiles_filename = \"sample-\" + str(args.sample_idx + 1) + \"-SMILES.smi\"\nsmiles_file = os.path.join(args.output_dir, smiles_filename)\nif os.path.isfile(smiles_file) and args.stop_if_exists:\n print(\"output file \" + smiles_file + \" exists: stopping early\")\n sys.exit()\n\n# make output directories\nif not os.path.isdir(args.output_dir):\n try:\n os.makedirs(args.output_dir)\n except FileExistsError:\n pass\n\n## seed all RNGs\ntorch.manual_seed(args.seed)\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\nif torch.cuda.is_available():\n print(\"using cuda\")\n torch.cuda.manual_seed_all(args.seed)\n\n# set up dataset\nif args.selfies:\n dataset = SelfiesDataset(selfies_file=args.smiles_file)\nelse:\n dataset = SmilesDataset(smiles_file=args.smiles_file,\n vocab_file=args.vocab_file)\n\n# set up batching\nloader = DataLoader(dataset,\n batch_size=args.batch_size,\n shuffle=True,\n drop_last=True,\n collate_fn=SmilesCollate(dataset.vocabulary))\n\n# set up model\nif args.embedding_size > 0:\n model = RNN(vocabulary=dataset.vocabulary,\n rnn_type=args.rnn_type,\n embedding_size=args.embedding_size,\n hidden_size=args.hidden_size,\n n_layers=args.n_layers,\n dropout=args.dropout,\n bidirectional=args.bidirectional,\n tie_weights=args.tie_weights,\n nonlinearity=args.nonlinearity)\nelse:\n # no embedding layer (one-hot encoding)\n model = OneHotRNN(vocabulary=dataset.vocabulary,\n rnn_type=args.rnn_type,\n hidden_size=args.hidden_size,\n n_layers=args.n_layers,\n dropout=args.dropout,\n bidirectional=args.bidirectional,\n nonlinearity=args.nonlinearity)\n\n# optionally, load model parameters from file\nif args.pretrain_model is not None:\n model.load_state_dict(torch.load(args.pretrain_model))\n\n# set up optimizer\noptimizer = optim.Adam(model.parameters(),\n betas=(0.9, 0.999), ## default\n eps=1e-08, ## default\n lr=args.learning_rate)\n\n# set up early stopping\nearly_stop = EarlyStopping(patience=args.patience)\n\n# set up training schedule file\nsched_filename = 
\"training_schedule-\" + str(args.sample_idx + 1) + \".csv\"\nsched_file = os.path.join(args.output_dir, sched_filename)\n\n# iterate over epochs\ncounter = 0\nfor epoch in range(args.max_epochs):\n # iterate over batches\n for batch_idx, batch in tqdm(enumerate(loader), total=len(loader)):\n batch, lengths = batch\n\n # increment counter\n counter += 1\n\n # calculate loss\n log_p = model.loss(batch, lengths)\n loss = log_p.mean()\n\n # zero gradients, calculate new gradients, and take a step\n optimizer.zero_grad()\n loss.backward()\n # clip gradient\n if args.gradient_clip is not None:\n nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clip)\n\n optimizer.step()\n\n # check learning rate decay\n if args.learning_rate_decay is not None and \\\n counter % args.learning_rate_decay_steps == 0:\n decrease_learning_rate(optimizer,\n multiplier=args.learning_rate_decay)\n\n # print update and write training schedule?\n if args.log_every_steps is not None:\n if counter % args.log_every_steps == 0:\n print_update(model, dataset, epoch, batch_idx + 1, loss.item(),\n args.batch_size, selfies=args.selfies)\n track_loss(sched_file, model, dataset, epoch,\n counter, loss.item(), args.batch_size)\n\n # save SMILES?\n if args.sample_every_steps is not None:\n if counter % args.sample_every_steps == 0:\n sample_smiles(args.output_dir, args.sample_idx, model,\n args.sample_size, epoch, counter)\n\n # calculate validation loss\n validation, lengths = dataset.get_validation(args.batch_size)\n validation_loss = model.loss(validation, lengths).mean().detach()\n # check early stopping\n model_filename = \"model-\" + str(args.sample_idx + 1) + \".pt\"\n model_file = os.path.join(args.output_dir, model_filename)\n early_stop(validation_loss.item(), model, model_file, counter)\n\n if early_stop.stop:\n break\n\n # print update and write training schedule?\n if args.log_every_epochs is not None:\n print_update(model, dataset, epoch, 'NA', loss.item(), args.batch_size)\n track_loss(sched_file, model, dataset, epoch,\n counter, loss.item(), args.batch_size)\n\n # save SMILES?\n if args.sample_every_epochs is not None:\n sample_smiles(args.output_dir, args.sample_idx, model,\n args.sample_size, epoch, counter)\n\n if early_stop.stop:\n break\n\n# append information about final training step\nif args.log_every_epochs is not None or args.log_every_steps is not None:\n sched = pd.DataFrame({'epoch': [None],\n 'step': [early_stop.step_at_best],\n 'outcome': ['training loss'],\n 'value': [early_stop.best_loss]})\n sched.to_csv(sched_file, index=False, mode='a', header=False)\n\n# load the best model\nmodel.load_state_dict(torch.load(model_file))\nmodel.eval() ## enable evaluation modes\n\n# sample a set of SMILES from the final, trained model\nsampled_smiles = []\nwhile len(sampled_smiles) < args.sample_size:\n sampled_smiles.extend(model.sample(args.batch_size, return_smiles=True))\n\n# write sampled SMILES\nwrite_smiles(sampled_smiles, smiles_file)\n", "\"\"\"\nClean and canonicalize SMILES from chemical structure databases and write them\nto a line-delimited file.\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport sys\nfrom itertools import chain\nfrom rdkit import Chem\nfrom rdkit.Chem import SDMolSupplier\nfrom tqdm import tqdm\n\n# set working directory\ngit_dir = os.path.expanduser(\"~/git/low-data-generative-models\")\npython_dir = git_dir + \"/python\"\nos.chdir(python_dir)\nsys.path.append(python_dir)\n\n# import functions\nfrom functions import clean_mols, remove_salts_solvents, 
read_smiles, \\\n NeutraliseCharges\n# import Vocabulary\nfrom datasets import Vocabulary\n\n# parse arguments\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\n\n# read SMILES\nbasename = os.path.basename(input_file)\nif \"chembl\" in basename.lower():\n # read ChEMBL chemical representations\n chembl = pd.read_csv(input_file, sep='\\t')\n # get canonical SMILES\n smiles = chembl['canonical_smiles'].values\nelif \"gdb\" in basename.lower():\n # read GDB chemical representations\n gdb13 = pd.read_csv(input_file, sep='\\t', header=None)\n # get canonical SMILES\n smiles = gdb13[[0]].values\nelif \".sdf\" in basename.lower():\n # read all metabolites\n suppl = SDMolSupplier(input_file)\n mols = [x for x in suppl if not x is None]\n # convert back to SMILES for the rest of the preprocessing steps\n smiles = [Chem.MolToSmiles(mol, isomericSmiles=False) for mol in mols]\nelif \"ymdb\" in input_file.lower():\n # read YMDB\n ymdb = pd.read_csv(input_file)\n ymdb = ymdb.dropna()\n smiles = ymdb['smiles']\nelif \"coconut\" in input_file.lower():\n # read COCONUT (MetFrag-format CSV)\n coconut = pd.read_csv(input_file)\n smiles = coconut['clean_smiles'].values\nelse:\n smiles = read_smiles(input_file)\n\n# remove duplicated SMILES\nsmiles = np.unique(smiles)\n# record original count\ninitial_count = len(smiles)\nprint(\"parsing \" + str(initial_count) + \" unique SMILES\")\n\n# convert to molecules\nmols = clean_mols(smiles, stereochem=False)\n# remove molecules that could not be parsed\nmols = [mol for mol in mols if mol]\nprint(\"parsed \" + str(len(mols)) + \" unique, valid canonical SMILES\")\n\n# remove salts/solvents\nmols = [remove_salts_solvents(mol, hac=3) for mol in tqdm(mols)]\n# remove molecules that could not be parsed\nmols = [mol for mol in mols if mol]\n# remove charges\nmols = [NeutraliseCharges(mol) for mol in tqdm(mols)]\nprint(\"parsed \" + str(len(mols)) + \\\n \" molecules with >3 heavy atoms and 1 fragment\")\n\n# remove molecules with invalid atoms\n## what unique atoms are present in any molecule?\nelements = [[atom.GetSymbol() for atom in mol.GetAtoms()] for mol in mols]\ncounts = np.unique(list(chain(*elements)), return_counts=True)\n## define valid symbols\nvalid = set(['Br', 'C', 'Cl', 'F', 'H', 'I', 'N', 'O', 'P', 'S'])\nmols = [mols[idx] for idx, atoms in enumerate(elements) if \\\n len(set(atoms) - valid) == 0]\nprint(\"parsed \" + str(len(mols)) + \\\n \" molecules with all valid atoms (C/N/O/P/S/F/Br/Cl/I)\")\n\n# convert back to SMILES\nsmiles = [Chem.MolToSmiles(mol, isomericSmiles=False) for mol in tqdm(mols)]\nsmiles = np.unique(smiles)\n\n# print the vocabulary\nvocabulary = Vocabulary(smiles=smiles)\nprint(\"vocabulary of {} characters:\".format(len(vocabulary)))\nprint(vocabulary.characters)\n\n# remove any molecules containing tokens found in <0.01% of molecules,\n# or five or fewer molecules\nvocab_before = len(vocabulary)\nn_smiles = len(smiles)\nfor token in vocabulary.characters:\n token_smiles = [sm for sm in smiles if token in vocabulary.tokenize(sm)]\n pct_smiles = len(token_smiles) / n_smiles\n if pct_smiles < 0.01 / 100 or len(token_smiles) <= 10:\n # remove from SMILES\n smiles = list(set(smiles).difference(token_smiles))\n\n# recreate the vocabulary and print new dataset size\nvocabulary = Vocabulary(smiles=smiles)\nvocab_after = len(vocabulary)\nprint(\"after removing tokens found in <0.01% of molecules, {} remain\".format(\n len(smiles)))\nprint(\"updated vocabulary of {} (of {}) characters:\".format(\n vocab_after, 
vocab_before))\nprint(vocabulary.characters)\n\n# write to line-delimited file\nwith open(output_file, 'w') as f:\n for sm in smiles:\n f.write(sm + '\\n')\n\nprint(\"wrote \" + str(len(smiles)) + \" SMILES to output file: \" + output_file)\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.manual_seed", "pandas.DataFrame", "torch.cuda.is_available", "torch.cuda.manual_seed_all" ], [ "pandas.read_csv", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
rlaehgns5399/GoogLeNet-Inception-tf
[ "eb9597634eec9a7b511e967ad8c7b2552563755f" ]
[ "src/helper/trainer.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: trainer.py\n# Author: Qian Ge <[email protected]>\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n\n\ndef display(global_step,\n step,\n scaler_sum_list,\n name_list,\n collection,\n summary_val=None,\n summary_writer=None,\n ):\n print('[step: {}]'.format(global_step), end='')\n for val, name in zip(scaler_sum_list, name_list):\n print(' {}: {:.4f}'.format(name, val * 1. / step), end='')\n print('')\n if summary_writer is not None:\n s = tf.Summary()\n for val, name in zip(scaler_sum_list, name_list):\n s.value.add(tag='{}/{}'.format(collection, name),\n simple_value=val * 1. / step)\n summary_writer.add_summary(s, global_step)\n if summary_val is not None:\n summary_writer.add_summary(summary_val, global_step)\n\nclass Trainer(object):\n def __init__(self, train_model, valid_model, train_data, init_lr=1e-3):\n\n self._t_model = train_model\n self._v_model = valid_model\n self._train_data = train_data\n self._init_lr = init_lr\n\n self._train_op = train_model.get_train_op()\n self._train_loss_op = train_model.get_loss()\n self._train_accuracy_op = train_model.get_accuracy()\n\n self._valid_loss_op = valid_model.get_loss()\n self._valid_accuracy_op = valid_model.get_accuracy()\n # self._train_summary_op = train_model.get_train_summary()\n # self._valid_summary_op = train_model.get_valid_summary()\n\n self.global_step = 0\n self.epoch_id = 0\n\n def train_epoch(self, sess, keep_prob=1., summary_writer=None):\n if self.epoch_id < 35:\n self._lr = self._init_lr\n elif self.epoch_id < 50:\n self._lr = self._init_lr / 10.\n else:\n self._lr = self._init_lr / 100.\n # self._t_model.set_is_training(True)\n display_name_list = ['loss', 'accuracy']\n cur_summary = None\n\n cur_epoch = self._train_data.epochs_completed\n\n step = 0\n loss_sum = 0\n acc_sum = 0\n self.epoch_id += 1\n while cur_epoch == self._train_data.epochs_completed:\n self.global_step += 1\n step += 1\n\n batch_data = self._train_data.next_batch_dict()\n im = batch_data['image']\n label = batch_data['label']\n _, loss, acc = sess.run(\n [self._train_op, self._train_loss_op, self._train_accuracy_op], \n feed_dict={self._t_model.image: im,\n self._t_model.label: label,\n self._t_model.lr: self._lr,\n self._t_model.keep_prob: keep_prob})\n\n loss_sum += loss\n acc_sum += acc\n\n if step % 100 == 0 or step == 1:\n display(self.global_step,\n step,\n [loss_sum, acc_sum],\n display_name_list,\n 'train',\n summary_val=cur_summary,\n summary_writer=summary_writer)\n\n print('==== epoch: {}, lr:{} ===='.format(cur_epoch, self._lr))\n display(self.global_step,\n step,\n [loss_sum, acc_sum],\n display_name_list,\n 'train',\n summary_val=cur_summary,\n summary_writer=summary_writer)\n\n def valid_epoch(self, sess, dataflow, summary_writer=None):\n display_name_list = ['loss', 'accuracy']\n cur_summary = None\n dataflow.reset_epoch()\n\n step = 0\n loss_sum = 0\n acc_sum = 0\n while dataflow.epochs_completed < 1:\n step += 1\n\n batch_data = dataflow.next_batch_dict()\n im = batch_data['image']\n label = batch_data['label']\n loss, acc = sess.run(\n [self._valid_loss_op, self._valid_accuracy_op], \n feed_dict={self._v_model.image: im,\n self._v_model.label: label})\n\n loss_sum += loss\n acc_sum += acc\n\n print('[Valid]: ', end='')\n display(self.global_step,\n step,\n [loss_sum, acc_sum],\n display_name_list,\n 'valid',\n summary_val=cur_summary,\n summary_writer=summary_writer)\n" ]
[ [ "tensorflow.Summary" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CalebEverett/fastai-dl2
[ "64d23592eddca6ca1f3647e73c319e97c8eb392b" ]
[ "fastai/torch_imports.py" ]
[ "import os\nimport torch, torchvision, torchtext\nfrom torch import nn, cuda, backends, FloatTensor, LongTensor, optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, TensorDataset\nfrom torch.nn.init import kaiming_uniform, kaiming_normal\nfrom torchvision.transforms import Compose\nfrom torchvision.models import resnet18, resnet34, resnet50, resnet101, resnet152\nfrom torchvision.models import vgg16_bn, vgg19_bn\nfrom torchvision.models import densenet121, densenet161, densenet169, densenet201\n\nfrom .models.resnext_50_32x4d import resnext_50_32x4d\nfrom .models.resnext_101_32x4d import resnext_101_32x4d\nfrom .models.resnext_101_64x4d import resnext_101_64x4d\nfrom .models.wrn_50_2f import wrn_50_2f\nfrom .models.inceptionresnetv2 import InceptionResnetV2\nfrom .models.inceptionv4 import InceptionV4\nfrom .models.nasnet import nasnetalarge\n\nfrom unet_models import unet11\n\nimport warnings\nwarnings.filterwarnings('ignore', message='Implicit dimension choice', category=UserWarning)\n\ndef children(m): return m if isinstance(m, (list, tuple)) else list(m.children())\ndef save_model(m, p): torch.save(m.state_dict(), p)\ndef load_model(m, p): m.load_state_dict(torch.load(p, map_location=lambda storage, loc: storage))\n\ndef load_pre(pre, f, fn):\n m = f()\n path = os.path.dirname(__file__)\n if pre: load_model(m, f'{path}/weights/{fn}.pth')\n return m\n\ndef _fastai_model(name, paper_title, paper_href):\n def add_docs_wrapper(f):\n f.__doc__ = f\"\"\"{name} model from\n `\"{paper_title}\" <{paper_href}>`_\n\n Args:\n pre (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n return f\n return add_docs_wrapper\n\n@_fastai_model('Inception 4', 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning',\n 'https://arxiv.org/pdf/1602.07261.pdf')\ndef inception_4(pre): return children(inceptionv4(pretrained=pre))[0]\n\n@_fastai_model('Inception 4', 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning',\n 'https://arxiv.org/pdf/1602.07261.pdf')\ndef inceptionresnet_2(pre): return load_pre(pre, InceptionResnetV2, 'inceptionresnetv2-d579a627')\n\n@_fastai_model('ResNeXt 50', 'Aggregated Residual Transformations for Deep Neural Networks',\n 'https://arxiv.org/abs/1611.05431')\ndef resnext50(pre): return load_pre(pre, resnext_50_32x4d, 'resnext_50_32x4d')\n\n@_fastai_model('ResNeXt 101_32', 'Aggregated Residual Transformations for Deep Neural Networks',\n 'https://arxiv.org/abs/1611.05431')\ndef resnext101(pre): return load_pre(pre, resnext_101_32x4d, 'resnext_101_32x4d')\n\n@_fastai_model('ResNeXt 101_64', 'Aggregated Residual Transformations for Deep Neural Networks',\n 'https://arxiv.org/abs/1611.05431')\ndef resnext101_64(pre): return load_pre(pre, resnext_101_64x4d, 'resnext_101_64x4d')\n\n@_fastai_model('Wide Residual Networks', 'Wide Residual Networks',\n 'https://arxiv.org/pdf/1605.07146.pdf')\ndef wrn(pre): return load_pre(pre, wrn_50_2f, 'wrn_50_2f')\n\n@_fastai_model('Densenet-121', 'Densely Connected Convolutional Networks',\n 'https://arxiv.org/pdf/1608.06993.pdf')\ndef dn121(pre): return children(densenet121(pre))[0]\n\n@_fastai_model('Densenet-169', 'Densely Connected Convolutional Networks',\n 'https://arxiv.org/pdf/1608.06993.pdf')\ndef dn161(pre): return children(densenet161(pre))[0]\n\n@_fastai_model('Densenet-161', 'Densely Connected Convolutional Networks',\n 'https://arxiv.org/pdf/1608.06993.pdf')\ndef dn169(pre): return 
children(densenet169(pre))[0]\n\n@_fastai_model('Densenet-201', 'Densely Connected Convolutional Networks',\n 'https://arxiv.org/pdf/1608.06993.pdf')\ndef dn201(pre): return children(densenet201(pre))[0]\n\n@_fastai_model('Vgg-16 with batch norm added', 'Very Deep Convolutional Networks for Large-Scale Image Recognition',\n 'https://arxiv.org/pdf/1409.1556.pdf')\ndef vgg16(pre): return children(vgg16_bn(pre))[0]\n\n@_fastai_model('Vgg-19 with batch norm added', 'Very Deep Convolutional Networks for Large-Scale Image Recognition',\n 'https://arxiv.org/pdf/1409.1556.pdf')\ndef vgg19(pre): return children(vgg19_bn(pre))[0]\n\n@_fastai_model('Vgg-11 with U-Net', 'TernausNet: U-Net with VGG11 Encoder Pre-Trained on ImageNet for Image Segmentation',\n 'https://arxiv.org/pdf/1801.05746.pdf')\ndef ternausnet(pre): return children(unet11(pre))" ]
[ [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
qq456cvb/CPPF
[ "79366978854ae18b14c69ac850ea64b9dc286081" ]
[ "models/model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .sprin import GlobalInfoProp, SparseSO3Conv\nimport numpy as np\n\n\nclass ResLayer(torch.nn.Module):\n def __init__(self, dim_in, dim_out, bn=False) -> None:\n super().__init__()\n assert(bn is False)\n self.fc1 = torch.nn.Linear(dim_in, dim_out)\n if bn:\n self.bn1 = torch.nn.BatchNorm1d(dim_out)\n else:\n self.bn1 = lambda x: x\n self.fc2 = torch.nn.Linear(dim_out, dim_out)\n if bn:\n self.bn2 = torch.nn.BatchNorm1d(dim_out)\n else:\n self.bn2 = lambda x: x\n if dim_in != dim_out:\n self.fc0 = torch.nn.Linear(dim_in, dim_out)\n else:\n self.fc0 = None\n \n def forward(self, x):\n x_res = x if self.fc0 is None else self.fc0(x)\n x = F.relu(self.bn1(self.fc1(x)))\n x = self.bn2(self.fc2(x))\n return x + x_res\n\n \nclass PointEncoder(nn.Module):\n def __init__(self, k, spfcs, out_dim, num_layers=2, num_nbr_feats=2) -> None:\n super().__init__()\n self.k = k\n self.spconvs = nn.ModuleList()\n self.spconvs.append(SparseSO3Conv(32, num_nbr_feats, out_dim, *spfcs))\n self.aggrs = nn.ModuleList()\n self.aggrs.append(GlobalInfoProp(out_dim, out_dim // 4))\n for _ in range(num_layers - 1):\n self.spconvs.append(SparseSO3Conv(32, out_dim + out_dim // 4, out_dim, *spfcs))\n self.aggrs.append(GlobalInfoProp(out_dim, out_dim // 4))\n\n def forward(self, pc, pc_normal, dist):\n nbrs_idx = torch.topk(dist, self.k, largest=False, sorted=False)[1] #[..., N, K]\n pc_nbrs = torch.gather(pc.unsqueeze(-3).expand(*pc.shape[:-1], *pc.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc.shape[-1])) #[..., N, K, 3]\n pc_nbrs_centered = pc_nbrs - pc.unsqueeze(-2) #[..., N, K, 3]\n pc_nbrs_norm = torch.norm(pc_nbrs_centered, dim=-1, keepdim=True)\n \n pc_normal_nbrs = torch.gather(pc_normal.unsqueeze(-3).expand(*pc_normal.shape[:-1], *pc_normal.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc_normal.shape[-1])) #[..., N, K, 3]\n pc_normal_cos = torch.sum(pc_normal_nbrs * pc_normal.unsqueeze(-2), -1, keepdim=True)\n \n feat = self.aggrs[0](self.spconvs[0](pc_nbrs, torch.cat([pc_nbrs_norm, pc_normal_cos], -1), pc))\n for i in range(len(self.spconvs) - 1):\n spconv = self.spconvs[i + 1]\n aggr = self.aggrs[i + 1]\n feat_nbrs = torch.gather(feat.unsqueeze(-3).expand(*feat.shape[:-1], *feat.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, feat.shape[-1]))\n feat = aggr(spconv(pc_nbrs, feat_nbrs, pc))\n return feat\n \n def forward_nbrs(self, pc, pc_normal, nbrs_idx):\n pc_nbrs = torch.gather(pc.unsqueeze(-3).expand(*pc.shape[:-1], *pc.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc.shape[-1])) #[..., N, K, 3]\n pc_nbrs_centered = pc_nbrs - pc.unsqueeze(-2) #[..., N, K, 3]\n pc_nbrs_norm = torch.norm(pc_nbrs_centered, dim=-1, keepdim=True)\n \n pc_normal_nbrs = torch.gather(pc_normal.unsqueeze(-3).expand(*pc_normal.shape[:-1], *pc_normal.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc_normal.shape[-1])) #[..., N, K, 3]\n pc_normal_cos = torch.sum(pc_normal_nbrs * pc_normal.unsqueeze(-2), -1, keepdim=True)\n \n feat = self.aggrs[0](self.spconvs[0](pc_nbrs, torch.cat([pc_nbrs_norm, pc_normal_cos], -1), pc))\n for i in range(len(self.spconvs) - 1):\n spconv = self.spconvs[i + 1]\n aggr = self.aggrs[i + 1]\n feat_nbrs = torch.gather(feat.unsqueeze(-3).expand(*feat.shape[:-1], *feat.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, feat.shape[-1]))\n feat = aggr(spconv(pc_nbrs, feat_nbrs, pc))\n return feat\n\n\nclass PPFEncoder(nn.Module):\n def __init__(self, 
ppffcs, out_dim) -> None:\n super().__init__()\n self.res_layers = nn.ModuleList()\n for i in range(len(ppffcs) - 1):\n dim_in, dim_out = ppffcs[i], ppffcs[i + 1]\n self.res_layers.append(ResLayer(dim_in, dim_out, bn=False))\n self.final = nn.Linear(ppffcs[-1], out_dim)\n\n def forward(self, pc, pc_normal, feat, dist=None, idxs=None):\n if idxs is not None:\n return self.forward_with_idx(pc[0], pc_normal[0], feat[0], idxs)[None]\n xx = pc.unsqueeze(-2) - pc.unsqueeze(-3)\n xx_normed = xx / (dist[..., None] + 1e-7)\n\n outputs = []\n for idx in torch.chunk(torch.arange(pc.shape[1]), 5):\n feat_chunk = feat[..., idx, :]\n target_shape = [*feat_chunk.shape[:-2], feat_chunk.shape[-2], feat.shape[-2], feat_chunk.shape[-1]] # B x NC x N x F\n xx_normed_chunk = xx_normed[..., idx, :, :]\n ppf = torch.cat([\n torch.sum(pc_normal[..., idx, :].unsqueeze(-2) * xx_normed_chunk, -1, keepdim=True), \n torch.sum(pc_normal.unsqueeze(-3) * xx_normed_chunk, -1, keepdim=True), \n torch.sum(pc_normal[..., idx, :].unsqueeze(-2) * pc_normal.unsqueeze(-3), -1, keepdim=True), \n dist[..., idx, :, None],\n ], -1)\n # ppf.zero_()\n final_feat = torch.cat([feat_chunk[..., None, :].expand(*target_shape), feat[..., None, :, :].expand(*target_shape), ppf], -1)\n \n output = final_feat\n for res_layer in self.res_layers:\n output = res_layer(output)\n outputs.append(output)\n \n output = torch.cat(outputs, dim=-3)\n return self.final(output)\n\n def forward_with_idx(self, pc, pc_normal, feat, idxs):\n a_idxs = idxs[:, 0]\n b_idxs = idxs[:, 1]\n xy = pc[a_idxs] - pc[b_idxs]\n xy_norm = torch.norm(xy, dim=-1)\n xy_normed = xy / (xy_norm[..., None] + 1e-7)\n pnormal_cos = pc_normal[a_idxs] * pc_normal[b_idxs]\n ppf = torch.cat([\n torch.sum(pc_normal[a_idxs] * xy_normed, -1, keepdim=True),\n torch.sum(pc_normal[b_idxs] * xy_normed, -1, keepdim=True),\n torch.sum(pnormal_cos, -1, keepdim=True),\n xy_norm[..., None],\n ], -1)\n # ppf.zero_()\n \n final_feat = torch.cat([feat[a_idxs], feat[b_idxs], ppf], -1)\n \n output = final_feat\n for res_layer in self.res_layers:\n output = res_layer(output)\n return self.final(output)\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.norm", "torch.cat", "torch.nn.ModuleList", "torch.sum", "torch.nn.Linear", "torch.arange", "torch.topk" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ram81/habitat-imitation-baselines
[ "c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505" ]
[ "habitat/tasks/nav/object_nav_task.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nfrom typing import Any, List, Optional\n\nimport attr\nfrom cv2 import log\nimport numpy as np\nfrom gym import spaces\n\nfrom habitat.config import Config\nfrom habitat.core.dataset import SceneState\nfrom habitat.core.logging import logger\nfrom habitat.core.registry import registry\nfrom habitat.core.simulator import AgentState, Sensor, SensorTypes\nfrom habitat.core.utils import not_none_validator\nfrom habitat.tasks.nav.nav import (\n NavigationEpisode,\n NavigationGoal,\n NavigationTask\n)\n\ntry:\n from habitat.datasets.object_nav.object_nav_dataset import (\n ObjectNavDatasetV1,\n )\nexcept ImportError:\n pass\n\n\ntask_cat2mpcat40 = [\n 3, # ('chair', 2, 0)\n 5, # ('table', 4, 1)\n 6, # ('picture', 5, 2)\n 7, # ('cabinet', 6, 3)\n 8, # ('cushion', 7, 4)\n 10, # ('sofa', 9, 5),\n 11, # ('bed', 10, 6)\n 13, # ('chest_of_drawers', 12, 7),\n 14, # ('plant', 13, 8)\n 15, # ('sink', 14, 9)\n 18, # ('toilet', 17, 10),\n 19, # ('stool', 18, 11),\n 20, # ('towel', 19, 12)\n 22, # ('tv_monitor', 21, 13)\n 23, # ('shower', 22, 14)\n 25, # ('bathtub', 24, 15)\n 26, # ('counter', 25, 16),\n 27, # ('fireplace', 26, 17),\n 33, # ('gym_equipment', 32, 18),\n 34, # ('seating', 33, 19),\n 38, # ('clothes', 37, 20),\n 43, # ('foodstuff', 42, 21),\n 44, # ('stationery', 43, 22),\n 45, # ('fruit', 44, 23),\n 46, # ('plaything', 45, 24),\n 47, # ('hand_tool', 46, 25),\n 48, # ('game_equipment', 47, 26),\n 49, # ('kitchenware', 48, 27)\n]\n\nmapping_mpcat40_to_goal21 = {\n 3: 1,\n 5: 2,\n 6: 3,\n 7: 4,\n 8: 5,\n 10: 6,\n 11: 7,\n 13: 8,\n 14: 9,\n 15: 10,\n 18: 11,\n 19: 12,\n 20: 13,\n 22: 14,\n 23: 15,\n 25: 16,\n 26: 17,\n 27: 18,\n 33: 19,\n 34: 20,\n 38: 21,\n 43: 22, # ('foodstuff', 42, task_cat: 21)\n 44: 28, # ('stationery', 43, task_cat: 22)\n 45: 26, # ('fruit', 44, task_cat: 23)\n 46: 25, # ('plaything', 45, task_cat: 24)\n 47: 24, # ('hand_tool', 46, task_cat: 25)\n 48: 23, # ('game_equipment', 47, task_cat: 26)\n 49: 27, # ('kitchenware', 48, task_cat: 27)\n}\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass AgentStateSpec:\n r\"\"\"Agent data specifications that capture states of agent and sensor in replay state.\n \"\"\"\n position: Optional[List[float]] = attr.ib(default=None)\n rotation: Optional[List[float]] = attr.ib(default=None)\n sensor_data: Optional[dict] = attr.ib(default=None)\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass ReplayActionSpec:\n r\"\"\"Replay specifications that capture metadata associated with action.\n \"\"\"\n action: str = attr.ib(default=None, validator=not_none_validator)\n agent_state: Optional[AgentStateSpec] = attr.ib(default=None)\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass ObjectGoalNavEpisode(NavigationEpisode):\n r\"\"\"ObjectGoal Navigation Episode\n\n :param object_category: Category of the obect\n \"\"\"\n object_category: Optional[str] = None\n reference_replay: Optional[List[ReplayActionSpec]] = None\n scene_state: Optional[List[SceneState]] = None\n is_thda: Optional[bool] = False\n scene_dataset: Optional[str] = \"mp3d\"\n\n @property\n def goals_key(self) -> str:\n r\"\"\"The key to retrieve the goals\"\"\"\n return f\"{os.path.basename(self.scene_id)}_{self.object_category}\"\n\n\[email protected](auto_attribs=True)\nclass ObjectViewLocation:\n r\"\"\"ObjectViewLocation provides information about a position 
around an object goal\n usually that is navigable and the object is visible with specific agent\n configuration that episode's dataset was created.\n that is target for\n navigation. That can be specify object_id, position and object\n category. An important part for metrics calculation are view points that\n describe success area for the navigation.\n\n Args:\n agent_state: navigable AgentState with a position and a rotation where\n the object is visible.\n iou: an intersection of a union of the object and a rectangle in the\n center of view. This metric is used to evaluate how good is the object\n view form current position. Higher iou means better view, iou equals\n 1.0 if whole object is inside of the rectangle and no pixel inside\n the rectangle belongs to anything except the object.\n \"\"\"\n agent_state: AgentState\n iou: Optional[float]\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass ObjectGoal(NavigationGoal):\n r\"\"\"Object goal provides information about an object that is target for\n navigation. That can be specify object_id, position and object\n category. An important part for metrics calculation are view points that\n describe success area for the navigation.\n\n Args:\n object_id: id that can be used to retrieve object from the semantic\n scene annotation\n object_name: name of the object\n object_category: object category name usually similar to scene semantic\n categories\n room_id: id of a room where object is located, can be used to retrieve\n room from the semantic scene annotation\n room_name: name of the room, where object is located\n view_points: navigable positions around the object with specified\n proximity of the object surface used for navigation metrics calculation.\n The object is visible from these positions.\n \"\"\"\n\n object_id: str = attr.ib(default=None, validator=not_none_validator)\n object_name: Optional[str] = None\n object_name_id: Optional[int] = None\n object_category: Optional[str] = None\n room_id: Optional[str] = None\n room_name: Optional[str] = None\n view_points: Optional[List[ObjectViewLocation]] = None\n\n\[email protected]_sensor\nclass ObjectGoalSensor(Sensor):\n r\"\"\"A sensor for Object Goal specification as observations which is used in\n ObjectGoal Navigation. The goal is expected to be specified by object_id or\n semantic category id.\n For the agent in simulator the forward direction is along negative-z.\n In polar coordinate format the angle returned is azimuth to the goal.\n Args:\n sim: a reference to the simulator for calculating task observations.\n config: a config for the ObjectGoalSensor sensor. 
Can contain field\n GOAL_SPEC that specifies which id use for goal specification,\n GOAL_SPEC_MAX_VAL the maximum object_id possible used for\n observation space definition.\n dataset: a Object Goal navigation dataset that contains dictionaries\n of categories id to text mapping.\n \"\"\"\n cls_uuid: str = \"objectgoal\"\n\n def __init__(\n self,\n sim,\n config: Config,\n dataset: \"ObjectNavDatasetV1\",\n *args: Any,\n **kwargs: Any,\n ):\n self._sim = sim\n self._dataset = dataset\n super().__init__(config=config)\n\n def _get_uuid(self, *args: Any, **kwargs: Any) -> str:\n return self.cls_uuid\n\n def _get_sensor_type(self, *args: Any, **kwargs: Any):\n return SensorTypes.SEMANTIC\n\n def _get_observation_space(self, *args: Any, **kwargs: Any):\n sensor_shape = (1,)\n max_value = self.config.GOAL_SPEC_MAX_VAL - 1\n if self.config.GOAL_SPEC == \"TASK_CATEGORY_ID\":\n max_value = max(\n self._dataset.category_to_task_category_id.values()\n )\n logger.info(\"max object cat: {}\".format(max_value))\n logger.info(\"cats: {}\".format(self._dataset.category_to_task_category_id.values()))\n\n return spaces.Box(\n low=0, high=max_value, shape=sensor_shape, dtype=np.int64\n )\n\n def get_observation(\n self,\n observations,\n *args: Any,\n episode: ObjectGoalNavEpisode,\n **kwargs: Any,\n ) -> Optional[int]:\n\n if len(episode.goals) == 0:\n logger.error(\n f\"No goal specified for episode {episode.episode_id}.\"\n )\n return None\n if not isinstance(episode.goals[0], ObjectGoal):\n logger.error(\n f\"First goal should be ObjectGoal, episode {episode.episode_id}.\"\n )\n return None\n category_name = episode.object_category\n if self.config.GOAL_SPEC == \"TASK_CATEGORY_ID\":\n return np.array(\n [self._dataset.category_to_task_category_id[category_name]],\n dtype=np.int64,\n )\n elif self.config.GOAL_SPEC == \"OBJECT_ID\":\n obj_goal = episode.goals[0]\n assert isinstance(obj_goal, ObjectGoal) # for type checking\n return np.array([obj_goal.object_name_id], dtype=np.int64)\n else:\n raise RuntimeError(\n \"Wrong GOAL_SPEC specified for ObjectGoalSensor.\"\n )\n\n\[email protected]_task(name=\"ObjectNav-v1\")\nclass ObjectNavigationTask(NavigationTask):\n r\"\"\"An Object Navigation Task class for a task specific methods.\n Used to explicitly state a type of the task in config.\n \"\"\"\n _is_episode_active: bool\n _prev_action: int\n\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self._is_episode_active = False\n\n def overwrite_sim_config(self, sim_config, episode):\n super().overwrite_sim_config(sim_config, episode)\n\n sim_config.defrost()\n sim_config.scene_state = episode.scene_state\n sim_config.freeze()\n \n return sim_config\n\n def _check_episode_is_active(self, action, *args: Any, **kwargs: Any) -> bool:\n return not getattr(self, \"is_stop_called\", False)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ehwa009/Eye_Motion_Dataset
[ "42a1c897dc4209c6bb2de94c915ab36995855202" ]
[ "run_preprocessing.py" ]
[ "import pickle\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport math\n\nfrom tqdm import tqdm\nfrom sklearn import decomposition\n\nCENTER_X = int(960 / 3 / 2)\nCENTER_Y = int(540 / 3 / 2)\n\n# CENTER_X = 0\n# CENTER_Y = 0\n\n\ndef load_data(path, data_size=None):\n with open(path, 'rb') as f:\n data = pickle.load(f)\n if data_size != -1:\n dataset = data[:data_size]\n else:\n dataset = data[:]\n return dataset\n\n\ndef save_data(path, data):\n with open(path, 'wb') as f:\n pickle.dump(data, f)\n\n\n'''\nfilling empty coordination, \nrelocate landmark position, \nand filtering landmarks which have abnormal pulpil coordination \n'''\ndef run_fill_filter(eye_dataset):\n for ed in tqdm(eye_dataset):\n # preprocessing landmarks\n # print('[INFO] Current video: {}'.format(ed['vid']))\n for clip_info in ed['clip_info']:\n landmarks = clip_info['landmarks']\n filled_landmarks = []\n for landmark in landmarks:\n ci_df = pd.DataFrame(np.array(landmark))\n ci_df = ci_df.replace(0, np.nan)\n ci_df = ci_df.fillna(method='ffill') # fill NaN values in dataset\n ci_df = ci_df.rolling(3).mean() # moving average filtering\n temp_lm = []\n for landmark in ci_df.values.tolist(): \n filled = [int(lm) for lm in landmark if not(np.isnan(lm))]\n if len(filled) == 50:\n # centering\n diff_x = CENTER_X - filled[48]\n diff_y = CENTER_Y - filled[49]\n for f_i in range(0, len(filled), 2):\n filled[f_i] += diff_x\n filled[f_i+1] += diff_y\n # check right pupil is outside of eye region\n condition1 = filled[0] > filled[4] and filled[0] < filled[10]\n condition2 = filled[1] > filled[7] and filled[1] > filled[9]\n condition3 = filled[1] < filled[13] and filled[1] < filled[14]\n if condition1 and condition2 and condition3:\n temp_lm.append(filled)\n filled_landmarks.append(temp_lm)\n clip_info['landmarks'] = filled_landmarks\n \n return eye_dataset\n\n\n'''\nNormalize eye expression motion scale over whole dataset.\nTo avoid pulpil dislocation, we use same vector on right and left pulpil.\n'''\ndef run_normalization(eye_dataset):\n eb_standard_len = 100\n\n def get_dist(x1, y1, x2, y2):\n return np.sqrt((x1-x2) ** 2 + (y1- y2) ** 2)\n\n def get_theta(var_x, var_y, fix_x, fix_y):\n return math.atan2(var_y - fix_y, var_x - fix_x)\n\n def get_new_coor(theta, dist, point):\n return dist * np.array([math.cos(theta), \n math.sin(theta)]) + np.array([point[0], point[1]])\n \n def run_len_norm(var_x, var_y, fix_x, fix_y, expected_len):\n angle = get_theta(var_x, var_y, fix_x, fix_y)\n new_coor = get_new_coor(angle, expected_len, [fix_x, fix_y])\n return new_coor\n\n for ed in tqdm(eye_dataset):\n # preprocessing landmarks\n # print('[INFO] Current video: {}'.format(ed['vid']))\n for clip_info in ed['clip_info']:\n tmp_landmarks = []\n for landmark in clip_info['landmarks']:\n tmp_landmark = []\n for lm in landmark:\n # calculate different ratio with standard length\n right_len_ratio = eb_standard_len / get_dist(lm[46], lm[47], lm[48], lm[49])\n left_len_ratio = eb_standard_len / get_dist(lm[28], lm[29], lm[48], lm[49])\n len_ratio = (right_len_ratio + left_len_ratio) / 2\n fix_x, fix_y = lm[48], lm[49]\n new_coor_list = []\n for lm_i in range(0, len(lm[:48]), 2):\n new_coor = run_len_norm(lm[lm_i], lm[lm_i+1], fix_x, fix_y,\n get_dist(lm[lm_i], lm[lm_i+1], fix_x, fix_y) * len_ratio)\n new_coor_list += [int(new_coor[0]), int(new_coor[1])]\n # pupil preprocessing\n right_theta = get_theta(lm[0], lm[1], lm[6], lm[7])\n right_dist = get_dist(lm[0], lm[1], lm[6], lm[7])\n left_new_pulpil = get_new_coor(right_theta, 
right_dist, [lm[18], lm[19]])\n lm[2] = int(left_new_pulpil[0])\n lm[3] = int(left_new_pulpil[1])\n new_coor_list += [fix_x, fix_y]\n tmp_landmark.append(new_coor_list) \n tmp_landmarks.append(tmp_landmark)\n clip_info['landmarks'] = tmp_landmarks\n \n return eye_dataset\n\n\n'''\nRun PCA.\nWe set 7 components to run pca.\n'''\ndef run_estimator(eye_dataset, opt):\n landmark_list = []\n for ed in eye_dataset:\n for clip_info in ed['clip_info']:\n for clip_landmarks in clip_info['landmarks']:\n for landmarks in clip_landmarks:\n landmark_list.append(landmarks)\n\n landmark_array = np.array(landmark_list)\n n_samples, n_features = landmark_array.shape\n print('[INFO] n_samples:{}, n_features:{}'.format(n_samples, n_features))\n print('[INFO] Estimated running time: {:0.2f} hrs with {} fps'.format(n_samples/opt.fps/60/60, opt.fps))\n\n data = landmark_array[:, :-2]\n estimator = decomposition.PCA(opt.n_components, svd_solver='randomized', whiten=True)\n estimator.fit(data)\n var_ratio = estimator.explained_variance_ratio_\n print('[INFO] {} number of components explain {:0.2f} of original dataset.'.format(opt.n_components, np.sum(var_ratio)))\n print('[INFO] Without first and seconde axis, rest of hyperplain consists of {:0.2f} of original dataset.'.format(np.sum(var_ratio[3:])))\n \n return estimator\n\n\n'''\nBased on learned PCA eigen vectors (7 hyperplanes that can explain original dataset),\nWe transform 50 dimention to 7 dimention to represent eye expression.\nDue to first and second egien vectors represent rotating motion in our pca space,\nwe make these values to zero.\n'''\ndef run_transform(eye_dataset, estimator, opt):\n for ed in tqdm(eye_dataset):\n for clip_info in ed['clip_info']:\n landmarks = clip_info['landmarks']\n transformed_landmarks = []\n for landmark in landmarks:\n tmp_trans = []\n for lm in landmark:\n transformed_array = estimator.transform(np.array([lm[:-2]]))\n transformed_list = transformed_array.tolist()[0]\n if opt.is_rotation_killed: # we killed pca hyperplanes which have a rotation\n # transformed_list[0] = int(transformed_list[0]/3)\n # transformed_list[1] = int(transformed_list[1]/3)\n transformed_list[0] = 0\n transformed_list[1] = 0\n tmp_trans.append(transformed_list)\n transformed_landmarks.append(tmp_trans)\n clip_info['landmarks'] = transformed_landmarks\n \n return eye_dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-dataset_path', default='./dataset')\n parser.add_argument('-data_size', type=int, default=-1) # -1 means whole dataset\n parser.add_argument('-fps', type=int, default=10)\n parser.add_argument('-n_components', type=int, default=7)\n parser.add_argument('-is_rotation_killed', type=bool, default=True)\n\n opt = parser.parse_args()\n\n eye_dataset = load_data('{}/eye_motion_dataset.pickle'.format(opt.dataset_path), opt.data_size)\n print('[INFO] Dataset length: {}'.format(len(eye_dataset)))\n \n print('[INFO] Filling, filtering and centering is now processing.')\n eye_dataset = run_fill_filter(eye_dataset)\n\n print('[INFO] Normalization is now processing.')\n eye_dataset = run_normalization(eye_dataset)\n\n print('[INFO] Estimator is now running.')\n estimator = run_estimator(eye_dataset, opt)\n\n print('[INFO] Landmarks are now transforming.')\n eye_dataset = run_transform(eye_dataset, estimator, opt)\n \n # save processed dataset\n processed_dataset = {'eye_dataset': eye_dataset,\n 'estimator': estimator,\n }\n save_path = '{}/processed_eye_motion_dataset_pca_{}.pickle'.format(opt.dataset_path, 
estimator.n_components)\n print('[INFO] Save preprocessed dataset at {}'.format(save_path))\n save_data(save_path, processed_dataset)\n \n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.sqrt", "numpy.isnan", "numpy.array", "sklearn.decomposition.PCA", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FischbachLab/hCom_variable_regions
[ "6f1108c461a7e31964d1d81a83c03b9f4dad4c76" ]
[ "summarize_clstr_table.py" ]
[ "#!/usr/bin/env python3\n## How many clusters have more than one organisms as it's members\nimport sys\nimport pandas as pd\nimport logging\n\n\ndef main():\n clstr_table = sys.argv[1]\n output = sys.argv[2]\n\n clstr_df = pd.read_table(clstr_table, header=0)\n clstr_df[\"organism\"] = clstr_df[\"id\"].apply(lambda x: x.split(\":\")[2].split(\"_\")[0])\n\n summ_df = clstr_df.groupby(\"clstr\").agg(\n num_organisms=(\"organism\", pd.Series.nunique), organism_list=(\"organism\", set)\n )\n\n close_strains = set()\n for row in summ_df.query(\"num_organisms > 1\").itertuples(index=False):\n close_strains.update(row.organism_list)\n\n logging.info(\n f\"There are {len(close_strains)} strains in the community for which another strain exists with an identical V3-V4 region\"\n )\n\n summ_df[\"organism_list\"] = summ_df[\"organism_list\"].apply(\n lambda x: \"; \".join(set(x))\n )\n summ_df = summ_df.sort_values(\"num_organisms\", ascending=False)\n\n summ_df.to_csv(output)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s\\t[%(levelname)s]:\\t%(message)s\",\n )\n main()\n" ]
[ [ "pandas.read_table" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ZZIQIN/FATE
[ "cc6783927564cbb15c067d5010f1cdf82a5de20a", "cc6783927564cbb15c067d5010f1cdf82a5de20a" ]
[ "federatedml/ftl/hetero_ftl/hetero_ftl_host.py", "federatedml/ftl/encrypted_ftl.py" ]
[ "#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\n\nimport numpy as np\n\nfrom arch.api.utils import log_utils\nfrom federatedml.evaluation import Evaluation\nfrom federatedml.ftl.data_util.common_data_util import overlapping_samples_converter, load_model_parameters, \\\n save_model_parameters, create_table, convert_instance_table_to_dict, convert_instance_table_to_array, \\\n add_random_mask_for_list_of_values, remove_random_mask_from_list_of_values\nfrom federatedml.ftl.data_util.log_util import create_shape_msg\nfrom federatedml.ftl.eggroll_computation.helper import decrypt_matrix\nfrom federatedml.ftl.encrypted_ftl import EncryptedFTLHostModel\nfrom federatedml.ftl.encryption.encryption import generate_encryption_key_pair, decrypt_scalar, decrypt_array\nfrom federatedml.ftl.faster_encrypted_ftl import FasterEncryptedFTLHostModel\nfrom federatedml.ftl.hetero_ftl.hetero_ftl_base import HeteroFTLParty\nfrom federatedml.ftl.plain_ftl import PlainFTLHostModel\nfrom federatedml.param.param import FTLModelParam\nfrom federatedml.util import consts\nfrom federatedml.util.transfer_variable import HeteroFTLTransferVariable\n\nLOGGER = log_utils.getLogger()\n\n\nclass HeteroFTLHost(HeteroFTLParty):\n\n def __init__(self, host: PlainFTLHostModel, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(HeteroFTLHost, self).__init__()\n self.host_model = host\n self.model_param = model_param\n self.transfer_variable = transfer_variable\n self.max_iter = model_param.max_iter\n self.n_iter_ = 0\n\n def prepare_data(self, host_data):\n LOGGER.info(\"@ start host prepare data\")\n host_features_dict, _, host_sample_indexes = convert_instance_table_to_dict(host_data)\n host_sample_indexes = np.array(host_sample_indexes)\n\n self._do_remote(host_sample_indexes,\n name=self.transfer_variable.host_sample_indexes.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_sample_indexes),\n role=consts.GUEST,\n idx=-1)\n\n guest_sample_indexes = self._do_get(name=self.transfer_variable.guest_sample_indexes.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_sample_indexes),\n idx=-1)[0]\n\n host_features, overlap_indexes, _ = overlapping_samples_converter(host_features_dict, host_sample_indexes,\n guest_sample_indexes)\n return host_features, overlap_indexes\n\n def classified(self, prob_table, threshold):\n \"\"\"\n convert a probability table into a predicted class table.\n \"\"\"\n predict_table = prob_table.mapValues(lambda x: 1 if x > threshold else 0)\n return predict_table\n\n def evaluate(self, labels, pred_prob, pred_labels, evaluate_param):\n LOGGER.info(\"@ start host evaluate\")\n predict_res = None\n if evaluate_param.classi_type == consts.BINARY:\n predict_res = pred_prob\n elif evaluate_param.classi_type == consts.MULTY:\n predict_res = pred_labels\n else:\n LOGGER.warning(\"unknown classification type, return None as evaluation 
results\")\n\n eva = Evaluation(evaluate_param.classi_type)\n eva_report = eva.report(labels, predict_res, evaluate_param.metrics, evaluate_param.thresholds,\n evaluate_param.pos_label)\n\n LOGGER.info(\"@ evaluation report:\" + str(eva_report))\n return eva_report\n\n def predict(self, host_data, predict_param):\n LOGGER.info(\"@ start host predict\")\n features, labels, instances_indexes = convert_instance_table_to_array(host_data)\n host_x = np.squeeze(features)\n LOGGER.debug(\"host_x: \" + str(host_x.shape))\n\n host_prob = self.host_model.predict(host_x)\n self._do_remote(host_prob,\n name=self.transfer_variable.host_prob.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.host_prob),\n role=consts.GUEST, idx=-1)\n\n pred_prob = self._do_get(name=self.transfer_variable.pred_prob.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.pred_prob),\n idx=-1)[0]\n\n pred_prob = np.squeeze(pred_prob)\n LOGGER.debug(\"pred_prob: \" + str(pred_prob.shape))\n\n pred_prob_table = create_table(pred_prob, instances_indexes)\n actual_label_table = create_table(labels, instances_indexes)\n pred_label_table = self.classified(pred_prob_table, predict_param.threshold)\n if predict_param.with_proba:\n predict_result = actual_label_table.join(pred_prob_table, lambda label, prob: (label if label > 0 else 0, prob))\n predict_result = predict_result.join(pred_label_table, lambda x, y: (x[0], x[1], y))\n else:\n predict_result = actual_label_table.join(pred_label_table, lambda a_label, p_label: (a_label, None, p_label))\n return predict_result\n\n def load_model(self, model_table_name, model_namespace):\n LOGGER.info(\"@ load host model from name/ns\" + \", \" + str(model_table_name) + \", \" + str(model_namespace))\n model_parameters = load_model_parameters(model_table_name, model_namespace)\n self.host_model.restore_model(model_parameters)\n\n def save_model(self, model_table_name, model_namespace):\n LOGGER.info(\"@ save host model to name/ns\" + \", \" + str(model_table_name) + \", \" + str(model_namespace))\n _ = save_model_parameters(self.host_model.get_model_parameters(), model_table_name, model_namespace)\n\n\nclass HeteroPlainFTLHost(HeteroFTLHost):\n\n def __init__(self, host: PlainFTLHostModel, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(HeteroPlainFTLHost, self).__init__(host, model_param, transfer_variable)\n\n def fit(self, host_data):\n LOGGER.info(\"@ start host fit\")\n\n host_x, overlap_indexes = self.prepare_data(host_data)\n\n LOGGER.debug(\"host_x: \" + str(host_x.shape))\n LOGGER.debug(\"overlap_indexes: \" + str(len(overlap_indexes)))\n\n self.host_model.set_batch(host_x, overlap_indexes)\n while self.n_iter_ < self.max_iter:\n host_comp = self.host_model.send_components()\n self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_component_list, self.n_iter_),\n idx=-1)[0]\n\n self.host_model.receive_components(guest_comp)\n\n is_stop = self._do_get(name=self.transfer_variable.is_stopped.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_stopped, self.n_iter_),\n idx=-1)[0]\n\n LOGGER.info(\"@ time: \" + str(time.time()) + \", ep: \" + 
str(self.n_iter_) + \", converged: \" + str(is_stop))\n self.n_iter_ += 1\n if is_stop:\n break\n\n\n\"\"\"\nCentralized encryption scheme with an arbiter in the loop for decryption.\n\"\"\"\n\n\nclass HeteroEncryptFTLHost(HeteroFTLHost):\n\n def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(HeteroEncryptFTLHost, self).__init__(host, model_param, transfer_variable)\n self.host_model: EncryptedFTLHostModel = host\n\n def _precompute(self):\n pass\n\n def fit(self, host_data):\n LOGGER.info(\"@ start host fit\")\n # get public key from arbiter\n public_key = self._do_get(name=self.transfer_variable.paillier_pubkey.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.paillier_pubkey),\n idx=-1)[0]\n\n host_x, overlap_indexes = self.prepare_data(host_data)\n\n LOGGER.debug(\"host_x: \" + str(host_x.shape))\n LOGGER.debug(\"overlap_indexes: \" + str(len(overlap_indexes)))\n\n self.host_model.set_batch(host_x, overlap_indexes)\n self.host_model.set_public_key(public_key)\n\n start_time = time.time()\n while self.n_iter_ < self.max_iter:\n host_comp = self.host_model.send_components()\n self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_component_list, self.n_iter_),\n idx=-1)[0]\n self.host_model.receive_components(guest_comp)\n\n self._precompute()\n\n encrypt_host_gradients = self.host_model.send_gradients()\n self._do_remote(encrypt_host_gradients, name=self.transfer_variable.encrypt_host_gradient.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.encrypt_host_gradient, self.n_iter_),\n role=consts.ARBITER,\n idx=-1)\n\n decrypt_host_gradients = self._do_get(name=self.transfer_variable.decrypt_host_gradient.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.decrypt_host_gradient, self.n_iter_),\n idx=-1)[0]\n self.host_model.receive_gradients(decrypt_host_gradients)\n\n is_stop = self._do_get(name=self.transfer_variable.is_encrypted_ftl_stopped.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_encrypted_ftl_stopped, self.n_iter_),\n idx=-1)[0]\n\n LOGGER.info(\"@ time: \" + str(time.time()) + \", ep: \" + str(self.n_iter_) + \", converged: \" + str(is_stop))\n self.n_iter_ += 1\n if is_stop:\n break\n\n end_time = time.time()\n LOGGER.info(\"@ running time: \" + str(end_time - start_time))\n\n\nclass FasterHeteroEncryptFTLHost(HeteroEncryptFTLHost):\n\n def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(FasterHeteroEncryptFTLHost, self).__init__(host, model_param, transfer_variable)\n self.host_model: FasterEncryptedFTLHostModel = host\n\n def _precompute(self):\n LOGGER.info(\"@ start host precompute\")\n\n host_precomputed_comp = self.host_model.send_precomputed_components()\n self._do_remote(host_precomputed_comp, name=self.transfer_variable.host_precomputed_comp_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_precomputed_comp_list,\n self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n guest_precomputed_comp = self._do_get(name=self.transfer_variable.guest_precomputed_comp_list.name,\n 
tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_precomputed_comp_list, self.n_iter_),\n idx=-1)[0]\n self.host_model.receive_precomputed_components(guest_precomputed_comp)\n\n\n\"\"\"\nDecentralized encryption scheme without arbiter in the loop.\n\"\"\"\n\n\nclass HeteroDecentralizedEncryptFTLHost(HeteroFTLHost):\n\n def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(HeteroDecentralizedEncryptFTLHost, self).__init__(host, model_param, transfer_variable)\n self.host_model: EncryptedFTLHostModel = host\n self.public_key = None\n self.private_key = None\n self.guest_public_key = None\n\n def _precompute(self):\n pass\n\n def prepare_encryption_key_pair(self):\n LOGGER.info(\"@ start host prepare encryption key pair\")\n\n self.public_key, self.private_key = generate_encryption_key_pair()\n # exchange public_key with guest\n self._do_remote(self.public_key, name=self.transfer_variable.host_public_key.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_public_key,\n self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n self.guest_public_key = self._do_get(name=self.transfer_variable.guest_public_key.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_public_key, self.n_iter_),\n idx=-1)[0]\n\n def fit(self, host_data):\n LOGGER.info(\"@ start host fit\")\n self.prepare_encryption_key_pair()\n host_x, overlap_indexes = self.prepare_data(host_data)\n\n LOGGER.debug(\"host_x: \" + str(host_x.shape))\n LOGGER.debug(\"overlap_indexes: \" + str(len(overlap_indexes)))\n\n self.host_model.set_batch(host_x, overlap_indexes)\n self.host_model.set_public_key(self.public_key)\n self.host_model.set_guest_public_key(self.guest_public_key)\n self.host_model.set_private_key(self.private_key)\n\n start_time = time.time()\n while self.n_iter_ < self.max_iter:\n\n # Stage 1: compute and encrypt components (using host public key) required by guest to\n # calculate gradients and loss.\n LOGGER.debug(\"@ Stage 1: \")\n host_comp = self.host_model.send_components()\n LOGGER.debug(\"send enc host_comp: \" + create_shape_msg(host_comp))\n self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n # Stage 2: receive guest components in encrypted form (encrypted by guest public key),\n # and calculate host gradients in encrypted form (encrypted by guest public key),\n # and send them to guest for decryption\n LOGGER.debug(\"@ Stage 2: \")\n guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_component_list, self.n_iter_),\n idx=-1)[0]\n LOGGER.debug(\"receive enc guest_comp: \" + create_shape_msg(guest_comp))\n self.host_model.receive_components(guest_comp)\n\n self._precompute()\n\n # calculate host gradients in encrypted form (encrypted by guest public key)\n encrypt_host_gradients = self.host_model.send_gradients()\n LOGGER.debug(\"send encrypt_guest_gradients: \" + create_shape_msg(encrypt_host_gradients))\n\n # add random mask to encrypt_host_gradients and send them to guest for decryption\n masked_enc_host_gradients, gradients_masks = add_random_mask_for_list_of_values(encrypt_host_gradients)\n\n LOGGER.debug(\"send masked_enc_host_gradients: \" + create_shape_msg(masked_enc_host_gradients))\n 
self._do_remote(masked_enc_host_gradients, name=self.transfer_variable.masked_enc_host_gradients.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_host_gradients, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n # Stage 3: receive and then decrypt masked encrypted guest gradients and masked encrypted guest loss,\n # and send them to guest\n LOGGER.debug(\"@ Stage 3: \")\n masked_enc_guest_gradients = self._do_get(name=self.transfer_variable.masked_enc_guest_gradients.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_guest_gradients, self.n_iter_),\n idx=-1)[0]\n\n masked_enc_guest_loss = self._do_get(name=self.transfer_variable.masked_enc_loss.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_loss, self.n_iter_),\n idx=-1)[0]\n\n masked_dec_guest_gradients = self.__decrypt_gradients(masked_enc_guest_gradients)\n masked_dec_guest_loss = self.__decrypt_loss(masked_enc_guest_loss)\n\n LOGGER.debug(\"send masked_dec_guest_gradients: \" + create_shape_msg(masked_dec_guest_gradients))\n self._do_remote(masked_dec_guest_gradients, name=self.transfer_variable.masked_dec_guest_gradients.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_dec_guest_gradients, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n LOGGER.debug(\"send masked_dec_guest_loss: \" + str(masked_dec_guest_loss))\n self._do_remote(masked_dec_guest_loss, name=self.transfer_variable.masked_dec_loss.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_dec_loss, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n # Stage 4: receive masked but decrypted host gradients from guest and remove mask,\n # and update host model parameters using these gradients.\n LOGGER.debug(\"@ Stage 4: \")\n masked_dec_host_gradients = self._do_get(name=self.transfer_variable.masked_dec_host_gradients.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.masked_dec_host_gradients, self.n_iter_),\n idx=-1)[0]\n LOGGER.debug(\"receive masked_dec_host_gradients: \" + create_shape_msg(masked_dec_host_gradients))\n\n cleared_dec_host_gradients = remove_random_mask_from_list_of_values(masked_dec_host_gradients, gradients_masks)\n\n # update host model parameters using these gradients.\n self.host_model.receive_gradients(cleared_dec_host_gradients)\n\n # Stage 5: determine whether training is terminated.\n LOGGER.debug(\"@ Stage 5: \")\n is_stop = self._do_get(name=self.transfer_variable.is_decentralized_enc_ftl_stopped.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_decentralized_enc_ftl_stopped, self.n_iter_),\n idx=-1)[0]\n\n LOGGER.info(\"@ time: \" + str(time.time()) + \", ep: \" + str(self.n_iter_) + \", converged: \" + str(is_stop))\n self.n_iter_ += 1\n if is_stop:\n break\n\n end_time = time.time()\n LOGGER.info(\"@ running time: \" + str(end_time - start_time))\n\n def __decrypt_gradients(self, encrypt_gradients):\n return decrypt_matrix(self.private_key, encrypt_gradients[0]), decrypt_array(self.private_key, encrypt_gradients[1])\n\n def __decrypt_loss(self, encrypt_loss):\n return decrypt_scalar(self.private_key, encrypt_loss)\n\n\nclass FasterHeteroDecentralizedEncryptFTLHost(HeteroDecentralizedEncryptFTLHost):\n\n def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(FasterHeteroDecentralizedEncryptFTLHost, self).__init__(host, model_param, transfer_variable)\n 
self.host_model: FasterEncryptedFTLHostModel = host\n\n def _precompute(self):\n LOGGER.debug(\"@ start precompute\")\n\n host_precomputed_comp = self.host_model.send_precomputed_components()\n self._do_remote(host_precomputed_comp, name=self.transfer_variable.host_precomputed_comp_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_precomputed_comp_list,\n self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n guest_precomputed_comp = self._do_get(name=self.transfer_variable.guest_precomputed_comp_list.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_precomputed_comp_list, self.n_iter_),\n idx=-1)[0]\n self.host_model.receive_precomputed_components(guest_precomputed_comp)\n\n\nclass HostFactory(object):\n\n @classmethod\n def create(cls, ftl_model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable, ftl_local_model):\n if ftl_model_param.is_encrypt:\n if ftl_model_param.enc_ftl == \"dct_enc_ftl\":\n # decentralized encrypted ftl host\n LOGGER.debug(\"@ create decentralized encrypted ftl_host\")\n host_model = EncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = HeteroDecentralizedEncryptFTLHost(host_model, ftl_model_param, transfer_variable)\n elif ftl_model_param.enc_ftl == \"dct_enc_ftl2\":\n # decentralized encrypted faster ftl host\n LOGGER.debug(\"@ create decentralized encrypted faster ftl_host\")\n host_model = FasterEncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = FasterHeteroDecentralizedEncryptFTLHost(host_model, ftl_model_param, transfer_variable)\n elif ftl_model_param.enc_ftl == \"enc_ftl2\":\n # encrypted faster ftl host\n LOGGER.debug(\"@ create encrypted faster ftl_host\")\n host_model = FasterEncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = FasterHeteroEncryptFTLHost(host_model, ftl_model_param, transfer_variable)\n else:\n # encrypted ftl host\n LOGGER.debug(\"@ create encrypted ftl_host\")\n host_model = EncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = HeteroEncryptFTLHost(host_model, ftl_model_param, transfer_variable)\n\n else:\n # plain ftl host\n LOGGER.debug(\"@ create plain ftl_host\")\n host_model = PlainFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = HeteroPlainFTLHost(host_model, ftl_model_param, transfer_variable)\n return host\n\n\n", "#\n# Copyright 2019 The FATE Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nfrom federatedml.ftl.eggroll_computation.helper import compute_sum_XY, \\\n compute_XY, encrypt_matrix, compute_XY_plus_Z, \\\n encrypt_matmul_2_ob, encrypt_matmul_3, compute_X_plus_Y\nfrom federatedml.ftl.encryption import encryption\nfrom federatedml.ftl.encryption.encryption import decrypt_array, decrypt_matrix, decrypt_scalar\nfrom federatedml.ftl.plain_ftl import PlainFTLGuestModel, PlainFTLHostModel\n\n\nclass EncryptedFTLGuestModel(PlainFTLGuestModel):\n\n def __init__(self, local_model, model_param, public_key=None, host_public_key=None, private_key=None,\n is_min_gen_enc=True, is_trace=False):\n super(EncryptedFTLGuestModel, self).__init__(local_model, model_param, is_trace)\n self.public_key = public_key\n self.private_key = private_key\n self.host_public_key = host_public_key\n self.is_min_gen_enc = is_min_gen_enc\n\n def set_public_key(self, public_key):\n self.public_key = public_key\n\n def set_host_public_key(self, public_key):\n self.host_public_key = public_key\n\n def set_private_key(self, private_key):\n self.private_key = private_key\n\n def send_components(self):\n if self.is_min_gen_enc:\n self.logger.debug(\"using min_gen_enc\")\n\n self._compute_components()\n\n # phi has shape (1, feature_dim)\n # phi_2 has shape (feature_dim, feature_dim)\n enc_phi = encryption.encrypt_matrix(self.public_key, self.phi)\n enc_phi_2 = encrypt_matmul_2_ob(self.phi.transpose(), enc_phi)\n\n # enc_y_overlap_2_phi_2 = 0.25 * np.expand_dims(self.y_overlap_2, axis=2) * enc_phi_2\n # enc_y_overlap_phi = -0.5 * self.y_overlap * enc_phi\n enc_y_overlap_2_phi_2 = compute_XY(0.25 * np.expand_dims(self.y_overlap_2, axis=2),\n np.tile(enc_phi_2, (self.y_overlap_2.shape[0], 1, 1)))\n enc_y_overlap_phi = compute_XY(-0.5 * self.y_overlap, np.tile(enc_phi, (self.y_overlap.shape[0], 1)))\n enc_mapping_comp_A = encrypt_matrix(self.public_key, self.mapping_comp_A)\n\n return [enc_y_overlap_2_phi_2, enc_y_overlap_phi, enc_mapping_comp_A]\n else:\n components = super(EncryptedFTLGuestModel, self).send_components()\n return self.__encrypt_components(components)\n\n def __encrypt_components(self, components):\n enc_comp_0 = encrypt_matrix(self.public_key, components[0])\n enc_comp_1 = encrypt_matrix(self.public_key, components[1])\n enc_comp_2 = encrypt_matrix(self.public_key, components[2])\n return [enc_comp_0, enc_comp_1, enc_comp_2]\n\n def receive_components(self, components):\n self.enc_uB_overlap = components[0]\n self.enc_uB_overlap_2 = components[1]\n self.enc_mapping_comp_B = components[2]\n self._update_gradients()\n self._update_loss()\n\n def _update_gradients(self):\n\n # y_overlap_2 have shape (len(overlap_indexes), 1),\n # phi has shape (1, feature_dim),\n # y_overlap_2_phi has shape (len(overlap_indexes), 1, feature_dim)\n y_overlap_2_phi = np.expand_dims(self.y_overlap_2 * self.phi, axis=1)\n\n # uB_2_overlap has shape (len(overlap_indexes), feature_dim, feature_dim)\n 
enc_y_overlap_2_phi_uB_overlap_2 = encrypt_matmul_3(y_overlap_2_phi, self.enc_uB_overlap_2)\n enc_loss_grads_const_part1 = np.sum(0.25 * np.squeeze(enc_y_overlap_2_phi_uB_overlap_2, axis=1), axis=0)\n\n if self.is_trace:\n self.logger.debug(\"enc_y_overlap_2_phi_uB_overlap_2 shape\" + str(enc_y_overlap_2_phi_uB_overlap_2.shape))\n self.logger.debug(\"enc_loss_grads_const_part1 shape\" + str(enc_loss_grads_const_part1.shape))\n\n y_overlap = np.tile(self.y_overlap, (1, self.enc_uB_overlap.shape[-1]))\n enc_loss_grads_const_part2 = compute_sum_XY(y_overlap * 0.5, self.enc_uB_overlap)\n\n enc_const = enc_loss_grads_const_part1 - enc_loss_grads_const_part2\n enc_const_overlap = np.tile(enc_const, (len(self.overlap_indexes), 1))\n enc_const_nonoverlap = np.tile(enc_const, (len(self.non_overlap_indexes), 1))\n y_non_overlap = np.tile(self.y[self.non_overlap_indexes], (1, self.enc_uB_overlap.shape[-1]))\n\n if self.is_trace:\n self.logger.debug(\"enc_const shape:\" + str(enc_const.shape))\n self.logger.debug(\"enc_const_overlap shape\" + str(enc_const_overlap.shape))\n self.logger.debug(\"enc_const_nonoverlap shape\" + str(enc_const_nonoverlap.shape))\n self.logger.debug(\"y_non_overlap shape\" + str(y_non_overlap.shape))\n\n enc_grad_A_nonoverlap = compute_XY(self.alpha * y_non_overlap / len(self.y), enc_const_nonoverlap)\n enc_grad_A_overlap = compute_XY_plus_Z(self.alpha * y_overlap / len(self.y), enc_const_overlap,\n self.enc_mapping_comp_B)\n\n if self.is_trace:\n self.logger.debug(\"enc_grad_A_nonoverlap shape\" + str(enc_grad_A_nonoverlap.shape))\n self.logger.debug(\"enc_grad_A_overlap shape\" + str(enc_grad_A_overlap.shape))\n\n enc_loss_grad_A = [[0 for _ in range(self.enc_uB_overlap.shape[1])] for _ in range(len(self.y))]\n # TODO: need more efficient way to do following task\n for i, j in enumerate(self.non_overlap_indexes):\n enc_loss_grad_A[j] = enc_grad_A_nonoverlap[i]\n for i, j in enumerate(self.overlap_indexes):\n enc_loss_grad_A[j] = enc_grad_A_overlap[i]\n\n enc_loss_grad_A = np.array(enc_loss_grad_A)\n\n if self.is_trace:\n self.logger.debug(\"enc_loss_grad_A shape\" + str(enc_loss_grad_A.shape))\n self.logger.debug(\"enc_loss_grad_A\" + str(enc_loss_grad_A))\n\n self.loss_grads = enc_loss_grad_A\n self.enc_grads_W, self.enc_grads_b = self.localModel.compute_encrypted_params_grads(\n self.X, enc_loss_grad_A)\n\n def send_gradients(self):\n return [self.enc_grads_W, self.enc_grads_b]\n\n def receive_gradients(self, gradients):\n self.localModel.apply_gradients(gradients)\n\n def send_loss(self):\n return self.loss\n\n def receive_loss(self, loss):\n self.loss = loss\n\n def _update_loss(self):\n uA_overlap_prime = - self.uA_overlap / self.feature_dim\n enc_loss_overlap = np.sum(compute_sum_XY(uA_overlap_prime, self.enc_uB_overlap))\n enc_loss_y = self.__compute_encrypt_loss_y(self.enc_uB_overlap, self.enc_uB_overlap_2, self.y_overlap, self.phi)\n self.loss = self.alpha * enc_loss_y + enc_loss_overlap\n\n def __compute_encrypt_loss_y(self, enc_uB_overlap, enc_uB_overlap_2, y_overlap, phi):\n enc_uB_phi = encrypt_matmul_2_ob(enc_uB_overlap, phi.transpose())\n enc_uB_2 = np.sum(enc_uB_overlap_2, axis=0)\n enc_phi_uB_2_Phi = encrypt_matmul_2_ob(encrypt_matmul_2_ob(phi, enc_uB_2), phi.transpose())\n enc_loss_y = (-0.5 * compute_sum_XY(y_overlap, enc_uB_phi)[0] + 1.0 / 8 * np.sum(enc_phi_uB_2_Phi)) + len(\n y_overlap) * np.log(2)\n return enc_loss_y\n\n def get_loss_grads(self):\n return self.loss_grads\n\n\nclass EncryptedFTLHostModel(PlainFTLHostModel):\n\n def __init__(self, 
local_model, model_param, public_key=None, guest_public_key=None, private_key=None,\n is_min_gen_enc=True, is_trace=False):\n super(EncryptedFTLHostModel, self).__init__(local_model, model_param, is_trace)\n self.public_key = public_key\n self.private_key = private_key\n self.guest_public_key = guest_public_key\n self.is_min_gen_enc = is_min_gen_enc\n\n def set_public_key(self, public_key):\n self.public_key = public_key\n\n def set_guest_public_key(self, public_key):\n self.guest_public_key = public_key\n\n def set_private_key(self, private_key):\n self.private_key = private_key\n\n def send_components(self):\n if self.is_min_gen_enc:\n self.logger.debug(\"using min_gen_enc\")\n\n self._compute_components()\n\n # enc_uB_overlap has shape (len(overlap_indexes), feature_dim)\n # enc_uB_overlap_2 has shape (len(overlap_indexes), feature_dim, feature_dim)\n enc_uB_overlap = encrypt_matrix(self.public_key, self.uB_overlap)\n enc_uB_overlap_2 = encrypt_matmul_3(np.expand_dims(self.uB_overlap, axis=2),\n np.expand_dims(enc_uB_overlap, axis=1))\n\n # enc_mapping_comp_B has shape (len(overlap_indexes), feature_dim)\n scale_factor = np.tile((-1 / self.feature_dim), (enc_uB_overlap.shape[0], enc_uB_overlap.shape[1]))\n enc_mapping_comp_B = compute_XY(enc_uB_overlap, scale_factor)\n # enc_mapping_comp_B = enc_uB_overlap * (-1 / self.feature_dim)\n # enc_mapping_comp_B = encrypt_matrix(self.public_key, self.mapping_comp_B)\n\n return [enc_uB_overlap, enc_uB_overlap_2, enc_mapping_comp_B]\n else:\n components = super(EncryptedFTLHostModel, self).send_components()\n return self.__encrypt_components(components)\n\n def __encrypt_components(self, components):\n enc_comp_0 = encrypt_matrix(self.public_key, components[0])\n enc_comp_1 = encrypt_matrix(self.public_key, components[1])\n enc_comp_2 = encrypt_matrix(self.public_key, components[2])\n return [enc_comp_0, enc_comp_1, enc_comp_2]\n\n def receive_components(self, components):\n self.enc_y_overlap_2_phi_2 = components[0]\n self.enc_y_overlap_phi = components[1]\n self.enc_mapping_comp_A = components[2]\n self._update_gradients()\n\n def _update_gradients(self):\n uB_overlap_ex = np.expand_dims(self.uB_overlap, axis=1)\n enc_uB_overlap_y_overlap_2_phi_2 = encrypt_matmul_3(uB_overlap_ex, self.enc_y_overlap_2_phi_2)\n enc_l1_grad_B = compute_X_plus_Y(np.squeeze(enc_uB_overlap_y_overlap_2_phi_2, axis=1), self.enc_y_overlap_phi)\n enc_loss_grad_B = compute_X_plus_Y(self.alpha * enc_l1_grad_B, self.enc_mapping_comp_A)\n\n self.loss_grads = enc_loss_grad_B\n self.enc_grads_W, self.enc_grads_b = self.localModel.compute_encrypted_params_grads(\n self.X[self.overlap_indexes], enc_loss_grad_B)\n\n def send_gradients(self):\n return [self.enc_grads_W, self.enc_grads_b]\n\n def receive_gradients(self, gradients):\n self.localModel.apply_gradients(gradients)\n\n def get_loss_grads(self):\n return self.loss_grads\n\n\nclass LocalEncryptedFederatedTransferLearning(object):\n\n def __init__(self, guest: EncryptedFTLGuestModel, host: EncryptedFTLHostModel, private_key=None):\n super(LocalEncryptedFederatedTransferLearning, self).__init__()\n self.guest = guest\n self.host = host\n self.private_key = private_key\n\n def fit(self, X_A, X_B, y, overlap_indexes, non_overlap_indexes):\n self.guest.set_batch(X_A, y, non_overlap_indexes, overlap_indexes)\n self.host.set_batch(X_B, overlap_indexes)\n\n comp_B = self.host.send_components()\n comp_A = self.guest.send_components()\n\n self.guest.receive_components(comp_B)\n self.host.receive_components(comp_A)\n\n 
encrypt_gradients_A = self.guest.send_gradients()\n encrypt_gradients_B = self.host.send_gradients()\n\n self.guest.receive_gradients(self.__decrypt_gradients(encrypt_gradients_A))\n self.host.receive_gradients(self.__decrypt_gradients(encrypt_gradients_B))\n\n encrypt_loss = self.guest.send_loss()\n loss = self.__decrypt_loss(encrypt_loss)\n\n return loss\n\n def predict(self, X_B):\n msg = self.host.predict(X_B)\n return self.guest.predict(msg)\n\n def __decrypt_gradients(self, encrypt_gradients):\n return decrypt_matrix(self.private_key, encrypt_gradients[0]), decrypt_array(self.private_key,\n encrypt_gradients[1])\n\n def __decrypt_loss(self, encrypt_loss):\n return decrypt_scalar(self.private_key, encrypt_loss)\n" ]
[ [ "numpy.squeeze", "numpy.array" ], [ "numpy.log", "numpy.expand_dims", "numpy.squeeze", "numpy.tile", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
warlock8hz/h5pyViewer
[ "4955aa6fdd66255738bd86d7b8947282133c5b82", "4955aa6fdd66255738bd86d7b8947282133c5b82" ]
[ "h5pyViewer/FrmPyFAI.py", "h5pyViewer/hdfImage.py" ]
[ "#!/usr/bin/env python\n#*-----------------------------------------------------------------------*\n#| |\n#| Copyright (c) 2013 by Paul Scherrer Institute (http://www.psi.ch) |\n#| |\n#| Author Thierry Zamofing ([email protected]) |\n#*-----------------------------------------------------------------------*\n'''\nimplements an image view to show a colored image of a hdf5 dataset.\n'''\n\nif __name__ == '__main__':\n #Used to guarantee to use at least Wx2.8\n import wxversion\n wxversion.ensureMinimal('2.8')\nimport wx\nimport matplotlib as mpl\nif __name__ == '__main__':\n mpl.use('WXAgg')\n #or mpl.use('WX')\n #matplotlib.get_backend()\n\nimport os,h5py\nimport numpy as np\nimport utilities as ut\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nimport os,h5py\nfrom GLCanvasImg import *\nimport pyFAI\nfrom hdfImageGL import HdfImageGLFrame\nfrom glumpy.image.texture import Texture\nfrom scipy import ndimage as ndi\n\ndef FindCenter(arr):\n m=ndi.median_filter(arr, 5)\n sx=m.sum(1)\n sy=m.sum(0)\n shape=arr.shape\n xx=np.arange(shape[0])\n yy=np.arange(shape[1])\n x=(xx*sx).sum()/sx.sum()\n y=(yy*sy).sum()/sy.sum()\n #print x,y\n #import pylab as plt #used for the colormaps\n #plt.figure()\n #plt.subplot(211)\n #plt.plot(sx)\n #plt.subplot(212)\n #plt.plot(sy)\n #plt.show(block=False)\n return (x,y)\n\nclass MPLCanvasPyFAI1D(FigureCanvas):\n def __init__(self,parent,SetStatusCB=None):\n if SetStatusCB:\n self.SetStatusCB=SetStatusCB\n fig = mpl.figure.Figure()\n ax = fig.add_axes([0.075,0.1,0.75,0.85])\n FigureCanvas.__init__(self,parent, -1, fig)\n #self.mpl_connect('motion_notify_event', self.OnMotion)\n #self.mpl_connect('button_press_event', self.OnBtnPress)\n #self.mpl_connect('button_release_event', self.OnBtnRelease)\n #self.mpl_connect('scroll_event', self.OnBtnScroll)\n #self.mpl_connect('key_press_event',self.OnKeyPress)\n self.fig=fig\n self.ax=ax\n\n def InitChild(self,data):\n fig=self.fig\n ax=self.ax\n ctrX,ctrY=self.center=FindCenter(data)\n self.ai = pyFAI.AzimuthalIntegrator(1.e3, ctrX, ctrY, 0.0, 0.0, 0.0, 1.e0, 1.e0)\n #canvas=self.canvas\n self.numPtTh=int(np.average(data.shape)/2.)\n out=self.ai.xrpd(data,self.numPtTh)\n self.hl=ax.plot(*out)\n ax.set_yscale('log')\n #canvas.data=imgPolar\n #print imgPolar.shape\n #out=ai.xrpd(imgData,1000)\n #out=ai.xrpd_OpenCL(imgData,1000)\n #import pylab\n #pylab.plot(*out)\n #pylab.yscale(\"log\")\n #pylab.show()\n\nclass HdfPyFAI1DFrame(wx.Frame):\n def __init__(self, parent,lbl,hid):\n wx.Frame.__init__(self, parent, title=lbl, size=wx.Size(850, 650))\n imgDir=ut.Path.GetImage()\n icon = wx.Icon(os.path.join(imgDir,'h5pyViewer.ico'), wx.BITMAP_TYPE_ICO)\n self.SetIcon(icon)\n\n t=type(hid)\n if t==h5py.h5d.DatasetID:\n data=h5py.Dataset(hid)\n\n canvas = MPLCanvasPyFAI1D(self,self.SetStatusCB)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n self.SetSizer(sizer)\n\n toolbar=ut.AddToolbar(canvas,sizer)\n\n wxAxCtrlLst=[]\n l=len(data.shape)\n idxXY=(l-2,l-1)\n for idx,l in enumerate(data.shape):\n if idx in idxXY:\n continue\n wxAxCtrl=ut.SliderGroup(self, label='Axis:%d'%idx,range=(0,l-1))\n wxAxCtrl.idx=idx\n wxAxCtrlLst.append(wxAxCtrl)\n sizer.Add(wxAxCtrl.sizer, 0, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, border=5)\n wxAxCtrl.SetCallback(HdfPyFAI1DFrame.OnSetView,wxAxCtrl)\n\n sl=ut.GetSlice(idxXY,data.shape,wxAxCtrlLst)\n\n canvas.InitChild(data[sl])\n\n #self.Fit()\n self.Centre()\n\n self.BuildMenu()\n self.canvas=canvas\n self.sizer=sizer\n 
self.toolbar=toolbar\n self.data=data\n self.idxXY=idxXY\n self.wxAxCtrlLst=wxAxCtrlLst\n\n def BuildMenu(self):\n mnBar = wx.MenuBar()\n\n #-------- Edit Menu --------\n mn = wx.Menu()\n #mnItem=mn.Append(wx.ID_ANY, 'Setup Colormap', 'Setup the color mapping ');self.Bind(wx.EVT_MENU, self.OnColmapSetup, mnItem)\n #mnItem=mn.Append(wx.ID_ANY, 'Linear Mapping', 'Use a linear values to color mapping ');self.Bind(wx.EVT_MENU, self.OnMapLin, mnItem)\n #mnItem=mn.Append(wx.ID_ANY, 'Log Mapping', 'Use a logarithmic values to color mapping ');self.Bind(wx.EVT_MENU, self.OnMapLog, mnItem)\n #mnItem=mn.Append(wx.ID_ANY, 'Invert X-Axis', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnInvertAxis, mnItem)\n #self.mnIDxAxis=mnItem.GetId()\n #mnItem=mn.Append(wx.ID_ANY, 'Invert Y-Axis', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnInvertAxis, mnItem)\n mnBar.Append(mn, '&Edit')\n mn = wx.Menu()\n #mnItem=mn.Append(wx.ID_ANY, 'Help', 'How to use the image viewer');self.Bind(wx.EVT_MENU, self.OnHelp, mnItem)\n mnBar.Append(mn, '&Help')\n\n self.SetMenuBar(mnBar)\n self.CreateStatusBar()\n\n def SetIdxXY(self,x,y):\n self.idxXY=(x,y)\n\n @staticmethod\n def SetStatusCB(obj,mode,v):\n if mode==0:\n obj.SetStatusText( \"x= %d y=%d val=%g\"%v,0)\n elif mode==1:\n obj.SetStatusText( \"Colormap Value %d (drag to scale)\"%v,0)\n else:\n raise KeyError('wrong mode')\n\n @staticmethod\n def OnSetView(usrData,value,msg):\n 'called when a slice is selected with the slider controls'\n imgFrm=usrData.slider.Parent\n #imgFrm.img.set_array(imgFrm.data[usrData.value,...])\n data=imgFrm.data\n sl=ut.GetSlice(imgFrm.idxXY,data.shape,imgFrm.wxAxCtrlLst)\n\n hl=imgFrm.canvas.hl\n ai=imgFrm.canvas.ai\n numPtTh=imgFrm.canvas.numPtTh\n out=ai.xrpd(data[sl],numPtTh)\n hl[0].set_ydata(out[1])\n imgFrm.canvas.draw()\n pass\n\n\n###########################################\n\nclass HdfPyFAIFrame(HdfImageGLFrame):\n def __init__(self, parent, title, hid):\n HdfImageGLFrame.__init__(self, parent, title, hid)\n #HdfPyFAI1DFrame(self, title, hid)\n canvas=self.canvas\n raw=canvas.data\n ctrX,ctrY=FindCenter(raw)\n self.ai = pyFAI.AzimuthalIntegrator(1.e3, ctrX, ctrY, 0.0, 0.0, 0.0, 1.e0, 1.e0)\n\n raw\n self.numPtTh=int(np.average(raw.shape)/2.)\n self.numPtCh=360\n\n imgPolar,theta,chi=self.ai.xrpd2(raw,self.numPtTh,self.numPtCh)\n canvas.data=imgPolar\n print (imgPolar.shape)\n\n def BuildMenu(self):\n HdfImageGLFrame.BuildMenu(self)\n mnBar=self.GetMenuBar()\n mn=mnBar.GetMenu(0)\n itemLst=mn.GetMenuItems()\n it=itemLst[0]\n it.GetItemLabel()\n mnItem=mn.Append(wx.ID_ANY, 'Setup FAI', 'Setup fast azimutal integration ');self.Bind(wx.EVT_MENU, self.OnFAISetup, mnItem)\n\n @staticmethod\n def OnSetView(usrData,value,msg):\n 'called when a slice is selected with the slider controls'\n frm=usrData.slider.Parent\n ds=frm.dataSet\n canvas=frm.canvas\n glImg=canvas.glImg\n sl=ut.GetSlice(frm.idxXY,ds.shape,frm.wxAxCtrlLst)\n imgPolar,theta,chi=frm.ai.xrpd2(ds[sl],frm.numPtTh,frm.numPtCh)\n canvas.data[:]=imgPolar[:]\n glImg.data[:]=canvas.GetTxrData()\n glImg.update()\n canvas.OnPaint(None)#force to repaint, Refresh and Update do not force !\n #canvas.Refresh(False)\n #canvas.Update()\n pass\n\n def OnFAISetup(self, event):\n dlg=DlgSetupPyFAI(self)\n if dlg.ShowModal()==wx.ID_OK:\n pass\n dlg.Destroy()\n\n\nclass DlgSetupPyFAI(wx.Dialog):\n def __init__(self,parent):\n wx.Dialog.__init__(self,parent,-1,'pyFAI Setup')\n ai=parent.ai\n #glColBar=parent.glColBar\n #dataRange=parent.dataRange\n txtCtrX=wx.StaticText(self,-1,'center 
X')\n txtCtrY=wx.StaticText(self,-1,'center Y')\n txtNumPtTh=wx.StaticText(self,-1,'number of pt in Theta')\n txtNumPtCh=wx.StaticText(self,-1,'number of pt in Chi')\n txtMethod=wx.StaticText(self,-1,'method')\n\n\n\n self.edCtrX=edCtrX=wx.TextCtrl(self,-1,'%g'%ai.get_poni1(),style=wx.TE_PROCESS_ENTER)\n self.edCtrY=edCtrY=wx.TextCtrl(self,-1,'%g'%ai.get_poni2(),style=wx.TE_PROCESS_ENTER)\n self.edNumPtTh=edNumPtTh=wx.TextCtrl(self,-1,'%g'%parent.numPtTh,style=wx.TE_PROCESS_ENTER)\n self.edNumPtCh=edNumPtCh=wx.TextCtrl(self,-1,'%g'%parent.numPtCh,style=wx.TE_PROCESS_ENTER)\n self.cbMethod=cbMethod=wx.ComboBox(self, -1, choices=('default','numny'), style=wx.CB_READONLY)\n #cbtxrFunc.SetSelection(parent.txrTrfFunc)\n\n sizer=wx.BoxSizer(wx.VERTICAL)\n fgs=wx.FlexGridSizer(5,2,5,5)\n fgs.Add(txtCtrX,0,wx.ALIGN_RIGHT)\n fgs.Add(edCtrX,0,wx.EXPAND)\n fgs.Add(txtCtrY,0,wx.ALIGN_RIGHT)\n fgs.Add(edCtrY,0,wx.EXPAND)\n fgs.Add(txtNumPtTh,0,wx.ALIGN_RIGHT)\n fgs.Add(edNumPtTh,0,wx.EXPAND)\n fgs.Add(txtNumPtCh,0,wx.ALIGN_RIGHT)\n fgs.Add(edNumPtCh,0,wx.EXPAND)\n fgs.Add(txtMethod,0,wx.ALIGN_RIGHT)\n fgs.Add(cbMethod,0,wx.EXPAND)\n sizer.Add(fgs,0,wx.EXPAND|wx.ALL,5)\n\n #edVMin.SetFocus()\n\n btns = self.CreateButtonSizer(wx.OK|wx.CANCEL)\n btnApply=wx.Button(self, -1, 'Apply')\n btns.Add(btnApply, 0, wx.ALL, 5)\n sizer.Add(btns,0,wx.EXPAND|wx.ALL,5)\n self.Bind(wx.EVT_BUTTON, self.OnModify, id=wx.ID_OK)\n self.Bind(wx.EVT_BUTTON, self.OnModify, btnApply)\n #self.Bind(wx.EVT_TEXT, self.OnModify, edCtrX)\n #self.Bind(wx.EVT_TEXT, self.OnModify, edCtrY)\n #self.Bind(wx.EVT_TEXT, self.OnModify, edNumSector)\n self.Bind(wx.EVT_COMBOBOX, self.OnModify, cbMethod)\n self.SetSizer(sizer)\n sizer.Fit(self)\n\n def OnModify(self, event):\n print ('OnModify')\n frm=self.GetParent()\n ds=frm.dataSet\n canvas=frm.canvas\n glImg=canvas.glImg\n ai=frm.ai\n ai.set_poni1(float(self.edCtrX.Value))\n ai.set_poni2(float(self.edCtrY.Value))\n frm.numPtTh=int(self.edNumPtTh.Value)\n frm.numPtCh=int(self.edNumPtCh.Value)\n sl=ut.GetSlice(frm.idxXY,ds.shape,frm.wxAxCtrlLst)\n imgPolar,theta,chi=frm.ai.xrpd2(ds[sl],frm.numPtTh,frm.numPtCh)\n if canvas.data.shape==imgPolar.shape:\n canvas.data[:]=imgPolar[:]\n glImg.data[:]=canvas.GetTxrData()\n else:\n canvas.data=imgPolar;\n glImg._data=canvas.GetTxrData()\n glImg._texture=Texture(glImg._data)\n #self.glImg=glImg=glumpy.image.Image(txrData, colormap=colMap,vmin=txrRng[0], vmax=txrRng[1])\n print (canvas.data.shape,glImg.data.shape)\n glImg.update()\n canvas.OnPaint(None)#force to repaint, Refresh and Update do not force !\n frm.Refresh(False)\n if event.GetId()==wx.ID_OK:\n event.Skip()#do not consume (use event to close the window and sent return code)\n\n\nif __name__ == '__main__':\n import os,sys,argparse #since python 2.7\n def GetParser(required=True):\n fnHDF='/scratch/detectorData/e14472_00033.hdf5'\n #lbl='mcs'\n lbl='pilatus_1'\n #lbl='spec'\n elem='/entry/data/'+lbl\n exampleCmd='--hdfFile='+fnHDF+' --elem='+elem\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description=__doc__,\n epilog='Example:\\n'+os.path.basename(sys.argv[0])+' '+exampleCmd+'\\n ')\n parser.add_argument('--hdfFile', required=required, default=fnHDF, help='the hdf5 to show')\n parser.add_argument('--elem', required=required, default=elem, help='the path to the element in the hdf5 file')\n return parser\n args = parser.parse_args()\n return args\n\n class App(wx.App):\n def OnInit(self):\n parser=GetParser()\n #parser=GetParser(False) # debug with 
exampleCmd\n args = parser.parse_args()\n try:\n self.fid=fid=h5py.h5f.open(args.hdfFile)\n except IOError as e:\n sys.stderr.write('Unable to open File: '+args.hdfFile+'\\n')\n parser.print_usage(sys.stderr)\n return True\n try:\n hid = h5py.h5o.open(fid,args.elem)\n except KeyError as e:\n sys.stderr.write('Unable to open Object: '+args.elem+'\\n')\n parser.print_usage(sys.stderr)\n return True\n frame = HdfPyFAIFrame(None,args.elem,hid)\n #frame = HdfPyFAI1DFrame(None,args.elem,hid)\n frame.Show()\n self.SetTopWindow(frame)\n return True\n\n def OnExit(self):\n self.fid.close()\n\n ut.StopWatch.Start()\n app = App()\n app.MainLoop()\n", "#!/usr/bin/env python\n#*-----------------------------------------------------------------------*\n#| |\n#| Copyright (c) 2013 by Paul Scherrer Institute (http://www.psi.ch) |\n#| |\n#| Author Thierry Zamofing ([email protected]) |\n#*-----------------------------------------------------------------------*\n'''\nimplements an image view to show a colored image of a hdf5 dataset.\n'''\n\nif __name__ == '__main__':\n #Used to guarantee to use at least Wx2.8\n import wxversion\n wxversion.ensureMinimal('2.8')\nimport wx\nimport matplotlib as mpl\nif __name__ == '__main__':\n mpl.use('WXAgg')\n #or mpl.use('WX')\n #matplotlib.get_backend()\n\nimport os,h5py\nimport numpy as np\nimport utilities as ut\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nimport pylab as plt #used for the colormaps\ntry:\n from libDetXR.procMoment import ProcMoment\nexcept ImportError as e:\n print ('ImportError: '+e.message)\n\n\n#from scipy import ndimage as ndi\n\n\n#or from matplotlib.backends.backend_wx import FigureCanvasWx as FigureCanvas\n#The source of the DraggableColorbar is from:\n#http://www.ster.kuleuven.be/~pieterd/python/html/plotting/interactive_colorbar.html\n\n#class ShiftedLogNorm(mpl.colors.Normalize):\nclass ShiftedLogNorm(mpl.colors.LogNorm):\n #copied and modified from LogNorm\n def __call__(self, value, clip=None):\n #print value.shape,self.vmin,self.vmax,self.clip,clip\n if clip is None:\n clip = self.clip\n ofs0=1-self.vmin\n ofs1=1./(np.log(self.vmax+1-self.vmin))\n result=np.log(value+ofs0)*ofs1\n result = np.ma.masked_less_equal(result, 0, copy=False)\n return result\n def inverse(self, value):\n if not self.scaled():\n raise ValueError(\"Not invertible until scaled\")\n vmin, vmax = self.vmin, self.vmax\n ofs0=1-vmin\n if mpl.cbook.iterable(value):\n val = np.ma.asarray(value)\n return vmin * np.ma.power((vmax/vmin), val)-ofs0\n else:\n return vmin * pow((vmax/vmin), value)-ofs0\n def autoscale_None(self, A):\n if self.vmin is None:\n self.vmin = np.ma.min(A)\n if self.vmax is None:\n self.vmax = np.ma.max(A)\n pass\n def autoscale(self, A):\n pass\n\nclass MPLCanvasImg(FigureCanvas):\n def __init__(self,parent,SetStatusCB=None):\n if SetStatusCB:\n self.SetStatusCB=SetStatusCB\n fig = mpl.figure.Figure()\n ax = fig.add_axes([0.075,0.1,0.75,0.85])\n FigureCanvas.__init__(self,parent, -1, fig)\n self.mpl_connect('motion_notify_event', self.OnMotion)\n self.mpl_connect('button_press_event', self.OnBtnPress)\n self.mpl_connect('button_release_event', self.OnBtnRelease)\n self.mpl_connect('scroll_event', self.OnBtnScroll)\n self.mpl_connect('key_press_event',self.OnKeyPress)\n self.fig=fig\n self.ax=ax\n\n def InitChild(self,data):\n if data.dtype==np.complex128:\n self.dataRaw=data\n #data=np.angle(data)\n data=np.absolute(data)\n\n fig=self.fig\n ax=self.ax\n\n msk=~np.isnan(data);msk=data[msk]\n avg=np.average(msk); 
std=np.std(msk)\n vmin=np.min(msk);vmax=np.max(msk)\n vmin=max(vmin,avg-3*std);vmax=min(vmax,avg+3*std)\n if vmin==0:vmin=1\n if vmax<=vmin:\n vmax=vmin+1\n\n #norm=ShiftedLogNorm()\n norm=mpl.colors.Normalize()\n #img = ax.imshow(data,interpolation='nearest',cmap=mpl.cm.jet, norm=ShiftedLogNorm(vmin=vmin, vmax=vmax))\n\n img = ax.imshow(data,interpolation='nearest',cmap=mpl.cm.jet, vmin=vmin, vmax=vmax)\n colBar=fig.colorbar(img,orientation='vertical',norm=norm)\n #colBar.norm=ShiftedLogNorm(vmin=vmin, vmax=vmax)\n colBar.norm=mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n img.set_norm(colBar.norm)\n img.cmap._init();bg=img.cmap._lut[0].copy();bg[:-1]/=4\n ax.set_axis_bgcolor(bg)\n\n\n self.colBar=colBar\n self.colCycle = sorted([i for i in dir(plt.cm) if hasattr(getattr(plt.cm,i),'N')])\n self.colIndex = self.colCycle.index(colBar.get_cmap().name)\n self.img=img\n\n def OnMotion(self,event):\n #print event,event.x,event.y,event.inaxes,event.xdata,event.ydata\n if event.inaxes==self.ax:\n x=int(round(event.xdata))\n y=int(round(event.ydata))\n try:\n v=self.img.get_array()[y,x]\n except IndexError as e:\n pass\n else:\n #print x,y,v\n self.SetStatusCB(self.Parent,0,(x,y,v))\n elif event.inaxes==self.colBar.ax:\n colBar=self.colBar\n pt=colBar.ax.bbox.get_points()[:,1]\n nrm=colBar.norm\n vmin,vmax,p0,p1,pS = (nrm.vmin,nrm.vmax,pt[0],pt[1],event.y)\n if isinstance(colBar.norm,mpl.colors.LogNorm):#type(colBar.norm)==mpl.colors.LogNorm does not work...\n vS=0\n else:#scale around point\n vS=vmin+(vmax-vmin)/(p1-p0)*(pS-p0)\n self.SetStatusCB(self.Parent,1,vS)\n try:\n vmin,vmax,p0,p1,pS=self.colBarPressed\n except AttributeError:\n return\n #if event.inaxes != self.cbar.ax: return\n colBar=self.colBar\n #print vmin,vmax,p0,p1,pS,type(colBar.norm)\n #print 'x0=%f, xpress=%f, event.xdata=%f, dx=%f, x0+dx=%f'%(x0, xpress, event.xdata, dx, x0+dx)\n\n if isinstance(colBar.norm,mpl.colors.LogNorm):#type(colBar.norm)==mpl.colors.LogNorm does not work...\n if event.button==1:\n #colBar.norm.vmin=.1\n colBar.norm.vmax=vmax*np.exp((pS-event.y)/100)\n #scale= np.exp((event.y-pS)/100)\n elif event.button==1:#move top,bottom,both\n pD = event.y - pS\n vD=(vmax-vmin)/(p1-p0)*(pS-event.y)\n colBar.norm.vmin = vmin+vD\n colBar.norm.vmax = vmax+vD\n elif event.button==3:#scale around point\n scale= np.exp((pS-event.y)/100)\n vS=vmin+(vmax-vmin)/(p1-p0)*(pS-p0)\n #print scale,vS\n colBar.norm.vmin = vS-scale*(vS-vmin)\n colBar.norm.vmax = vS-scale*(vS-vmax)\n self.img.set_norm(colBar.norm)#force image to redraw\n colBar.patch.figure.canvas.draw()\n\n def OnBtnPress(self, event):\n \"\"\"on button press we will see if the mouse is over us and store some data\"\"\"\n #print dir(event.guiEvent)\n if event.inaxes == self.colBar.ax:\n #if event.guiEvent.LeftDClick()==True:\n # print dlg\n pt=self.colBar.ax.bbox.get_points()[:,1]\n nrm=self.colBar.norm\n self.colBarPressed = (nrm.vmin,nrm.vmax,pt[0],pt[1],event.y)\n #self.colBarPressed = event.x, event.y\n #print self.colBarPressed\n #self.OnMouse(event)\n pass\n\n def OnBtnRelease(self, event):\n \"\"\"on release we reset the press data\"\"\"\n #self.OnMouse(event)\n try: del self.colBarPressed\n except AttributeError: pass\n\n def OnBtnScroll(self, event):\n #self.OnMouse(event)\n colBar=self.colBar\n if event.inaxes==colBar.ax:\n pt=colBar.ax.bbox.get_points()[:,1]\n nrm=colBar.norm\n vmin,vmax,p0,p1,pS = (nrm.vmin,nrm.vmax,pt[0],pt[1],event.y)\n if isinstance(colBar.norm,mpl.colors.LogNorm):#type(colBar.norm)==mpl.colors.LogNorm does not work...\n scale= 
np.exp((-event.step)/10)\n colBar.norm.vmax=vmax*scale\n else:#scale around point\n scale= np.exp((-event.step)/10)\n vS=vmin+(vmax-vmin)/(p1-p0)*(pS-p0)\n #print scale,vS\n colBar.norm.vmin = vS-scale*(vS-vmin)\n colBar.norm.vmax = vS-scale*(vS-vmax)\n self.img.set_norm(colBar.norm)#force image to redraw\n colBar.patch.figure.canvas.draw()\n\n def OnKeyPress(self, event):\n colCycle=self.colCycle\n colBar=self.colBar\n if event.key=='down':\n self.colIndex += 1\n elif event.key=='up':\n self.colIndex -= 1\n self.colIndex%=len(colCycle)\n cmap = colCycle[self.colIndex]\n colBar.set_cmap(cmap)\n colBar.draw_all()\n self.img.set_cmap(cmap)\n self.img.get_axes().set_title(cmap)\n colBar.patch.figure.canvas.draw()\n\n def OnMouse(self, event):\n for k in dir(event):\n if k[0]!='_':\n print (k,getattr(event,k))\n\nclass DlgColBarSetupOld(wx.Dialog):\n def __init__(self,parent):\n wx.Dialog.__init__(self,parent,-1,'Colormap Setup')\n colBar=parent.canvas.colBar\n cmap=colBar.cmap\n nrm=colBar.norm\n txtVMin=wx.StaticText(self,-1,'vmin')\n txtVMax=wx.StaticText(self,-1,'vmax')\n self.edVMin=edVMin=wx.TextCtrl(self,-1,'%g'%nrm.vmin)\n self.edVMax=edVMax=wx.TextCtrl(self,-1,'%g'%nrm.vmax)\n sizer=wx.BoxSizer(wx.VERTICAL)\n fgs=wx.FlexGridSizer(3,2,5,5)\n fgs.Add(txtVMin,0,wx.ALIGN_RIGHT)\n fgs.Add(edVMin,0,wx.EXPAND)\n fgs.Add(txtVMax,0,wx.ALIGN_RIGHT)\n fgs.Add(edVMax,0,wx.EXPAND)\n sizer.Add(fgs,0,wx.EXPAND|wx.ALL,5)\n\n edVMin.SetFocus()\n\n btns = self.CreateButtonSizer(wx.OK|wx.CANCEL)\n sizer.Add(btns,0,wx.EXPAND|wx.ALL,5)\n self.Bind(wx.EVT_BUTTON, self.OnBtnOk, id=wx.ID_OK)\n\n self.SetSizer(sizer)\n sizer.Fit(self)\n\n def OnBtnOk(self, event):\n event.Skip()#do not consume (use event to close the window and sent return code)\n print ('OnBtnOk')\n parent=self.GetParent()\n canvas=parent.canvas\n colBar=canvas.colBar\n colBar.norm.vmin=float(self.edVMin.Value)\n colBar.norm.vmax=float(self.edVMax.Value)\n canvas.img.set_norm(colBar.norm)\n #colBar.patch.figure.canvas.draw()\n canvas.draw()\n\n\nclass DlgColBarSetup(wx.Dialog):\n def __init__(self,parent):\n wx.Dialog.__init__(self,parent,-1,'Colormap Setup')\n colBar=parent.canvas.colBar\n cmap=colBar.cmap\n nrm=colBar.norm\n\n txtVMin=wx.StaticText(self,-1,'vmin')\n txtVMax=wx.StaticText(self,-1,'vmax')\n txtColMap=wx.StaticText(self,-1,'colormap')\n self.edVMin=edVMin=wx.TextCtrl(self,-1,'%g'%nrm.vmin,style=wx.TE_PROCESS_ENTER)\n self.edVMax=edVMax=wx.TextCtrl(self,-1,'%g'%nrm.vmax,style=wx.TE_PROCESS_ENTER)\n\n txtTxrFunc=wx.StaticText(self,-1,'function')\n self.cbtxrFunc=cbtxrFunc=wx.ComboBox(self, -1, choices=('linear','logarithmic'), style=wx.CB_READONLY)\n cbtxrFunc.SetSelection(0 if nrm.__class__==mpl.colors.Normalize else 1)\n\n #colMapLst=('Accent', 'Blues', 'BrBG', 'BuGn', 'BuPu', 'Dark2', 'GnBu', 'Greens', 'Greys', 'OrRd', 'Oranges', 'PRGn', 'Paired',\n #'Pastel1', 'Pastel2', 'PiYG', 'PuBu', 'PuBuGn', 'PuOr', 'PuRd', 'Purples', 'RdBu', 'RdGy', 'RdPu', 'RdYlBu', 'RdYlGn', 'Reds',\n #'Set1', 'Set2', 'Set3', 'Spectral', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd', 'afmhot', 'autumn', 'binary', 'bone', 'brg', 'bwr',\n #'cool', 'coolwarm', 'copper', 'cubehelix', 'flag', 'gist_earth', 'gist_gray', 'gist_heat', 'gist_ncar', 'gist_rainbow', 'gist_stern',\n #'gist_yarg', 'gnuplot', 'gnuplot2', 'gray', 'hot', 'hsv', 'jet', 'ocean', 'pink', 'prism', 'rainbow', 'seismic', 'spectral',\n #'spring', 'summer', 'terrain', 'winter')\n\n colMapLst=('hot','spectral','jet','gray','RdYlBu','hsv','gist_stern','gist_ncar','BrBG','RdYlBu','brg','gnuplot2',\n 
'prism','rainbow',)\n\n self.cbColMap=cbColMap=wx.ComboBox(self, -1, choices=colMapLst, style=wx.CB_READONLY)\n cbColMap.Value=cmap.name\n\n sizer=wx.BoxSizer(wx.VERTICAL)\n fgs=wx.FlexGridSizer(4,2,5,5)\n fgs.Add(txtVMin,0,wx.ALIGN_RIGHT)\n fgs.Add(edVMin,0,wx.EXPAND)\n fgs.Add(txtVMax,0,wx.ALIGN_RIGHT)\n fgs.Add(edVMax,0,wx.EXPAND)\n fgs.Add(txtTxrFunc,0,wx.ALIGN_RIGHT)\n fgs.Add(cbtxrFunc,0,wx.EXPAND)\n fgs.Add(txtColMap,0,wx.ALIGN_RIGHT)\n fgs.Add(cbColMap,0,wx.EXPAND)\n sizer.Add(fgs,0,wx.EXPAND|wx.ALL,5)\n\n edVMin.SetFocus()\n\n btns = self.CreateButtonSizer(wx.OK|wx.CANCEL)\n btnApply=wx.Button(self, -1, 'Apply')\n btns.Add(btnApply, 0, wx.ALL, 5)\n sizer.Add(btns,0,wx.EXPAND|wx.ALL,5)\n self.Bind(wx.EVT_BUTTON, self.OnModify, id=wx.ID_OK)\n self.Bind(wx.EVT_BUTTON, self.OnModify, btnApply)\n #self.Bind(wx.EVT_TEXT_ENTER, self.OnModify, edVMin)\n #self.Bind(wx.EVT_TEXT_ENTER, self.OnModify, edVMax)\n self.Bind(wx.EVT_TEXT, self.OnModify, edVMin)\n self.Bind(wx.EVT_TEXT, self.OnModify, edVMax)\n self.Bind(wx.EVT_COMBOBOX, self.OnModify, cbtxrFunc)\n self.Bind(wx.EVT_COMBOBOX, self.OnModify, cbColMap)\n\n self.SetSizer(sizer)\n sizer.Fit(self)\n\n def OnModify(self, event):\n #print 'OnModify'\n parent=self.GetParent()\n canvas=parent.canvas\n colBar=canvas.colBar\n cmap=colBar.cmap\n nrm=colBar.norm\n img=canvas.img\n ax=img.get_axes()\n data=img.get_array()\n\n v=self.cbColMap.Value\n if v!=cmap.name:\n cmap=getattr(mpl.cm,v)\n colBar.set_cmap(cmap)\n colBar.draw_all()\n img.set_cmap(cmap)\n ax.set_title(cmap.name)\n colBar.patch.figure.canvas.draw()\n\n vmin,vmax=(float(self.edVMin.Value),float(self.edVMax.Value))\n nrm.vmin=vmin; nrm.vmax=vmax\n v=self.cbtxrFunc.GetCurrentSelection()\n func=(mpl.colors.Normalize,ShiftedLogNorm)\n if nrm.__class__!=func[v]:\n if v==0: #linear mapping\n colBar.norm = mpl.colors.Normalize(vmin, vmax)\n elif v==1: #log mapping\n img.cmap._init();bg=img.cmap._lut[0].copy();bg[:-1]/=4\n ax.set_axis_bgcolor(bg)\n vmin=1\n colBar.norm = mpl.colors.LogNorm(vmin,vmax)\n img.set_norm(colBar.norm)\n colBar.patch.figure.canvas.draw()\n parent.Refresh(False)\n if event.GetId()==wx.ID_OK:\n event.Skip()#do not consume (use event to close the window and sent return code)\n\nclass HdfImageFrame(wx.Frame):\n def __init__(self, parent,lbl,hid):\n wx.Frame.__init__(self, parent, title=lbl, size=wx.Size(850, 650))\n imgDir=ut.Path.GetImage()\n icon = wx.Icon(os.path.join(imgDir,'h5pyViewer.ico'), wx.BITMAP_TYPE_ICO)\n self.SetIcon(icon)\n\n t=type(hid)\n if t==h5py.h5d.DatasetID:\n data=h5py.Dataset(hid)\n\n canvas = MPLCanvasImg(self,self.SetStatusCB)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n self.SetSizer(sizer)\n\n toolbar=ut.AddToolbar(canvas,sizer)\n\n wxAxCtrlLst=[]\n l=len(data.shape)\n idxXY=(l-2,l-1)\n for idx,l in enumerate(data.shape):\n if idx in idxXY:\n continue\n wxAxCtrl=ut.SliderGroup(self, label='Axis:%d'%idx,range=(0,l-1))\n wxAxCtrl.idx=idx\n wxAxCtrlLst.append(wxAxCtrl)\n sizer.Add(wxAxCtrl.sizer, 0, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, border=5)\n wxAxCtrl.SetCallback(HdfImageFrame.OnSetView,wxAxCtrl)\n\n sl=ut.GetSlice(idxXY,data.shape,wxAxCtrlLst)\n\n canvas.InitChild(data[sl])\n\n #self.Fit()\n self.Centre()\n\n self.BuildMenu(data.dtype)\n self.canvas=canvas\n self.sizer=sizer\n self.toolbar=toolbar\n self.data=data\n self.idxXY=idxXY\n self.wxAxCtrlLst=wxAxCtrlLst\n\n def BuildMenu(self,dtype):\n mnBar = wx.MenuBar()\n\n #-------- Edit Menu --------\n mn = wx.Menu()\n 
mnItem=mn.Append(wx.ID_ANY, 'Setup Colormap', 'Setup the color mapping ');self.Bind(wx.EVT_MENU, self.OnColmapSetup, mnItem)\n mnItem=mn.Append(wx.ID_ANY, 'Invert X-Axis', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnInvertAxis, mnItem)\n self.mnIDxAxis=mnItem.GetId()\n mnItem=mn.Append(wx.ID_ANY, 'Invert Y-Axis', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnInvertAxis, mnItem)\n mnItem=mn.Append(wx.ID_ANY, 'Show Moments', 'Show image moments ', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnShowMoments, mnItem)\n self.mnItemShowMoment=mnItem\n mnItem=mn.Append(wx.ID_ANY, 'Tomo Normalize', 'Multiplies each pixel with a normalization factor. Assumes there exist an array exchange/data_white', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnTomoNormalize, mnItem)\n self.mnItemTomoNormalize=mnItem\n\n if dtype==np.complex128:\n mnItem=mn.Append(wx.ID_ANY, 'Complex: Phase', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnSetComplexData, mnItem)\n\n\n mnBar.Append(mn, '&Edit')\n mn = wx.Menu()\n mnItem=mn.Append(wx.ID_ANY, 'Help', 'How to use the image viewer');self.Bind(wx.EVT_MENU, self.OnHelp, mnItem)\n mnBar.Append(mn, '&Help')\n\n self.SetMenuBar(mnBar)\n self.CreateStatusBar()\n\n def SetIdxXY(self,x,y):\n self.idxXY=(x,y)\n\n @staticmethod\n def SetStatusCB(obj,mode,v):\n if mode==0:\n obj.SetStatusText( \"x= %d y=%d val=%g\"%v,0)\n elif mode==1:\n obj.SetStatusText( \"Colormap Value %d (drag to scale)\"%v,0)\n else:\n raise KeyError('wrong mode')\n\n @staticmethod\n def OnSetView(usrData,value,msg):\n 'called when a slice is selected with the slider controls'\n imgFrm=usrData.slider.Parent\n #imgFrm.img.set_array(imgFrm.data[usrData.value,...])\n data=imgFrm.data\n sl=ut.GetSlice(imgFrm.idxXY,data.shape,imgFrm.wxAxCtrlLst)\n\n try:\n tomoNorm=imgFrm.tomoNorm\n except AttributeError:\n imgFrm.canvas.img.set_array(data[sl])\n else:\n data=data[sl]*tomoNorm\n imgFrm.canvas.img.set_array(data)\n\n if imgFrm.mnItemShowMoment.IsChecked():\n imgFrm.PlotMoments()\n imgFrm.canvas.draw()\n pass\n\n def OnShowMoments(self,event):\n if event.IsChecked():\n dlg = wx.FileDialog(self, \"Choose valid mask file (e.g. 
pilatus_valid_mask.mat)\", os.getcwd(), '','MATLAB files (*.mat)|*.mat|all (*.*)|*.*', wx.FD_OPEN|wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n fnMatMsk = dlg.GetPath()\n print ('OnOpen',fnMatMsk)\n dlg.Destroy()\n if not fnMatMsk:\n return\n #fnMatMsk='/scratch/detectorData/cSAXS_2013_10_e14608_georgiadis_3D_for_Marianne/analysis/data/pilatus_valid_mask.mat'\n self.procMoment=pm=ProcMoment()\n pm.SetMskMat(fnMatMsk,False)\n #roi=[603, 826, 200, 200]\n #pm.roi=(slice(roi[1],roi[1]+roi[3]),slice(roi[0],roi[0]+roi[2]))\n #pm.shape=(roi[3],roi[2])\n\n #pm.SetProcess('python')\n #pm.SetProcess('pyFast')\n pm.SetProcess('c')\n self.PlotMoments()\n #self.canvas.img.draw()\n data=self.canvas.img.get_array()\n self.canvas.img.set_array(data)\n fig, ax = plt.subplots(2)\n v=data.sum(axis=0); x=np.arange(v.size); x0=x.sum(); m0=v.sum(); m1=(v*x).sum(); m2=(v*x*x).sum()\n ax[0].plot(v);\n m=m1/m0\n s=np.sqrt( (m2-(m1**2/m0))/m0)\n xx=1/(s*np.sqrt(2*np.pi))*np.exp(-.5*((x-m)/s)**2)\n ax[0].set_title('%g | %g | %g | %g | %g'%(m0,m1,m2,m,s))\n ax[0].hold(True);ax[0].plot(xx*m0)\n\n v=data.sum(axis=1);\n ax[1].plot(v);\n\n\n plt.show()\n #print pm.resArr[0:3],pm.resArr[1]/pm.resArr[0],pm.resArr[2]/pm.resArr[0]\n else:\n for o in self.goMoment:\n o.remove()\n del self.goMoment\n del self.procMoment\n self.canvas.draw()\n\n def PlotMoments(self):\n data=self.canvas.img.get_array()\n pm=self.procMoment\n\n #data=ndi.median_filter(data, 3)\n try:\n data.ravel()[pm.mskIdx]=0\n except AttributeError as e:\n print (e)\n try:\n data=data[pm.roi]\n except AttributeError as e:\n print (e)\n #data=np.log(data+1)\n #data[100:110,500:510]=1000 #y,x\n #data[650:850,700:850]=0 #y,x\n #pm.Process(np.log(data+1))\n pm.Process(data)\n xbar, ybar, cov=pm.GetIntertialAxis()\n\n m=pm.resArr\n m00=m[0];m01=m[1];m10=m[2];m11=m[3];m02=m[4];m20=m[5]\n\n xm = m10 / m00\n ym = m01 / m00\n u11 = (m11 - xm * m01) / m00\n #u11[u11<0.]=0. #processing rounding error\n u20 = (m20 - xm * m10) / m00\n u02 = (m02 - ym * m01) / m00\n a=(u20+u02)/2\n b=np.sqrt(4*u11**2+(u20-u02)**2)/2\n l0=a+b\n l1=a-b\n ang=0.5*np.arctan2(2*u11,(u20-u02))/(2*np.pi)*360. 
#orientation value 0..1\n exc=np.sqrt(1-l1/l0) #eccentricity :circle=0: http://en.wikipedia.org/wiki/Eccentricity_%28mathematics%29\n\n print ('xb:%g yb:%g cov:%g %g %g %g ang:%g exc:%g'%((xm, ym)+tuple(cov.ravel())+(ang,exc)))\n #fig, ax = plt.subplots()\n #ax.imshow(data,vmax=100,interpolation='nearest')\n #plt.show()\n ax=self.canvas.img.get_axes()\n try:\n for o in self.goMoment:\n o.remove()\n except AttributeError: pass\n\n self.goMoment=ProcMoment.PlotMoments(ax, xbar, ybar, cov)\n ax.axis('image')\n\n def OnTomoNormalize(self,event):\n if event.IsChecked():\n #try to find white image\n #calculate average\n #show white normalize factors\n white=self.data.parent['data_white']\n tomoNorm=white[1,:,:]\n #tomoNorm=white[:,:,:].mean(axis=0)\n #np.iinfo(tomoNorm.dtype).max\n #tomoNorm=float(np.iinfo(tomoNorm.dtype).max/2)/tomoNorm\n tomoNorm=tomoNorm.mean()/tomoNorm\n #tomoNorm=tomoNorm/float(np.iinfo(tomoNorm.dtype).max)\n data=self.canvas.img.get_array()\n data*=tomoNorm\n #data/=tomoNorm\n self.tomoNorm=tomoNorm\n self.canvas.img.set_array(data)\n else:\n tomoNorm=self.tomoNorm\n data=self.canvas.img.get_array()\n data/=tomoNorm\n self.canvas.img.set_array(data)\n del self.tomoNorm\n self.canvas.draw()\n\n def OnSetComplexData(self, event):\n if event.IsChecked():\n data=np.angle(self.canvas.dataRaw)\n else:\n data=np.absolute(self.canvas.dataRaw)\n self.canvas.img.set_array(data)\n self.canvas.draw()\n\n def OnHelp(self,event):\n msg='''to change the image selection:\nuse the toolbar at the bottom to pan and zoom the image\nuse the scrollbars at the bottom (if present) to select an other slice\n\nto change the colorscale:\ndrag with left mouse button to move the colorbar up and down\ndrag with right mouse button to zoom in/out the colorbar at a given point\nuse mouse weel to zoom in/out the colorbar at a given point\ndouble click left mouse button to set maximum and minimun colorbar values\nuse cursor up and down to use a different colormap'''\n dlg = wx.MessageDialog(self, msg, 'Help', wx.OK|wx.ICON_INFORMATION)\n dlg.ShowModal()\n dlg.Destroy()\n\n def OnColmapSetup(self,event):\n dlg=DlgColBarSetup(self)\n if dlg.ShowModal()==wx.ID_OK:\n pass\n dlg.Destroy()\n\n def OnInvertAxis(self,event):\n ax=self.canvas.ax\n #event.Checked()\n if self.mnIDxAxis==event.GetId():\n ax.invert_xaxis()\n else:\n ax.invert_yaxis()\n self.canvas.draw()\n pass\n\nif __name__ == '__main__':\n import os,sys,argparse #since python 2.7\n def GetParser(required=True):\n fnHDF='/scratch/detectorData/e14472_00033.hdf5'\n #lbl='mcs'\n lbl='pilatus_1'\n #lbl='spec'\n elem='/entry/dataScan00033/'+lbl\n exampleCmd='--hdfFile='+fnHDF+' --elem='+elem\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description=__doc__,\n epilog='Example:\\n'+os.path.basename(sys.argv[0])+' '+exampleCmd+'\\n ')\n parser.add_argument('--hdfFile', required=required, default=fnHDF, help='the hdf5 to show')\n parser.add_argument('--elem', required=required, default=elem, help='the path to the element in the hdf5 file')\n return parser\n args = parser.parse_args()\n return args\n\n class App(wx.App):\n def OnInit(self):\n parser=GetParser()\n #parser=GetParser(False) # debug with exampleCmd\n args = parser.parse_args()\n try:\n self.fid=fid=h5py.h5f.open(args.hdfFile)\n except IOError as e:\n sys.stderr.write('Unable to open File: '+args.hdfFile+'\\n')\n parser.print_usage(sys.stderr)\n return True\n try:\n hid = h5py.h5o.open(fid,args.elem)\n except KeyError as e:\n sys.stderr.write('Unable to 
open Object: '+args.elem+'\\n')\n parser.print_usage(sys.stderr)\n return True\n frame = HdfImageFrame(None,args.elem,hid)\n frame.Show()\n self.SetTopWindow(frame)\n return True\n\n def OnExit(self):\n self.fid.close()\n\n ut.StopWatch.Start()\n app = App()\n app.MainLoop()\n" ]
[ [ "matplotlib.figure.Figure", "matplotlib.use", "numpy.arange", "scipy.ndimage.median_filter", "numpy.average", "matplotlib.backends.backend_wxagg.FigureCanvasWxAgg.__init__" ], [ "numpy.sqrt", "numpy.arctan2", "numpy.max", "numpy.ma.power", "numpy.ma.max", "matplotlib.backends.backend_wxagg.FigureCanvasWxAgg.__init__", "numpy.exp", "matplotlib.cbook.iterable", "numpy.arange", "numpy.ma.min", "numpy.std", "numpy.log", "numpy.ma.asarray", "numpy.min", "numpy.isnan", "numpy.ma.masked_less_equal", "numpy.absolute", "matplotlib.colors.LogNorm", "matplotlib.figure.Figure", "matplotlib.use", "matplotlib.colors.Normalize", "numpy.angle", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
abbasegbeyemi/pyqtgraph
[ "6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed", "6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed", "6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed" ]
[ "examples/optics/pyoptic.py", "examples/SimplePlot.py", "examples/verlet_chain/chain.py" ]
[ "# -*- coding: utf-8 -*-\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport csv, gzip, os\nfrom pyqtgraph import Point\n\nclass GlassDB:\n \"\"\"\n Database of dispersion coefficients for Schott glasses\n + Corning 7980\n \"\"\"\n def __init__(self, fileName='schott_glasses.csv'):\n path = os.path.dirname(__file__)\n fh = gzip.open(os.path.join(path, 'schott_glasses.csv.gz'), 'rb')\n r = csv.reader(map(str, fh.readlines()))\n lines = [x for x in r]\n self.data = {}\n header = lines[0]\n for l in lines[1:]:\n info = {}\n for i in range(1, len(l)):\n info[header[i]] = l[i]\n self.data[l[0]] = info\n self.data['Corning7980'] = { ## Thorlabs UV fused silica--not in schott catalog.\n 'B1': 0.68374049400,\n 'B2': 0.42032361300,\n 'B3': 0.58502748000,\n 'C1': 0.00460352869,\n 'C2': 0.01339688560,\n 'C3': 64.49327320000,\n 'TAUI25/250': 0.95, ## transmission data is fabricated, but close.\n 'TAUI25/1400': 0.98,\n }\n \n for k in self.data:\n self.data[k]['ior_cache'] = {}\n \n\n def ior(self, glass, wl):\n \"\"\"\n Return the index of refraction for *glass* at wavelength *wl*.\n \n The *glass* argument must be a key in self.data.\n \"\"\"\n info = self.data[glass]\n cache = info['ior_cache']\n if wl not in cache:\n B = list(map(float, [info['B1'], info['B2'], info['B3']]))\n C = list(map(float, [info['C1'], info['C2'], info['C3']]))\n w2 = (wl/1000.)**2\n n = np.sqrt(1.0 + (B[0]*w2 / (w2-C[0])) + (B[1]*w2 / (w2-C[1])) + (B[2]*w2 / (w2-C[2])))\n cache[wl] = n\n return cache[wl]\n \n def transmissionCurve(self, glass):\n data = self.data[glass]\n keys = [int(x[7:]) for x in data.keys() if 'TAUI25' in x]\n keys.sort()\n curve = np.empty((2,len(keys)))\n for i in range(len(keys)):\n curve[0][i] = keys[i]\n key = 'TAUI25/%d' % keys[i]\n val = data[key]\n if val == '':\n val = 0\n else:\n val = float(val)\n curve[1][i] = val\n return curve\n \n\nGLASSDB = GlassDB()\n\n\ndef wlPen(wl):\n \"\"\"Return a pen representing the given wavelength\"\"\"\n l1 = 400\n l2 = 700\n hue = np.clip(((l2-l1) - (wl-l1)) * 0.8 / (l2-l1), 0, 0.8)\n val = 1.0\n if wl > 700:\n val = 1.0 * (((700-wl)/700.) + 1)\n elif wl < 400:\n val = wl * 1.0/400.\n #print hue, val\n color = pg.hsvColor(hue, 1.0, val)\n pen = pg.mkPen(color)\n return pen\n\n\nclass ParamObj(object):\n # Just a helper for tracking parameters and responding to changes\n def __init__(self):\n self.__params = {}\n \n def __setitem__(self, item, val):\n self.setParam(item, val)\n \n def setParam(self, param, val):\n self.setParams(**{param:val})\n \n def setParams(self, **params):\n \"\"\"Set parameters for this optic. 
This is a good function to override for subclasses.\"\"\"\n self.__params.update(params)\n self.paramStateChanged()\n\n def paramStateChanged(self):\n pass\n\n def __getitem__(self, item):\n # bug in pyside 1.2.2 causes getitem to be called inside QGraphicsObject.parentItem:\n return self.getParam(item) # PySide bug: https://bugreports.qt.io/browse/PYSIDE-671\n \n def __len__(self):\n # Workaround for PySide bug: https://bugreports.qt.io/browse/PYSIDE-671\n return 0\n\n def getParam(self, param):\n return self.__params[param]\n\n\nclass Optic(pg.GraphicsObject, ParamObj):\n \n sigStateChanged = QtCore.Signal()\n \n \n def __init__(self, gitem, **params):\n ParamObj.__init__(self)\n pg.GraphicsObject.__init__(self) #, [0,0], [1,1])\n\n self.gitem = gitem\n self.surfaces = gitem.surfaces\n gitem.setParentItem(self)\n \n self.roi = pg.ROI([0,0], [1,1])\n self.roi.addRotateHandle([1, 1], [0.5, 0.5])\n self.roi.setParentItem(self)\n \n defaults = {\n 'pos': Point(0,0),\n 'angle': 0,\n }\n defaults.update(params)\n self._ior_cache = {}\n self.roi.sigRegionChanged.connect(self.roiChanged)\n self.setParams(**defaults)\n \n def updateTransform(self):\n self.setPos(0, 0)\n tr = QtGui.QTransform()\n self.setTransform(tr.translate(Point(self['pos'])).rotate(self['angle']))\n \n def setParam(self, param, val):\n ParamObj.setParam(self, param, val)\n\n def paramStateChanged(self):\n \"\"\"Some parameters of the optic have changed.\"\"\"\n # Move graphics item\n self.gitem.setPos(Point(self['pos']))\n self.gitem.resetTransform()\n self.gitem.setRotation(self['angle'])\n \n # Move ROI to match\n try:\n self.roi.sigRegionChanged.disconnect(self.roiChanged)\n br = self.gitem.boundingRect()\n o = self.gitem.mapToParent(br.topLeft())\n self.roi.setAngle(self['angle'])\n self.roi.setPos(o)\n self.roi.setSize([br.width(), br.height()])\n finally:\n self.roi.sigRegionChanged.connect(self.roiChanged)\n \n self.sigStateChanged.emit()\n\n def roiChanged(self, *args):\n pos = self.roi.pos()\n # rotate gitem temporarily so we can decide where it will need to move\n self.gitem.resetTransform()\n self.gitem.setRotation(self.roi.angle())\n br = self.gitem.boundingRect()\n o1 = self.gitem.mapToParent(br.topLeft())\n self.setParams(angle=self.roi.angle(), pos=pos + (self.gitem.pos() - o1))\n \n def boundingRect(self):\n return QtCore.QRectF()\n \n def paint(self, p, *args):\n pass\n\n def ior(self, wavelength):\n return GLASSDB.ior(self['glass'], wavelength)\n \n\n\nclass Lens(Optic):\n def __init__(self, **params):\n defaults = {\n 'dia': 25.4, ## diameter of lens\n 'r1': 50., ## positive means convex, use 0 for planar\n 'r2': 0, ## negative means convex\n 'd': 4.0,\n 'glass': 'N-BK7',\n 'reflect': False,\n }\n defaults.update(params)\n d = defaults.pop('d')\n defaults['x1'] = -d/2.\n defaults['x2'] = d/2.\n \n gitem = CircularSolid(brush=(100, 100, 130, 100), **defaults)\n Optic.__init__(self, gitem, **defaults)\n \n def propagateRay(self, ray):\n \"\"\"Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays\"\"\"\n\n \"\"\"\n NOTE:: We can probably use this to compute refractions faster: (from GLSL 120 docs)\n\n For the incident vector I and surface normal N, and the\n ratio of indices of refraction eta, return the refraction\n vector. 
The result is computed by\n k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I))\n if (k < 0.0)\n return genType(0.0)\n else\n return eta * I - (eta * dot(N, I) + sqrt(k)) * N\n The input parameters for the incident vector I and the\n surface normal N must already be normalized to get the\n desired results. eta == ratio of IORs\n\n\n For reflection:\n For the incident vector I and surface orientation N,\n returns the reflection direction:\n I – 2 ∗ dot(N, I) ∗ N\n N must already be normalized in order to achieve the\n desired result.\n \"\"\"\n iors = [self.ior(ray['wl']), 1.0]\n for i in [0,1]:\n surface = self.surfaces[i]\n ior = iors[i]\n p1, ai = surface.intersectRay(ray)\n if p1 is None:\n ray.setEnd(None)\n break\n p1 = surface.mapToItem(ray, p1)\n \n rd = ray['dir']\n a1 = np.arctan2(rd[1], rd[0])\n ar = a1 - ai + np.arcsin((np.sin(ai) * ray['ior'] / ior))\n ray.setEnd(p1)\n dp = Point(np.cos(ar), np.sin(ar))\n ray = Ray(parent=ray, ior=ior, dir=dp)\n return [ray]\n \n\nclass Mirror(Optic):\n def __init__(self, **params):\n defaults = {\n 'r1': 0,\n 'r2': 0,\n 'd': 0.01,\n }\n defaults.update(params)\n d = defaults.pop('d')\n defaults['x1'] = -d/2.\n defaults['x2'] = d/2.\n gitem = CircularSolid(brush=(100,100,100,255), **defaults)\n Optic.__init__(self, gitem, **defaults)\n \n def propagateRay(self, ray):\n \"\"\"Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays\"\"\"\n \n surface = self.surfaces[0]\n p1, ai = surface.intersectRay(ray)\n if p1 is not None:\n p1 = surface.mapToItem(ray, p1)\n rd = ray['dir']\n a1 = np.arctan2(rd[1], rd[0])\n ar = a1 + np.pi - 2*ai\n ray.setEnd(p1)\n dp = Point(np.cos(ar), np.sin(ar))\n ray = Ray(parent=ray, dir=dp)\n else:\n ray.setEnd(None)\n return [ray]\n\n\nclass CircularSolid(pg.GraphicsObject, ParamObj):\n \"\"\"GraphicsObject with two circular or flat surfaces.\"\"\"\n def __init__(self, pen=None, brush=None, **opts):\n \"\"\"\n Arguments for each surface are:\n x1,x2 - position of center of _physical surface_\n r1,r2 - radius of curvature\n d1,d2 - diameter of optic\n \"\"\"\n defaults = dict(x1=-2, r1=100, d1=25.4, x2=2, r2=100, d2=25.4)\n defaults.update(opts)\n ParamObj.__init__(self)\n self.surfaces = [CircleSurface(defaults['r1'], defaults['d1']), CircleSurface(-defaults['r2'], defaults['d2'])]\n pg.GraphicsObject.__init__(self)\n for s in self.surfaces:\n s.setParentItem(self)\n \n if pen is None:\n self.pen = pg.mkPen((220,220,255,200), width=1, cosmetic=True)\n else:\n self.pen = pg.mkPen(pen)\n \n if brush is None: \n self.brush = pg.mkBrush((230, 230, 255, 30))\n else:\n self.brush = pg.mkBrush(brush)\n\n self.setParams(**defaults)\n\n def paramStateChanged(self):\n self.updateSurfaces()\n\n def updateSurfaces(self):\n self.surfaces[0].setParams(self['r1'], self['d1'])\n self.surfaces[1].setParams(-self['r2'], self['d2'])\n self.surfaces[0].setPos(self['x1'], 0)\n self.surfaces[1].setPos(self['x2'], 0)\n \n self.path = QtGui.QPainterPath()\n self.path.connectPath(self.surfaces[0].path.translated(self.surfaces[0].pos()))\n self.path.connectPath(self.surfaces[1].path.translated(self.surfaces[1].pos()).toReversed())\n self.path.closeSubpath()\n \n def boundingRect(self):\n return self.path.boundingRect()\n \n def shape(self):\n return self.path\n \n def paint(self, p, *args):\n p.setRenderHints(p.renderHints() | p.Antialiasing)\n p.setPen(self.pen)\n p.fillPath(self.path, self.brush)\n p.drawPath(self.path)\n \n\nclass CircleSurface(pg.GraphicsObject):\n def __init__(self, radius=None, 
diameter=None):\n \"\"\"center of physical surface is at 0,0\n radius is the radius of the surface. If radius is None, the surface is flat. \n diameter is of the optic's edge.\"\"\"\n pg.GraphicsObject.__init__(self)\n \n self.r = radius\n self.d = diameter\n self.mkPath()\n \n def setParams(self, r, d):\n self.r = r\n self.d = d\n self.mkPath()\n \n def mkPath(self):\n self.prepareGeometryChange()\n r = self.r\n d = self.d\n h2 = d/2.\n self.path = QtGui.QPainterPath()\n if r == 0: ## flat surface\n self.path.moveTo(0, h2)\n self.path.lineTo(0, -h2)\n else:\n ## half-height of surface can't be larger than radius\n h2 = min(h2, abs(r))\n arc = QtCore.QRectF(0, -r, r*2, r*2)\n a1 = np.arcsin(h2/r) * 180. / np.pi\n a2 = -2*a1\n a1 += 180.\n self.path.arcMoveTo(arc, a1)\n self.path.arcTo(arc, a1, a2)\n self.h2 = h2\n \n def boundingRect(self):\n return self.path.boundingRect()\n \n def paint(self, p, *args):\n return ## usually we let the optic draw.\n \n def intersectRay(self, ray):\n ## return the point of intersection and the angle of incidence\n #print \"intersect ray\"\n h = self.h2\n r = self.r\n p, dir = ray.currentState(relativeTo=self) # position and angle of ray in local coords.\n #print \" ray: \", p, dir\n p = p - Point(r, 0) ## move position so center of circle is at 0,0\n #print \" adj: \", p, r\n \n if r == 0:\n #print \" flat\"\n if dir[0] == 0:\n y = 0\n else:\n y = p[1] - p[0] * dir[1]/dir[0]\n if abs(y) > h:\n return None, None\n else:\n return (Point(0, y), np.arctan2(dir[1], dir[0]))\n else:\n #print \" curve\"\n ## find intersection of circle and line (quadratic formula)\n dx = dir[0]\n dy = dir[1]\n dr = (dx**2 + dy**2) ** 0.5\n D = p[0] * (p[1]+dy) - (p[0]+dx) * p[1]\n idr2 = 1.0 / dr**2\n disc = r**2 * dr**2 - D**2\n if disc < 0:\n return None, None\n disc2 = disc**0.5\n if dy < 0:\n sgn = -1\n else:\n sgn = 1\n \n \n br = self.path.boundingRect()\n x1 = (D*dy + sgn*dx*disc2) * idr2\n y1 = (-D*dx + abs(dy)*disc2) * idr2\n if br.contains(x1+r, y1):\n pt = Point(x1, y1)\n else:\n x2 = (D*dy - sgn*dx*disc2) * idr2\n y2 = (-D*dx - abs(dy)*disc2) * idr2\n pt = Point(x2, y2)\n if not br.contains(x2+r, y2):\n return None, None\n raise Exception(\"No intersection!\")\n \n norm = np.arctan2(pt[1], pt[0])\n if r < 0:\n norm += np.pi\n #print \" norm:\", norm*180/3.1415\n dp = p - pt\n #print \" dp:\", dp\n ang = np.arctan2(dp[1], dp[0]) \n #print \" ang:\", ang*180/3.1415\n #print \" ai:\", (ang-norm)*180/3.1415\n \n #print \" intersection:\", pt\n return pt + Point(r, 0), ang-norm\n\n \nclass Ray(pg.GraphicsObject, ParamObj):\n \"\"\"Represents a single straight segment of a ray\"\"\"\n \n sigStateChanged = QtCore.Signal()\n \n def __init__(self, **params):\n ParamObj.__init__(self)\n defaults = {\n 'ior': 1.0,\n 'wl': 500,\n 'end': None,\n 'dir': Point(1,0),\n }\n self.params = {}\n pg.GraphicsObject.__init__(self)\n self.children = []\n parent = params.get('parent', None)\n if parent is not None:\n defaults['start'] = parent['end']\n defaults['wl'] = parent['wl']\n self['ior'] = parent['ior']\n self['dir'] = parent['dir']\n parent.addChild(self)\n \n defaults.update(params)\n defaults['dir'] = Point(defaults['dir'])\n self.setParams(**defaults)\n self.mkPath()\n \n def clearChildren(self):\n for c in self.children:\n c.clearChildren()\n c.setParentItem(None)\n self.scene().removeItem(c)\n self.children = []\n \n def paramStateChanged(self):\n pass\n \n def addChild(self, ch):\n self.children.append(ch)\n ch.setParentItem(self)\n \n def currentState(self, relativeTo=None):\n 
pos = self['start']\n dir = self['dir']\n if relativeTo is None:\n return pos, dir\n else:\n trans = self.itemTransform(relativeTo)[0]\n p1 = trans.map(pos)\n p2 = trans.map(pos + dir)\n return Point(p1), Point(p2-p1)\n \n def setEnd(self, end):\n self['end'] = end\n self.mkPath()\n\n def boundingRect(self):\n return self.path.boundingRect()\n \n def paint(self, p, *args):\n #p.setPen(pg.mkPen((255,0,0, 150)))\n p.setRenderHints(p.renderHints() | p.Antialiasing)\n p.setCompositionMode(p.CompositionMode_Plus)\n p.setPen(wlPen(self['wl']))\n p.drawPath(self.path)\n \n def mkPath(self):\n self.prepareGeometryChange()\n self.path = QtGui.QPainterPath()\n self.path.moveTo(self['start'])\n if self['end'] is not None:\n self.path.lineTo(self['end'])\n else:\n self.path.lineTo(self['start']+500*self['dir'])\n\n\ndef trace(rays, optics):\n if len(optics) < 1 or len(rays) < 1:\n return\n for r in rays:\n r.clearChildren()\n o = optics[0]\n r2 = o.propagateRay(r)\n trace(r2, optics[1:])\n\n\nclass Tracer(QtCore.QObject):\n \"\"\"\n Simple ray tracer. \n \n Initialize with a list of rays and optics; \n calling trace() will cause rays to be extended by propagating them through\n each optic in sequence.\n \"\"\"\n def __init__(self, rays, optics):\n QtCore.QObject.__init__(self)\n self.optics = optics\n self.rays = rays\n for o in self.optics:\n o.sigStateChanged.connect(self.trace)\n self.trace()\n \n def trace(self):\n trace(self.rays, self.optics)\n\n", "import initExample ## Add path to library (just for examples; you do not need this)\n\nimport pyqtgraph as pg\nimport pyqtgraph.exporters\nimport numpy as np\nplt = pg.plot(np.random.normal(size=100), title=\"Simplest possible plotting example\")\n\n## Start Qt event loop unless running in interactive mode or using pyside.\nif __name__ == '__main__':\n import sys\n if sys.flags.interactive != 1 or not hasattr(pg.QtCore, 'PYQT_VERSION'):\n pg.QtGui.QApplication.exec_()\n", "import pyqtgraph as pg\nimport numpy as np\nimport time\nfrom . 
import relax\n\n\nclass ChainSim(pg.QtCore.QObject):\n \n stepped = pg.QtCore.Signal()\n relaxed = pg.QtCore.Signal()\n \n def __init__(self):\n pg.QtCore.QObject.__init__(self)\n \n self.damping = 0.1 # 0=full damping, 1=no damping\n self.relaxPerStep = 10\n self.maxTimeStep = 0.01\n \n self.pos = None # (Npts, 2) float\n self.mass = None # (Npts) float\n self.fixed = None # (Npts) bool\n self.links = None # (Nlinks, 2), uint\n self.lengths = None # (Nlinks), float\n self.push = None # (Nlinks), bool\n self.pull = None # (Nlinks), bool\n \n self.initialized = False\n self.lasttime = None\n self.lastpos = None\n \n def init(self):\n if self.initialized:\n return\n \n if self.fixed is None:\n self.fixed = np.zeros(self.pos.shape[0], dtype=bool)\n if self.push is None:\n self.push = np.ones(self.links.shape[0], dtype=bool)\n if self.pull is None:\n self.pull = np.ones(self.links.shape[0], dtype=bool)\n \n \n # precompute relative masses across links\n l1 = self.links[:,0]\n l2 = self.links[:,1]\n m1 = self.mass[l1]\n m2 = self.mass[l2]\n self.mrel1 = (m1 / (m1+m2))[:,np.newaxis]\n self.mrel1[self.fixed[l1]] = 1 # fixed point constraint\n self.mrel1[self.fixed[l2]] = 0\n self.mrel2 = 1.0 - self.mrel1\n\n for i in range(10):\n self.relax(n=10)\n \n self.initialized = True\n \n def makeGraph(self):\n #g1 = pg.GraphItem(pos=self.pos, adj=self.links[self.rope], pen=0.2, symbol=None)\n brushes = np.where(self.fixed, pg.mkBrush(0,0,0,255), pg.mkBrush(50,50,200,255))\n g2 = pg.GraphItem(pos=self.pos, adj=self.links[self.push & self.pull], pen=0.5, brush=brushes, symbol='o', size=(self.mass**0.33), pxMode=False)\n p = pg.ItemGroup()\n #p.addItem(g1)\n p.addItem(g2)\n return p\n \n def update(self):\n # approximate physics with verlet integration\n \n now = pg.ptime.time()\n if self.lasttime is None:\n dt = 0\n else:\n dt = now - self.lasttime\n self.lasttime = now\n \n # limit amount of work to be done between frames\n if not relax.COMPILED:\n dt = self.maxTimeStep\n\n if self.lastpos is None:\n self.lastpos = self.pos\n\n # remember fixed positions\n fixedpos = self.pos[self.fixed]\n \n while dt > 0:\n dt1 = min(self.maxTimeStep, dt)\n dt -= dt1\n \n # compute motion since last timestep\n dx = self.pos - self.lastpos\n self.lastpos = self.pos\n \n # update positions for gravity and inertia\n acc = np.array([[0, -5]]) * dt1\n inertia = dx * (self.damping**(dt1/self.mass))[:,np.newaxis] # with mass-dependent damping\n self.pos = self.pos + inertia + acc\n\n self.pos[self.fixed] = fixedpos # fixed point constraint\n \n # correct for link constraints\n self.relax(self.relaxPerStep)\n self.stepped.emit()\n \n \n def relax(self, n=50):\n # speed up with C magic if possible\n relax.relax(self.pos, self.links, self.mrel1, self.mrel2, self.lengths, self.push, self.pull, n)\n self.relaxed.emit()\n \n \n\n" ]
[ [ "numpy.sqrt", "numpy.clip", "numpy.arcsin", "numpy.cos", "numpy.sin", "numpy.arctan2" ], [ "numpy.random.normal" ], [ "numpy.array", "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
XinyueZ/some-python-codes
[ "2d7296a4deebb0cd086be34ad7d66f5042cdf6e6", "2d7296a4deebb0cd086be34ad7d66f5042cdf6e6" ]
[ "machine_learning/tf_notMNIST_Training_Gradient_Descent.py", "machine_learning/tf_notMNIST_Training_Convolutional_Layer.py" ]
[ "#\n# Run NN, multinomial logistic regression using simple gradient descent.\n#\nimport config\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import (Variable, constant, global_variables_initializer,\n truncated_normal, zeros)\n\nfrom tf_training_helper import TrainingHelper\n\n\nclass TF_notMNIST_Training_Gradient_Descent:\n def __init__(self, each_object_size_width=config.TRAIN_OBJECT_WIDTH, each_object_size_height=config.TRAIN_OBJECT_HEIGHT, train_batch=10000, train_steps=800, train_learning_rate=0.5):\n \"\"\"\n Constructor.\n \"\"\"\n self.each_object_size_width = each_object_size_width\n self.each_object_size_height = each_object_size_height\n self.train_batch = train_batch\n self.train_steps = train_steps\n self.train_learning_rate = train_learning_rate\n\n helper = TrainingHelper()\n self.__print_predications__ = helper.print_predications\n self.__print_test_accuracy__ = helper.print_test_accuracy\n self.__activation__ = helper.activation\n self.__loss_optimizer__ = helper.loss_optimizer\n\n def start_with(self, train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels, count_classes, beta_for_regularizer=0.01):\n \"\"\"\n Start multinomial logistic regression using simple gradient descent.\n \"\"\"\n #\n # Fixed values while training\n #\n tf_train_dataset = constant(train_dataset[:self.train_batch, :])\n tf_train_labels = constant(train_labels[:self.train_batch])\n tf_valid_dataset = constant(valid_dataset)\n tf_test_dataset = constant(test_dataset)\n\n #\n # Variables should be trained.\n # Classical weight and biases.\n #\n tf_weights = Variable(truncated_normal(\n [self.each_object_size_width * self.each_object_size_height, count_classes]))\n tf_biases = Variable(zeros([count_classes]))\n\n logits = self.__activation__(tf_train_dataset, tf_weights, tf_biases)\n loss, optimizer = self.__loss_optimizer__(\n tf_train_labels, logits, self.train_learning_rate, beta_for_regularizer, [tf_weights])\n\n #\n # Convert dataset to predication\n # The actual problem is transformed into a probabilistic problem.\n #\n predication_for_train = tf.nn.softmax(logits)\n predication_for_valid = tf.nn.softmax(\n self.__activation__(tf_valid_dataset, tf_weights, tf_biases))\n predication_for_test = tf.nn.softmax(\n self.__activation__(tf_test_dataset, tf_weights, tf_biases))\n\n #\n # Training\n #\n print(\"\\n\")\n with tf.Session() as sess:\n init = global_variables_initializer()\n sess.run(init)\n for step in range(self.train_steps):\n _, ls, predications = sess.run(\n [optimizer, loss, predication_for_train])\n self.__print_predications__(\n step, ls, predications, train_labels[:self.train_batch, :], predication_for_valid, valid_labels) \n \n self.__print_test_accuracy__(predication_for_test, test_labels)\n", "#\n# Run NN, implementation of convolutional traning.\n#\nimport config\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import (Variable, constant, global_variables_initializer, placeholder,\n reduce_mean, truncated_normal, zeros)\n\nfrom tf_training_helper import TrainingHelper\n\n\nclass TF_notMNIST_Training_Convolutional_Layer:\n def __init__(self, each_object_size_width=config.TRAIN_OBJECT_WIDTH, each_object_size_height=config.TRAIN_OBJECT_HEIGHT, train_steps=800, train_learning_rate=0.5, patch_size=5, channels=1, depth=16, hidden=64):\n \"\"\"\n Constructor.\n \"\"\"\n self.each_object_size_width = each_object_size_width\n self.each_object_size_height = each_object_size_height\n self.train_steps = train_steps\n 
self.train_learning_rate = train_learning_rate\n self.patch_size = patch_size\n self.channels = channels\n self.depth = depth\n self.hidden = hidden\n\n helper = TrainingHelper()\n self.__print_predications__ = helper.print_predications\n self.__print_test_accuracy__ = helper.print_test_accuracy\n self.__activation__ = helper.activation\n self.__loss_optimizer__ = helper.loss_optimizer\n self.__convolutional_model__ = helper.convolutional_model\n\n def start_with(self, train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels, count_classes, data_batch_size=130):\n \"\"\"\n Start multinomial logistic regression using simple gradient descent.\n \"\"\"\n #\n # Changable values while training\n #\n tf_train_dataset = placeholder(tf.float32,\n shape=(data_batch_size, self.each_object_size_width, self.each_object_size_height, self.channels))\n tf_train_labels = placeholder(\n tf.float32, shape=(data_batch_size, count_classes))\n #\n # Fixed values while training\n #\n tf_valid_dataset = constant(valid_dataset)\n tf_test_dataset = constant(test_dataset)\n\n #\n # Variables should be trained.\n # Classical weight and biases.\n #\n tf_var_1_map = {\n \"weights\": Variable(truncated_normal(\n [self.patch_size, self.patch_size, self.channels, self.depth], stddev=0.1)),\n \"biases\": Variable(zeros([self.depth]))\n }\n tf_var_2_map = {\n \"weights\": Variable(truncated_normal(\n [self.patch_size, self.patch_size,\n self.depth, self.depth], stddev=0.1\n )),\n \"biases\": Variable(constant(1.0, shape=[self.depth]))\n }\n tf_var_3_map = {\n \"weights\": Variable(truncated_normal(\n [self.each_object_size_width // 4*self.each_object_size_height//4*self.depth, self.hidden], stddev=0.1\n )),\n \"biases\": Variable(constant(1.0, shape=[self.hidden]))\n }\n tf_var_4_map = {\n \"weights\": Variable(truncated_normal(\n [self.hidden, count_classes], stddev=0.1\n )),\n \"biases\": Variable(constant(1.0, shape=[count_classes]))\n }\n\n #\n # Logits, loss and optimizer\n #\n logits = self.__convolutional_model__(\n tf_train_dataset,\n tf_var_1_map,\n tf_var_2_map,\n tf_var_3_map,\n tf_var_4_map\n )\n loss = reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(\n labels=tf_train_labels, logits=logits)\n )\n optimizer = tf.train.GradientDescentOptimizer(\n self.train_learning_rate).minimize(loss)\n\n #\n # Convert dataset to predication\n # The actual problem is transformed into a probabilistic problem.\n #\n predication_for_train = tf.nn.softmax(logits)\n predication_for_valid = tf.nn.softmax(\n self.__convolutional_model__(\n tf_valid_dataset,\n tf_var_1_map,\n tf_var_2_map,\n tf_var_3_map,\n tf_var_4_map\n )\n )\n predication_for_test = tf.nn.softmax(\n self.__convolutional_model__(\n tf_test_dataset,\n tf_var_1_map,\n tf_var_2_map,\n tf_var_3_map,\n tf_var_4_map\n )\n )\n\n #\n # Training\n #\n print(\"\\n\")\n with tf.Session() as sess:\n init = global_variables_initializer()\n sess.run(init)\n for step in range(self.train_steps):\n #\n # TODO Can do more optimized batch computation.\n #\n offset = (\n step * data_batch_size) % (train_labels.shape[0] - data_batch_size)\n batch_dataset = train_dataset[offset:(\n offset + data_batch_size), :]\n batch_labels = train_labels[offset: (\n offset + data_batch_size), :]\n\n # Per loop replace tf_train_dataset, tf_train_labels with batch.\n _, ls, predications = sess.run(\n [optimizer, loss, predication_for_train],\n feed_dict={\n tf_train_dataset: batch_dataset,\n tf_train_labels: batch_labels\n })\n self.__print_predications__(\n step, 
ls, predications, batch_labels, predication_for_valid, valid_labels)\n\n self.__print_test_accuracy__(predication_for_test, test_labels)\n" ]
[ [ "tensorflow.nn.softmax", "tensorflow.truncated_normal", "tensorflow.constant", "tensorflow.zeros", "tensorflow.global_variables_initializer", "tensorflow.Session" ], [ "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.truncated_normal", "tensorflow.zeros", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.Session", "tensorflow.nn.softmax_cross_entropy_with_logits_v2" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
mone27/fastai
[ "af8dfc07ca3f333f8c1bdbea1803af669a53738f" ]
[ "fastai/callback/tensorboard.py" ]
[ "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/71_callback.tensorboard.ipynb (unless otherwise specified).\n\n__all__ = ['TensorBoardCallback']\n\n# Cell\nfrom ..basics import *\n\n# Cell\nimport tensorboard\nfrom torch.utils.tensorboard import SummaryWriter\nfrom .fp16 import ModelToHalf\n\n# Cell\nclass TensorBoardCallback(Callback):\n \"Saves model topology, losses & metrics\"\n def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9):\n store_attr(self, 'log_dir,trace_model,log_preds,n_preds')\n\n def before_fit(self):\n self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, \"gather_preds\") and rank_distrib()==0\n self.writer = SummaryWriter(log_dir=self.log_dir)\n if self.trace_model:\n if hasattr(self.learn, 'mixed_precision'):\n raise Exception(\"Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.\")\n b = self.dls.one_batch()\n self.learn._split(b)\n self.writer.add_graph(self.model, *self.xb)\n\n def after_batch(self):\n self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter)\n for i,h in enumerate(self.opt.hypers):\n for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter)\n\n def after_epoch(self):\n for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]):\n self.writer.add_scalar(n, v, self.train_iter)\n if self.log_preds:\n b = self.dls.valid.one_batch()\n self.learn.one_batch(0, b)\n preds = getattr(self.loss_func, 'activation', noop)(self.pred)\n out = getattr(self.loss_func, 'decodes', noop)(preds)\n x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds)\n tensorboard_log(x, y, its, outs, self.writer, self.train_iter)\n\n def after_fit(self): self.writer.close()\n\n# Cell\nfrom ..vision.data import *\n\n# Cell\n@typedispatch\ndef tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step):\n fig,axs = get_grid(len(samples), add_vert=1, return_fig=True)\n for i in range(2):\n axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)]\n axs = [r.show(ctx=c, color='green' if b==r else 'red')\n for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)]\n writer.add_figure('Sample results', fig, step)\n\n# Cell\nfrom ..vision.core import TensorPoint,TensorBBox\n\n# Cell\n@typedispatch\ndef tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step):\n fig,axs = get_grid(len(samples), add_vert=1, return_fig=True, double=True)\n for i in range(2):\n axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])]\n for x in [samples,outs]:\n axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])]\n writer.add_figure('Sample results', fig, step)" ]
[ [ "torch.utils.tensorboard.SummaryWriter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alarca94/senti-transfer
[ "da83a072c8d471bc74aa25b237b5e301502db869" ]
[ "utils/inout.py" ]
[ "import os\nimport yaml\n\nimport pandas as pd\nimport xml.etree.ElementTree as ET\n\nfrom types import SimpleNamespace\nfrom sklearn.model_selection import train_test_split\n\nfrom utils.experiment_utils import create_linspace\nfrom utils.preprocess import *\n\n\nSOURCE_PATH = './source_data'\nDATA_PATH = './data'\nCONFIG_PATH = './conf'\nDATASETS = ['ami', 'emoevent', 'haternet', 'hateval2019', 'mex-a3t', 'universal_joy', 'tass2019', 'detoxis']\n\n\nclass Colors:\n BLACK = '\\033[1;30m'\n RED = '\\033[1;31m'\n GREEN = '\\033[1;32m'\n YELLOW = '\\033[1;33m'\n BLUE = '\\033[1;34m'\n PURPLE = '\\033[1;35m'\n CYAN = '\\033[1;36m'\n WHITE = '\\033[1;37m'\n ENDC = '\\033[0m'\n\n\ndef colored(text, color):\n return f'{color}{text}{Colors.ENDC}'\n\n\ndef write_split_files(dataset, trn, dev, tst):\n trn.to_csv(os.path.join(DATA_PATH, dataset, 'train_es.tsv'), index=False, sep='\\t', mode='w')\n dev.to_csv(os.path.join(DATA_PATH, dataset, 'dev_es.tsv'), index=False, sep='\\t', mode='w')\n tst.to_csv(os.path.join(DATA_PATH, dataset, 'test_es.tsv'), index=False, sep='\\t', mode='w')\n\n\ndef prepare_files():\n seed = 100\n test_ratio = 0.2\n\n # EmoEvent and HaterNet\n filename = 'original_es.tsv'\n data = {'emoevent': pd.read_csv(os.path.join(SOURCE_PATH, 'emoevent', filename), sep='\\t'),\n 'haternet': pd.read_csv(os.path.join(SOURCE_PATH, 'haternet', filename), sep=';\\\\|\\\\|;',\n names=['id', 'text', 'hateful'],\n header=None,\n engine=\"python\")}\n labels = {'emoevent': 'offensive',\n 'haternet': 'hateful'}\n\n for dataset in data:\n data[dataset].text = basic_text_normalization(data[dataset].text)\n y = data[dataset][labels[dataset]]\n trn, tst = train_test_split(data[dataset], shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)\n y = trn[labels[dataset]]\n trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)\n write_split_files(dataset, trn, dev, tst)\n print(f'Dataset: {dataset} --> N. Instances: {data[dataset].shape[0]} --> Train, Dev., Test: '\n f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')\n\n # HatEval 2019\n dataset = 'hateval2019'\n n_instances = {}\n\n for phase in ['train', 'dev', 'test']:\n data = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'original_{phase}_es.csv'), sep=',')\n data.text = basic_text_normalization(data.text)\n data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\\t', mode='w')\n n_instances[phase] = data.shape[0]\n\n print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '\n f'{n_instances[\"train\"]}, {n_instances[\"dev\"]}, {n_instances[\"test\"]}')\n\n # MEX-A3T\n dataset = 'mex-a3t'\n columns = ['text', 'aggressiveness']\n trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_train.tsv'), sep='\\t', names=columns)\n tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_test.tsv'), sep='\\t', names=columns)\n\n trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.aggressiveness, random_state=seed)\n for subset in [trn, dev, tst]:\n subset.text = basic_text_normalization(subset.text)\n write_split_files(dataset, trn, dev, tst)\n print(f'Dataset: {dataset} --> N. 
Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '\n f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')\n\n # TASS 2019\n dataset = 'tass2019'\n n_instances = {}\n for phase in ['train', 'dev', 'test']:\n phase_data = pd.DataFrame()\n for country in ['ES', 'CR', 'MX', 'PE', 'UY']:\n root = ET.parse(os.path.join(SOURCE_PATH, dataset, f'TASS2019_country_{country}_{phase}.xml')).getroot()\n tweets = []\n for item in root.iter('tweet'):\n tweet = {'country': country}\n for tweet_field in item.iter():\n if tweet_field.tag not in ['tweet', 'sentiment', 'polarity']:\n tweet[tweet_field.tag] = tweet_field.text\n tweets.append(tweet)\n phase_data = phase_data.append(tweets)\n new_cols = {'tweetid': 'tweet_id', 'content': 'text', 'user': 'user_id', 'value': 'polarity'}\n phase_data.rename(columns=new_cols, inplace=True)\n phase_data = phase_data[['tweet_id', 'user_id', 'country', 'date', 'text', 'polarity']]\n phase_data.text = basic_text_normalization(phase_data.text)\n phase_data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\\t', mode='w')\n n_instances[phase] = phase_data.shape[0]\n\n print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '\n f'{n_instances[\"train\"]}, {n_instances[\"dev\"]}, {n_instances[\"test\"]}')\n\n # Universal Joy\n dataset = 'universal_joy'\n trn_data = {}\n for filename in ['small', 'large', 'combi']:\n trn_data[filename] = pd.read_csv(os.path.join(SOURCE_PATH, dataset, filename + '.csv'))\n trn_data[filename] = trn_data[filename][trn_data[filename].language == 'es']\n trn_data[filename].text = trn_data[filename].text.apply(universal_joy_cleaning)\n\n # Apparently, spanish comments in 'large' and 'combi' are the same and 'small' is created using a subset of those\n trn = pd.concat(trn_data.values(), axis=0, ignore_index=True)\n trn.drop_duplicates(inplace=True, subset='text')\n\n # There is no overlapping between training, validation and test (also, they do not contain duplicates)\n dev = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'val.csv'))\n dev.drop_duplicates(inplace=True, subset='text')\n tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'test.csv'))\n tst.drop_duplicates(inplace=True, subset='text')\n # The test set approximately represents 12.5% of the total data\n # print(tst.shape[0]/(trn.shape[0] + dev.shape[0] + tst.shape[0]))\n\n # DETOXIS\n dataset = 'detoxis'\n\n trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'train.csv'), sep=',')\n tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'test.csv'), sep=',')\n\n trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.toxicity_level, random_state=seed)\n for subset in [trn, dev, tst]:\n subset.rename(columns={'comment': 'text'}, inplace=True)\n subset.text = basic_text_normalization(subset.text)\n write_split_files(dataset, trn, dev, tst)\n print(f'Dataset: {dataset} --> N. 
Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '\n f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')\n\n\ndef read_datasets(datasets, tasks, lang='es'):\n data = {}\n for dataset in datasets:\n if dataset not in DATASETS:\n raise Exception(f'Dataset {dataset} is not in the list of available datasets!')\n\n data[dataset] = {\n 'trn': pd.read_csv(os.path.join(DATA_PATH, dataset, f'train_{lang}.tsv'), sep='\\t'),\n 'dev': pd.read_csv(os.path.join(DATA_PATH, dataset, f'dev_{lang}.tsv'), sep='\\t'),\n 'tst': pd.read_csv(os.path.join(DATA_PATH, dataset, f'test_{lang}.tsv'), sep='\\t')\n }\n\n for phase in data[dataset]:\n data[dataset][phase] = data[dataset][phase][['text'] + tasks[dataset]]\n\n return data\n\n\ndef create_namespace_from_dict(dic, name=None):\n for k, v in dic.items():\n if isinstance(v, dict):\n dic[k] = create_namespace_from_dict(v, k)\n ns = SimpleNamespace(**dic)\n ns.__name__ = name\n return ns\n\n\ndef process_config(dic, name=None):\n for k, v in dic.items():\n if k not in ['transfer_learning', 'optimization']:\n if isinstance(v, dict):\n dic[k] = process_config(v, k)\n elif isinstance(v, list):\n for vi in v:\n if isinstance(vi, dict):\n dic[k] += create_linspace(vi)\n dic[k] = dic[k][1:]\n else:\n dic[k] = [v]\n return dic\n\n\ndef load_config(config_file):\n with open(os.path.join(CONFIG_PATH, config_file), 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n return process_config(config) # create_namespace_from_dict(config)\n\n\ndef log(string, indent=0):\n start = '\\t' * indent\n print(f'{start}{string}')\n" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
cafltar/CAF_EC_Column_Rename
[ "7375678081d8931f34e7ab8b4a6e02eca112e721" ]
[ "LTAR_Flux_QC.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 21 15:53:46 2018\n\n@author: Eric S. Russell\nLaboratory for Atmospheric Research\nDept. of Civil and Environmental Engineering\nWashington State University\[email protected]\n\nNot all of these functions are used in the column rename script; these are potentially to be used with this processing \ndepending on other's thoughts. This is a trial run of dealing with code across sites.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport datetime\n\n\"\"\" \nQA/QC processing for flux data:\n Inputs:\n data: Full input data\n grade: Maximum QA/QC grade as assigned by the flux calculation code\n LE_B: Two number array with the highest (LE_B[1]) and lowest (LE_B[0]) hard limit LE value\n H_B: Same as LE_B but for H\n F_B: Same as LE-B but for Fc\n cls:\n gg:\n Outputs:\n data: Dataframe with the filtered data; does not track reason for removing data.\n \n Conditional for door_is_open_Hst since not all sites will/do have enclosure door sensors installed\n\"\"\" \n# This function not implemented into the script; still thinking about how I want to format this and integrate so user doesn't have to do a lot to make work\n\ndef Grade_cs(df,info, Site, site=False):\n if site == True: \n grade = int(info['grade'][Site])\n LE_B = [float(info['LEL'][Site]),float(info['LEU'][Site])]\n H_B = [float(info['HL'][Site]),float(info['HU'][Site])]\n F_B = [float(info['FCL'][Site]),float(info['FCU'][Site])]\n T_B = [float(info['TL'][Site]),float(info['TU'][Site])]\n elif site == False:\n grade = int(info['Val_L']['grade'])\n LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])]\n H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])]\n F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])]\n T_B = [float(info['Val_L']['T_B']),float(info['Val_U']['T_B'])]\n gg = ['H_SSITC_TEST','LE_SSITC_TEST','FC_SSITC_TEST','TAU_SSITC_TEST']\n cls =['H','LE','FC', 'TAU']\n# var = ['H_Flags','LE_Flags','Fc_Flags'] Needs flagging system for QC\n pd.options.mode.chained_assignment = None \n if (grade >9) | (grade<1):\n print('Grade number must be between 0-9.')\n return # 'exit' function and return error \n Good = None\n data = []; data=pd.DataFrame(data,index=df.index)\n if cls[1] in df.columns:\n HL = (df[cls[1]].astype(float) < LE_B[0]) | (df[cls[1]].astype(float)>LE_B[1]) | df[cls[1]].astype(float).isnull()\n if gg[1] in df.columns:\n Grade = (df[gg[1]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n df[cls[1]][~Grade] = np.NaN\n data[cls[1]+'_Flag'] = 0\n data[cls[1]+'_Flag'][~Grade] = 1\n if cls[0] in df.columns:\n HL = (df[cls[0]].astype(float) < H_B[0]) | (df[cls[0]].astype(float)> H_B[1]) | df[cls[0]].astype(float).isnull()\n if gg[0] in df.columns:\n Grade = (df[gg[0]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n df[cls[0]][~Grade] = np.NaN\n data[cls[0]+'_Flag'] = 0\n data[cls[0]+'_Flag'][~Grade] = 1\n if cls[2] in df.columns:\n HL = (df[cls[2]].astype(float) < F_B[0])|(df[cls[2]].astype(float) > F_B[1]) | df[cls[2]].astype(float).isnull()\n if gg[2] in df.columns:\n Grade = (df[gg[2]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n df[cls[2]][~Grade] = np.NaN\n data[cls[2]+'_Flag'] = 0\n data[cls[2]+'_Flag'][~Grade] = 1\n if cls[3] in df.columns:\n HL = (df[cls[3]].astype(float) < T_B[0])|(df[cls[3]].astype(float) > T_B[1]) | df[cls[3]].astype(float).isnull()\n if gg[3] in df.columns:\n Grade = (df[gg[3]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n data[cls[3]+'_Flag'] = 0\n 
data[cls[3]+'_Flag'][~Grade] = 1\n # Rain Mask\n if 'P' in df.columns:\n Precip = (df['P'].astype(float) == 0) | (df['P'].astype(float) == -9999)\n precip = True\n data['P_Flag'] = 0\n data['P_Flag'][~Precip] = 1\n else: precip = False \n if 'CO2_sig_strgth_Min' in df.columns:\n c_sig_strength = df['CO2_sig_strgth_Min'] > 0.7\n data['CO2_Signal_Strength'] = 0\n data['CO2_Signal_Strength'][~c_sig_strength] = 1\n if 'H2O_sig_strgth_Min' in df.columns:\n w_sig_strength = df['H2O_sig_strgth_Min'] > 0.7\n data['H2O_Signal_Strength'] = 0\n data['H2O_Signal_Strength'][~w_sig_strength] = 1\n if 'CO2_samples_Tot' in df.columns:\n Samp_Good_IRGA = df['CO2_samples_Tot'].astype(float)>14400\n data['CO2_Samples_Flag'] = 0\n data['CO2_Samples_Flag'][~Samp_Good_IRGA] = 1\n irga = True\n else: irga=False\n if 'sonic_samples_Tot' in df.columns:\n Samp_Good_Sonic = df['sonic_samples_Tot'].astype(float) > 14400\n data['Sonic_Samples_Flag'] = 0\n data['Sonic_Samples_Flag'][~Samp_Good_Sonic] = 1\n sonic = True\n else: sonic=False\n if 'used_records' in df.columns: \n Samp_Good_Sonic = df['used_records'].astype(float)>14400\n sonic = True\n else: sonic=False\n if 'door_is_open_Hst' in df.columns:\n Door_Closed = df['door_is_open_Hst'].astype(float) == 0\n pc = True\n else: pc = False\n if precip&irga&sonic&pc:\n Good = Door_Closed &Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength\n elif precip&irga&sonic&~pc:\n Good = Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength\n elif precip&~irga&~sonic&~pc:\n Good = Precip&w_sig_strength&c_sig_strength\n elif precip&~irga&sonic&~pc:\n Good = Samp_Good_Sonic&Precip&w_sig_strength&c_sig_strength\n elif ~precip&~irga&sonic&~pc:\n Good = Samp_Good_Sonic&w_sig_strength&c_sig_strength\n elif ~precip&irga&sonic&pc:\n Good = Samp_Good_Sonic&Samp_Good_IRGA&w_sig_strength&c_sig_strength\n if Good is not None:\n if cls[3] in df.columns:\n df[cls[3]][~Good] = np.NaN\n if cls[2] in df.columns:\n df[cls[2]][~Good] = np.NaN\n if cls[1] in df.columns:\n df[cls[1]][~Good] = np.NaN\n if cls[0] in df.columns:\n df[cls[0]][~Good] = np.NaN\n return df, data\n\n\n#Fills in the blanks spaces with NaN's so the time index is continuous\ndef indx_fill(df, time): \n df.index = pd.to_datetime(df.index)\n # Sort index in case it came in out of order, a possibility depending on filenames and naming scheme\n df = df.sort_index()\n # Remove any duplicate times, can occur if files from mixed sources and have overlapping endpoints\n df = df[~df.index.duplicated(keep='first')]\n for k in range (0,len(df)):\n if str(df.index[k])=='NaT':\n df = df.drop(df.index[k])\n # Fill in missing times due to tower being down and pad dataframe to midnight of the first and last day\n idx = pd.date_range(df.index[0].floor('D'),df.index[len(df.index)-1].ceil('D'),freq = time)\n df = df.reindex(idx, fill_value=np.NaN)\n return df\n\n# Used to format EddyPro data by combining the date and time into a common index and dropping the filename column\ndef format_ep(df):\n df.index = df['date']+' '+df['time']\n df = df.drop(['filename'],1)\n df.index = pd.to_datetime(df.index)\n return df\n\n# This function not used in main script; potential to be used with QC function\ndef ReadIn_Initial(info):\n # Values pulled in from a separate *.csv file because easier and flexible\n grade = int(info['Val_L']['grade'])\n LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])]\n H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])]\n F_B = 
[float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])]\n gg = [(info['Val_L']['gg']),(info['Val_U']['gg']),(info['Val_3']['gg'])]\n cls = [(info['Val_L']['cls']),(info['Val_U']['cls']),(info['Val_3']['cls']), (info['Val_4']['cls'])]\n return grade, LE_B,H_B,F_B,gg,cls\n\n# Reads in a directory of files based on the format for either EddyPro or EasyFlux\ndef Fast_Read(filenames, time, form):\n if len(filenames) == 0:\n print('No Files in directory, check the path name.')\n return # 'exit' function and return error\n else:\n #Initialize dataframe used within function\n Final = [];Final = pd.DataFrame(Final)\n if form == 'EF':\n for k in range (0,len(filenames)):\n df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 1,skiprows=[2,3],low_memory=False)\n Final = pd.concat([Final,df], sort = False)\n elif form == 'EP':\n for k in range (0,len(filenames)):\n df = pd.read_csv(filenames[k],header= 1,skiprows=[2],sep=',',low_memory=False)\n Final = pd.concat([Final,df])\n Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns\n Final =Final.drop(['filename'],1) # not needed string-based column; gets in the way of converting to floating point\n elif form == 'Biomet':\n for k in range (0,len(filenames)):\n df = pd.read_csv(filenames[k],header= 0,skiprows=[1],sep=',',low_memory=False)\n Final = pd.concat([Final,df])\n Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns\n else: \n print('Format must be either EF or EP')\n return\n # Convert time index\n Final = Final.sort_index()\n Out = indx_fill(Final, time)\n return Out # Return dataframe to main function. \n\ndef Despike_7(s,ss,x,lab,delta_time, multi):\n an,Tim = [],[]\n while ss < x.index[-1]:\n x_m = np.nanmean(x[ss:s])\n x_s = np.nanstd(x[ss:s])\n x_d = x[ss:s]\n an.append((x_d > (x_m-(multi*x_s))) & (x_d < (x_m+(multi*x_s))))\n ss+= datetime.timedelta(days=delta_time)\n Tim.append((x_d.index))\n s+= datetime.timedelta(days=delta_time)\n qq = np.hstack(an)\n an = pd.DataFrame(qq, columns = [lab])\n an.index = np.hstack(Tim)\n an = an[~an.index.duplicated(keep='first')]\n# x[an[lab]==False] = np.NaN\n return an\n\ndef Met_QAQC(**kwargs):\n Q = None\n if 'Tair' in kwargs.keys():\n Tair = pd.DataFrame(kwargs['Tair'])\n Q = Tair; Q = pd.DataFrame(Q); \n Q['Tair_Hard_Limit'] = (Q[Tair.columns[0]].astype(float) <= 50) & (Q[Tair.columns[0]].astype(float) >= -40)\n Q['Tair_Change'] = ~(np.abs(Q[Tair.columns[0]].diff() >= 25)) & (np.abs(Q[Tair.columns[0]].diff() != 0)) # (~np.isnan(Q[Tair.columns[0]].diff())) & \n Q['Tair_Day_Change'] = (Tair.resample('D').mean().diff !=0)\n Q['Tair_Filtered'] = Q[Tair.columns[0]][Q['Tair_Hard_Limit'] & Q['Tair_Change'] & Q['Tair_Day_Change']]\n else:\n print('**** Temperature not present ****')\n \n if 'RH' in kwargs.keys():\n RH = pd.DataFrame(kwargs['RH']) \n if Q is None:\n Q = RH; Q = pd.DataFrame(Q)\n else: Q= Q.join(RH)\n Q['RH_Hard_Limit'] = (Q[RH.columns[0]].astype(float) <= 100) & (Q[RH.columns[0]].astype(float) >= 0)\n Q['RH_gt_100'] = (Q[RH.columns[0]].astype(float) >= 100) & (Q[RH.columns[0]].astype(float) <= 110)\n Q['RH_Change'] = (np.abs(Q[RH.columns[0]].astype(float).diff() <= 50)) & (np.abs(Q[RH.columns[0]].diff() != 0)) # & (~np.isnan(Q[RH.columns[0]].astype(float).diff()))\n Q['RH_Day_Change'] = (RH.resample('D').mean().diff !=0) \n Q['RH_Filtered'] = Q[RH.columns[0]][Q['RH_Hard_Limit']&Q['RH_Change']& Q['RH_Day_Change']]\n Q['RH_Filtered'] = 
Q['RH_Filtered'].replace(to_replace=Q['RH_Filtered'][Q['RH_gt_100']], value = 100)\n# Q['RH_Filtered'][Q['RH_gt_100']]=100\n else:\n print('**** RH not present ****')\n\n if 'P' in kwargs.keys():\n P = pd.DataFrame(kwargs['P']); \n if Q is None:\n Q = P; Q = pd.DataFrame(Q)\n else: Q= Q.join(P) \n Q['P_Hard_Limit'] = (Q[P.columns[0]].astype(float) <= 100) &(Q[P.columns[0]].astype(float) >= 70)\n Q['P_Change'] = (np.abs(Q[P.columns[0]].diff() <= 3.1)) & (np.abs(Q[P.columns[0]].diff() != 0)) # & (~np.isnan(Q[P.columns[0]].diff())) \n Q['P_Filtered'] = Q[P.columns[0]][Q['P_Hard_Limit'] & Q['P_Change']]\n if ('Tair' in kwargs.keys()) & ('z' in kwargs.keys()):\n MSLP = []; \n H = pd.DataFrame((8.314*(Tair[Tair.columns[0]]+273.15))/(0.029*9.81)/1000) # Scale height\n x = pd.DataFrame(-kwargs['z']/H[H.columns[0]]); \n MSLP = P[P.columns[0]]/np.exp(x[x.columns[0]]) # Mean Sea Level Pressure\n MSLP = pd.DataFrame(MSLP);MSLP = MSLP.rename(columns={MSLP.columns[0]:\"MSLP\"})\n Q= Q.join(MSLP)\n Q['MSLP_Hard_Limit'] = (Q[MSLP.columns[0]].astype(float) <= 110) &(Q[MSLP.columns[0]].astype(float) >= 80)\n Q['MSLP_Change'] = (np.abs(Q[MSLP.columns[0]].diff() <= 31)) & (np.abs(Q[MSLP.columns[0]].diff() != 0)) #& (~np.isnan(Q[MSLP.columns[0]].diff())) \n Q['MSLP_Filtered'] = Q[MSLP.columns[0]][Q['MSLP_Hard_Limit'] & Q['MSLP_Change']]\n else:\n print('**** Mean sea level pressure not present ****')\n else:\n print('**** Pressure not present ****')\n \n if 'WS' in kwargs.keys():\n WS = pd.DataFrame(kwargs['WS'])\n if Q is None:\n Q = WS; Q = pd.DataFrame(Q)\n else: Q= Q.join(WS)\n Q['WS_Hard_Limit'] = (Q[WS.columns[0]].astype(float) < 60) & (Q[WS.columns[0]].astype(float) >= 0)\n Q['WS_Change'] = (np.abs(Q[WS.columns[0]].diff() <= 15)) & (np.abs(Q[WS.columns[0]].diff() != 0)) #& (~np.isnan(Q[WS.columns[0]].diff())) \n Q['WS_Day_Change'] = (WS.resample('D').mean().diff !=0) \n Q['WS_Filtered'] = Q[WS.columns[0]][Q['WS_Hard_Limit']&Q['WS_Change']&Q['WS_Day_Change']]\n else:\n print('**** Wind Speed not present ****')\n \n if 'WD' in kwargs.keys():\n WD = pd.DataFrame(kwargs['WD'])\n if Q is None:\n Q = WD; Q = pd.DataFrame(Q)\n else: Q= Q.join(WD)\n Q['WD_Hard_Limit'] = (Q[WD.columns[0]].astype(float) < 360) & (Q[WD.columns[0]].astype(float) >= 0)\n Q['WD_Change'] = (np.abs(Q[WD.columns[0]].diff() != 0)) # (~np.isnan(Q[WD.columns[0]].diff())) &\n Q['WD_Filtered'] = Q[WD.columns[0]][Q['WD_Hard_Limit']&Q['WD_Change']]\n else:\n print('**** Wind Direction not present ****')\n \n if 'PAR' in kwargs.keys():\n PAR = pd.DataFrame(kwargs['PAR']); \n if Q is None:\n Q = PAR; Q = pd.DataFrame(Q)\n else: Q= Q.join(PAR)\n Q['PAR_Hard_Limit'] = (Q[PAR.columns[0]].astype(float) >= 0) & (Q[PAR.columns[0]].astype(float) < 5000)\n Q['PAR_Change'] = (np.abs(Q[PAR.columns[0]].diff() <= 1500))# & (~np.isnan(Q[PAR.columns[0]].diff()))\n Q['PAR_Day_Change'] = (PAR.resample('D').mean().diff != 0) # Causing problems for some reason\n Q['PAR_Filtered'] = Q[PAR.columns[0]][Q['PAR_Hard_Limit']&Q['PAR_Change']&Q['PAR_Day_Change']]\n else:\n print('**** PAR not present ****')\n \n if 'Rn' in kwargs.keys():\n Rn = pd.DataFrame(kwargs['Rn']) \n if Q is None:\n Q = Rn; Q = pd.DataFrame(Q)\n else: Q= Q.join(Rn)\n Q['Rn_Hard_Limit'] = (Q[Rn.columns[0]].astype(float) >= -150) & (Q[Rn.columns[0]].astype(float) <= 1500) \n Q['Rn_Change'] = (np.abs(Q[Rn.columns[0]].astype(float).diff() <= 500)) & (np.abs(Q[Rn.columns[0]].diff() != 0)) #& (~np.isnan(Q[Rn.columns[0]].astype(float).diff())) \n Q['Rn_Day_Change'] = (Rn.resample('D').mean().diff !=0) \n 
Q['Rn_Filtered'] = Q[Rn.columns[0]][Q['Rn_Hard_Limit']&Q['Rn_Change']&Q['Rn_Day_Change']]\n else:\n print('**** Net Radiations not present ****')\n \n if 'Precip' in kwargs.keys():\n Precip = pd.DataFrame(kwargs['Precip'])\n if Q is None:\n Q = P; Q = pd.DataFrame(Q)\n else: Q= Q.join(Precip)\n Q['Precip_Hard_Limit'] = (Q[Precip.columns[0]].astype(float) < 100) & (Q[Precip.columns[0]].astype(float) >= 0)\n Z_Precip = Q[Precip.columns[0]].astype(float) ==0\n# if ('RH' in kwargs.keys()) & ('Tair' in kwargs.keys()):\n# Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90)\n# Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0)\n# Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH_gt_90']&~Q['Precip_Tair_lt_Zero']]\n# Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n# elif ('RH' in kwargs.keys()) & ('Tair' not in kwargs.keys()):\n# Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90)\n# Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH']]\n# Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n if 'Tair' in kwargs.keys():\n Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0)\n Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']& ~Q['Precip_Tair_lt_Zero']]\n Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n else:\n Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']]\n Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n else:\n print('**** Precipitation not present ****')\n \n if 'VPD' in kwargs.keys():\n VPD = pd.DataFrame(kwargs['VPD'])\n if Q is None:\n Q = VPD; Q = pd.DataFrame(Q)\n else: Q= Q.join(VPD)\n Q['VPD_Hard_Limit'] = (Q[VPD.columns[0]].astype(float) < 50) & (Q[VPD.columns[0]].astype(float) >= 0)\n Q['VPD_Change'] = (np.abs(Q[VPD.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[VPD.columns[0]].diff() != 0)) \n Q['VPD_Day_Change'] = (VPD.resample('D').mean().diff !=0) \n Q['VPD_Filtered'] = Q[VPD.columns[0]][Q['VPD_Hard_Limit']&Q['VPD_Change']&Q['VPD_Day_Change']]\n\n if 'e' in kwargs.keys():\n e = pd.DataFrame(kwargs['e'])\n if Q is None:\n Q = e; Q = pd.DataFrame(Q)\n else: Q= Q.join(e)\n Q['e_Hard_Limit'] = (Q[e.columns[0]].astype(float) < 50) & (Q[e.columns[0]].astype(float) >= 0)\n Q['e_Change'] = (np.abs(Q[e.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e.columns[0]].diff() != 0)) \n Q['e_Day_Change'] = (e.resample('D').mean().diff !=0) \n Q['e_Filtered'] = Q[e.columns[0]][Q['e_Hard_Limit']&Q['e_Change']&Q['e_Day_Change']]\n \n if 'e_s' in kwargs.keys():\n e_s = pd.DataFrame(kwargs['e_s'])\n if Q is None:\n Q = e_s; Q = pd.DataFrame(Q)\n else: Q= Q.join(e_s)\n Q['e_s_Hard_Limit'] = (Q[e_s.columns[0]].astype(float) < 50) & (Q[e_s.columns[0]].astype(float) >= 0)\n Q['e_s_Change'] = (np.abs(Q[e_s.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e_s.columns[0]].diff() != 0)) \n Q['e_s_Day_Change'] = (e_s.resample('D').mean().diff !=0) \n Q['e_s_Filtered'] = Q[e_s.columns[0]][Q['e_s_Hard_Limit']&Q['e_s_Change']&Q['e_s_Day_Change']] \n return Q\n " ]
[ [ "numpy.hstack", "pandas.concat", "pandas.to_datetime", "pandas.read_csv", "pandas.DataFrame", "numpy.nanmean", "numpy.nanstd", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
n1ckfg/RoutedFusion
[ "1733911c7fe025b461b75e48461658709996e39c" ]
[ "voxelgrid/tsdf/run_tsdf_fusion.py" ]
[ "#!/scratch_net/nudel/esandstroem/venvs/tsdf_fusion_env/bin/python\nimport os\napp_path = '/scratch_net/nudel/esandstroem/venvs/tsdf_fusion_env/bin'\nos.environ[\"PATH\"] = app_path + os.pathsep + os.environ[\"PATH\"]\n\nfrom TSDFHandle import *\nimport numpy as np\nimport cv2\nfrom utils import extract_mesh_marching_cubes\nfrom visualization import plot_mesh\nimport plyfile\nfrom sys import argv\nimport pathlib\n\nif (len(argv) < 3):\n\tprint('Usage: {0} <name of depth directory> <save mode>'.format(argv[0]))\n\texit(0)\n\n\nCURRENT_DIR = str(pathlib.Path().absolute())\ndepth_path = CURRENT_DIR + '/' + argv[1]\ncampose_path = CURRENT_DIR + '/' + 'left_camera_matrix'\n\n\nbox = np.array([[-4,4],[-4,4],[-4,4]]) # each cell depicts the interval where we will reconstruct the shape i.e.\n# [[-xmin,xmax],[-ymin,ymax],[-zmin,zmax]]\ntsdf = TSDF(bbox=box, resolution=0.025, resolution_factor=1)\n\ndepth_dir = os.listdir(depth_path)\nsortOrder_depth = [int(x[:-4]) for x in depth_dir]\ndepth_dir = [x for _, x in sorted(zip(sortOrder_depth, depth_dir))]\n\ncampose_dir = os.listdir(campose_path)\nsortOrder_pose = [int(x[:-4]) for x in campose_dir]\ncampose_dir = [x for _, x in sorted(zip(sortOrder_pose, campose_dir))]\n\ncamera_intrinsics = np.array([[256, 0, 256], [0, 256, 256], [0, 0, 1]]).astype(np.float32)\n# apparently, the tsdf fusion code expects that the camera coordinate system is such that z is in the\n# camera viewing direction, y is down and x is to the right. This is achieved by a serie of rotations\nrot_180_around_y = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]).astype(np.float32)\nrot_180_around_z = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]).astype(np.float32)\nrotation = np.matmul(rot_180_around_z, rot_180_around_y)\n\nfor i in range(len(depth_dir)):\n\tdepth = cv2.imread(depth_path + '/' + depth_dir[i], -1)\n\tdepth = depth / 1000\n\tweight_map = np.ones(depth.shape)\n\tcampose = np.linalg.inv(np.loadtxt(campose_path + '/' + campose_dir[i]).astype(np.float32))\n\tcampose = np.matmul(camera_intrinsics, np.matmul(rotation,campose[0:3, 0:4]))\n\ttsdf.fuse(campose, depth.astype(np.float32), weight_map.astype(np.float32))\n\n\nmesh = extract_mesh_marching_cubes(tsdf.get_volume()[:, :, :, 0])\nif argv[2]:\n\tmesh.write('tsdf_fusion_' + argv[1] + '.ply')\nplot_mesh(mesh)\n" ]
[ [ "numpy.array", "numpy.matmul", "numpy.loadtxt", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xiangze/edward
[ "6419751d1d849c84c502e5ff3f7249b9bbc7b3aa", "6419751d1d849c84c502e5ff3f7249b9bbc7b3aa", "6419751d1d849c84c502e5ff3f7249b9bbc7b3aa" ]
[ "tests/util/test_get_descendants.py", "tests/models/test_param_mixture_stats.py", "docs/parser/public_api.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom edward.models import Bernoulli, Normal\nfrom edward.util import get_descendants\n\n\nclass test_get_descendants_class(tf.test.TestCase):\n\n def test_v_structure(self):\n \"\"\"a -> b -> e <- d <- c\"\"\"\n with self.test_session():\n a = Normal(0.0, 1.0)\n b = Normal(a, 1.0)\n c = Normal(0.0, 1.0)\n d = Normal(c, 1.0)\n e = Normal(b * d, 1.0)\n self.assertEqual(set(get_descendants(a)), set([b, e]))\n self.assertEqual(get_descendants(b), [e])\n self.assertEqual(set(get_descendants(c)), set([d, e]))\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\n def test_a_structure(self):\n \"\"\"e <- d <- a -> b -> c\"\"\"\n with self.test_session():\n a = Normal(0.0, 1.0)\n b = Normal(a, 1.0)\n c = Normal(b, 1.0)\n d = Normal(a, 1.0)\n e = Normal(d, 1.0)\n self.assertEqual(set(get_descendants(a)), set([b, c, d, e]))\n self.assertEqual(get_descendants(b), [c])\n self.assertEqual(get_descendants(c), [])\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\n def test_chain_structure(self):\n \"\"\"a -> b -> c -> d -> e\"\"\"\n with self.test_session():\n a = Normal(0.0, 1.0)\n b = Normal(a, 1.0)\n c = Normal(b, 1.0)\n d = Normal(c, 1.0)\n e = Normal(d, 1.0)\n self.assertEqual(set(get_descendants(a)), set([b, c, d, e]))\n self.assertEqual(set(get_descendants(b)), set([c, d, e]))\n self.assertEqual(set(get_descendants(c)), set([d, e]))\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\n def test_tensor(self):\n with self.test_session():\n a = Normal(0.0, 1.0)\n b = tf.constant(2.0)\n c = a + b\n d = Normal(c, 1.0)\n self.assertEqual(get_descendants(a), [d])\n self.assertEqual(get_descendants(b), [d])\n self.assertEqual(get_descendants(c), [d])\n self.assertEqual(get_descendants(d), [])\n\n def test_control_flow(self):\n with self.test_session():\n a = Bernoulli(0.5)\n b = Normal(0.0, 1.0)\n c = tf.constant(0.0)\n d = tf.cond(tf.cast(a, tf.bool), lambda: b, lambda: c)\n e = Normal(d, 1.0)\n self.assertEqual(get_descendants(a), [e])\n self.assertEqual(get_descendants(b), [e])\n self.assertEqual(get_descendants(c), [e])\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\n def test_scan(self):\n \"\"\"copied from test_chain_structure\"\"\"\n def cumsum(x):\n return tf.scan(lambda a, x: a + x, x)\n\n with self.test_session():\n a = Normal(tf.ones([3]), tf.ones([3]))\n b = Normal(cumsum(a), tf.ones([3]))\n c = Normal(cumsum(b), tf.ones([3]))\n d = Normal(cumsum(c), tf.ones([3]))\n e = Normal(cumsum(d), tf.ones([3]))\n self.assertEqual(set(get_descendants(a)), set([b, c, d, e]))\n self.assertEqual(set(get_descendants(b)), set([c, d, e]))\n self.assertEqual(set(get_descendants(c)), set([d, e]))\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\nif __name__ == '__main__':\n tf.test.main()\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport edward as ed\nimport numpy as np\nimport tensorflow as tf\n\nfrom edward.models import Beta, Normal, ParamMixture\n\n\ndef _make_histograms(values, hists, hist_centers, x_axis, n_bins):\n if len(values.shape) > 1:\n for i in range(values.shape[1]):\n _make_histograms(values[:, i], hists[:, i], hist_centers[:, i],\n x_axis[:, i], n_bins)\n else:\n hist, hist_bins = 
np.histogram(values, bins=n_bins)\n bin_width = hist_bins[1] - hist_bins[0]\n hists[:] = hist / float(hist.sum())\n hist_centers[:] = 0.5 * (hist_bins[1:] + hist_bins[:-1])\n x_axis[:n_bins] = hist_centers\n\n\nclass test_param_mixture_class(tf.test.TestCase):\n\n def _test(self, probs, params, dist):\n g = tf.Graph()\n with g.as_default():\n tf.set_random_seed(10003)\n\n N = 50000\n\n x = ParamMixture(probs, params, dist, sample_shape=N)\n cat = x.cat\n components = x.components\n\n marginal_logp = x.marginal_log_prob(x)\n cond_logp = x.log_prob(x)\n\n comp_means = components.mean()\n comp_stddevs = components.stddev()\n marginal_mean = x.mean()\n marginal_stddev = x.stddev()\n marginal_var = x.variance()\n\n sess = self.test_session(graph=g)\n with self.test_session(graph=g) as sess:\n to_eval = [x, cat, components, comp_means, comp_stddevs, marginal_mean,\n marginal_stddev, marginal_var, marginal_logp, cond_logp]\n vals = sess.run(to_eval)\n vals = {k: v for k, v in zip(to_eval, vals)}\n\n # Test that marginal statistics are reasonable\n self.assertAllClose(vals[x].mean(0), vals[marginal_mean],\n rtol=0.01, atol=0.01)\n self.assertAllClose(vals[x].std(0), vals[marginal_stddev],\n rtol=0.01, atol=0.01)\n self.assertAllClose(vals[x].var(0), vals[marginal_var],\n rtol=0.01, atol=0.01)\n\n # Test that per-component statistics are reasonable\n for k in range(x.num_components):\n selector = (vals[cat] == k)\n self.assertAllClose(selector.mean(), probs[k], rtol=0.01, atol=0.01)\n x_k = vals[x][selector]\n self.assertAllClose(x_k.mean(0), vals[comp_means][k],\n rtol=0.05, atol=0.05)\n self.assertAllClose(x_k.std(0), vals[comp_stddevs][k],\n rtol=0.05, atol=0.05)\n\n n_bins = 100\n x_hists = np.zeros((n_bins,) + vals[x].shape[1:])\n hist_centers = np.zeros_like(x_hists)\n x_axis = np.zeros((N,) + vals[x].shape[1:])\n _make_histograms(vals[x], x_hists, hist_centers, x_axis, n_bins)\n\n x_marginal_val = sess.run(marginal_logp, {x: x_axis,\n components: vals[components]})\n # Test that histograms match marginal log prob\n x_pseudo_hist = np.exp(x_marginal_val[:n_bins])\n self.assertAllClose(x_pseudo_hist.sum(0) * (x_axis[1] - x_axis[0]), 1.,\n rtol=0.1, atol=0.1)\n x_pseudo_hist /= x_pseudo_hist.sum(0, keepdims=True)\n self.assertLess(abs(x_pseudo_hist - x_hists).sum(0).mean(), 0.1)\n\n # Test that histograms match conditional log prob\n for k in range(probs.shape[-1]):\n k_cat = k + np.zeros(x_axis.shape, np.int32)\n x_vals_k = sess.run(x, {cat: k_cat, components: vals[components]})\n _make_histograms(x_vals_k, x_hists, hist_centers, x_axis, n_bins)\n x_cond_logp_val_k = sess.run(cond_logp, {x: x_axis, cat: k_cat,\n components: vals[components]})\n x_pseudo_hist = np.exp(x_cond_logp_val_k[:n_bins])\n self.assertAllClose(x_pseudo_hist.sum(0) * (x_axis[1] - x_axis[0]), 1.,\n rtol=0.1, atol=0.1)\n x_pseudo_hist /= x_pseudo_hist.sum(0, keepdims=True)\n self.assertLess(abs(x_pseudo_hist - x_hists).sum(0).mean(), 0.1)\n\n def test_normal(self):\n \"\"\"Mixture of 3 normal distributions.\"\"\"\n probs = np.array([0.2, 0.3, 0.5], np.float32)\n loc = np.array([1.0, 5.0, 7.0], np.float32)\n scale = np.array([1.5, 1.5, 1.5], np.float32)\n\n self._test(probs, {'loc': loc, 'scale': scale}, Normal)\n\n def test_beta(self):\n \"\"\"Mixture of 3 beta distributions.\"\"\"\n probs = np.array([0.2, 0.3, 0.5], np.float32)\n conc1 = np.array([2.0, 1.0, 0.5], np.float32)\n conc0 = conc1 + 2.0\n\n self._test(probs, {'concentration1': conc1, 'concentration0': conc0},\n Beta)\n\n def test_batch_beta(self):\n \"\"\"Two 
mixtures of 3 beta distributions.\"\"\"\n probs = np.array([[0.2, 0.3, 0.5], [0.2, 0.3, 0.5]], np.float32)\n conc1 = np.array([[2.0, 0.5], [1.0, 1.0], [0.5, 2.0]], np.float32)\n conc0 = conc1 + 2.0\n\n # self._test(probs, {'concentration1': conc1, 'concentration0': conc0},\n # Beta)\n self.assertRaises(NotImplementedError,\n self._test, probs,\n {'concentration1': conc1, 'concentration0': conc0},\n Beta)\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Visitor restricting traversal to only the public tensorflow API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nfrom tensorflow.python.util import tf_inspect\n\n\nclass PublicAPIVisitor(object):\n \"\"\"Visitor to use with `traverse` to visit exactly the public TF API.\"\"\"\n\n def __init__(self, visitor):\n \"\"\"Constructor.\n\n `visitor` should be a callable suitable as a visitor for `traverse`. It will\n be called only for members of the public TensorFlow API.\n\n Args:\n visitor: A visitor to call for the public API.\n \"\"\"\n self._visitor = visitor\n self._root_name = 'tf'\n\n # Modules/classes we want to suppress entirely.\n self._private_map = {\n # Some implementations have this internal module that we shouldn't\n # expose.\n 'tf.flags': ['cpp_flags'],\n }\n\n # Modules/classes we do not want to descend into if we hit them. 
Usually,\n # system modules exposed through platforms for compatibility reasons.\n # Each entry maps a module path to a name to ignore in traversal.\n self._do_not_descend_map = {\n 'tf': [\n 'core',\n 'examples',\n 'flags', # Don't add flags\n # TODO(drpng): This can be removed once sealed off.\n 'platform',\n # TODO(drpng): This can be removed once sealed.\n 'pywrap_tensorflow',\n # TODO(drpng): This can be removed once sealed.\n 'user_ops',\n 'python',\n 'tools',\n 'tensorboard',\n ],\n\n # Everything below here is legitimate.\n # It'll stay, but it's not officially part of the API.\n 'tf.app': ['flags'],\n # Imported for compatibility between py2/3.\n 'tf.test': ['mock'],\n }\n\n @property\n def private_map(self):\n \"\"\"A map from parents to symbols that should not be included at all.\n\n This map can be edited, but it should not be edited once traversal has\n begun.\n\n Returns:\n The map marking symbols to not include.\n \"\"\"\n return self._private_map\n\n @property\n def do_not_descend_map(self):\n \"\"\"A map from parents to symbols that should not be descended into.\n\n This map can be edited, but it should not be edited once traversal has\n begun.\n\n Returns:\n The map marking symbols to not explore.\n \"\"\"\n return self._do_not_descend_map\n\n def set_root_name(self, root_name):\n \"\"\"Override the default root name of 'tf'.\"\"\"\n self._root_name = root_name\n\n def _is_private(self, path, name):\n \"\"\"Return whether a name is private.\"\"\"\n # TODO(wicke): Find out what names to exclude.\n return ((path in self._private_map and\n name in self._private_map[path]) or\n (name.startswith('_') and not re.match('__.*__$', name) or\n name in ['__base__', '__class__']))\n\n def _do_not_descend(self, path, name):\n \"\"\"Safely queries if a specific fully qualified name should be excluded.\"\"\"\n return (path in self._do_not_descend_map and\n name in self._do_not_descend_map[path])\n\n def __call__(self, path, parent, children):\n \"\"\"Visitor interface, see `traverse` for details.\"\"\"\n\n # Avoid long waits in cases of pretty unambiguous failure.\n if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:\n raise RuntimeError('Modules nested too deep:\\n%s.%s\\n\\nThis is likely a '\n 'problem with an accidental public import.' %\n (self._root_name, path))\n\n # Includes self._root_name\n full_path = '.'.join([self._root_name, path]) if path else self._root_name\n\n # Remove things that are not visible.\n for name, child in list(children):\n if self._is_private(full_path, name):\n children.remove((name, child))\n\n self._visitor(path, parent, children)\n\n # Remove things that are visible, but which should not be descended into.\n for name, child in list(children):\n if self._do_not_descend(full_path, name):\n children.remove((name, child))\n" ]
[ [ "tensorflow.scan", "tensorflow.constant", "tensorflow.cast", "tensorflow.test.main", "tensorflow.ones" ], [ "tensorflow.Graph", "tensorflow.test.main", "numpy.exp", "numpy.zeros_like", "tensorflow.set_random_seed", "numpy.array", "numpy.histogram", "numpy.zeros" ], [ "tensorflow.python.util.tf_inspect.ismodule" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "2.8", "1.2", "2.10" ] } ]
dongfangyixi/ParlAI
[ "424a2b3c7086593f699c76612dffd1d925986177" ]
[ "parlai/agents/transformer/mixer.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTransformer Agents.\n\"\"\"\nfrom typing import Optional\nfrom parlai.core.params import ParlaiParser\nfrom parlai.core.opt import Opt\nfrom parlai.core.agents import Agent\nfrom parlai.utils.torch import padded_3d\nfrom parlai.core.torch_classifier_agent import TorchClassifierAgent\nfrom parlai.core.torch_ranker_agent import TorchRankerAgent\nfrom parlai.core.torch_generator_agent import TorchGeneratorAgent\nfrom parlai.utils.misc import recursive_getattr\nfrom parlai.utils.logging import logging\n\nfrom .modules import (\n TransformerMemNetModel,\n TransformerGeneratorModel,\n TransformerLinearWrapper,\n MixerModel,\n MixerGeneratorModel,\n)\n\nimport torch\n\n\ndef add_common_cmdline_args(parser):\n \"\"\"\n Add common command line args.\n \"\"\"\n parser.add_argument(\n '-esz',\n '--embedding-size',\n type=int,\n default=300,\n help='Size of all embedding layers. Must be a multiple of --n-heads.',\n )\n parser.add_argument(\n '-nl', '--n-layers', type=int, default=2, help='Number of transformer layers.'\n )\n parser.add_argument(\n '-hid',\n '--ffn-size',\n type=int,\n default=300,\n help='Hidden size of the FFN layers',\n )\n parser.add_argument(\n '--dropout',\n type=float,\n default=0.0,\n help='Dropout used around embeddings and before layer layer normalizations. '\n 'This is used in Vaswani 2017 and works well on large datasets.',\n )\n parser.add_argument(\n '--attention-dropout',\n type=float,\n default=0.0,\n help='Dropout used after attention softmax. This is not used in Vaswani 2017.',\n )\n parser.add_argument(\n '--relu-dropout',\n type=float,\n default=0.0,\n help='Dropout used after the ReLU in the FFN. Not used in Vaswani 2017, '\n 'but used in Tensor2Tensor.',\n )\n parser.add_argument(\n '--n-heads', type=int, default=2, help='Number of multihead attention heads'\n )\n parser.add_argument(\n '--learn-positional-embeddings',\n type='bool',\n default=False,\n help='If off, sinusoidal embeddings are used. If on, position embeddings are '\n 'learned from scratch.',\n )\n parser.add_argument('--embeddings-scale', type='bool', default=True)\n parser.add_argument(\n '--n-positions',\n type=int,\n default=None,\n hidden=True,\n help='Number of positional embeddings to learn. Defaults '\n 'to truncate or 1024 if not provided.',\n )\n parser.add_argument(\n '--n-segments',\n type=int,\n default=0,\n help='The number of segments that support the model. '\n 'If zero no segment and no langs_embedding.',\n )\n parser.add_argument(\n '--variant',\n choices={'aiayn', 'xlm', 'prelayernorm', 'bart'},\n default='aiayn',\n help='Chooses locations of layer norms, etc. prelayernorm '\n 'is used to match some fairseq models',\n recommended='xlm',\n )\n parser.add_argument(\n '--activation',\n choices={'relu', 'gelu'},\n default='relu',\n help='Nonlinear activation to use. 
AIAYN uses relu, but '\n 'more recent papers prefer gelu.',\n recommended='gelu',\n )\n parser.add_argument(\n '--output-scaling',\n type=float,\n default=1.0,\n help='scale the output of every transformer by this quantity.',\n )\n parser.add_argument(\n '--share-word-embeddings',\n type='bool',\n default=True,\n help='Share word embeddings table for candidate and context'\n 'in the memory network',\n )\n parser.add_argument(\n '-nel',\n '--n-encoder-layers',\n type=int,\n default=-1,\n help='This will overide the n-layers for asymmetrical transformers',\n )\n parser.add_argument(\n '-ndl',\n '--n-decoder-layers',\n type=int,\n default=-1,\n help='This will overide the n-layers for asymmetrical transformers',\n )\n parser.add_argument(\n '--model-parallel',\n type='bool',\n default=False,\n help='Shard the layers across multiple GPUs.',\n )\n\n\nclass Transformer(Agent):\n \"\"\"\n Placeholder Transformer Agent.\n\n Placeholder class, which just throws an error telling the user to specify whether\n they want the ranker or the generator.\n \"\"\"\n\n def __init__(self, opt, shared=None):\n raise RuntimeError(\n \"`--model transformer` is not a valid choice. Please select either \"\n \"`--model transformer/ranker` or `--model transformer/generator\"\n )\n\n\nclass TransformerRankerAgent(TorchRankerAgent):\n \"\"\"\n Transformer Ranker Agent.\n\n Implementation of a TorchRankerAgent, where the model is a Transformer\n \"\"\"\n\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n \"\"\"\n Add command-line arguments specifically for this agent.\n \"\"\"\n super().add_cmdline_args(parser, partial_opt=partial_opt)\n agent = parser.add_argument_group('Transformer Arguments')\n add_common_cmdline_args(agent)\n # memory and knowledge arguments\n agent.add_argument(\n '--use-memories',\n type='bool',\n default=False,\n help='use memories: must implement the function '\n '`_vectorize_memories` to use this',\n )\n agent.add_argument(\n '--wrap-memory-encoder',\n type='bool',\n default=False,\n help='wrap memory encoder with MLP',\n )\n agent.add_argument(\n '--memory-attention',\n type=str,\n default='sqrt',\n choices=['cosine', 'dot', 'sqrt'],\n help='similarity for basic attention mechanism '\n 'when using transformer to encode memories',\n )\n # model specific arguments\n agent.add_argument('--normalize-sent-emb', type='bool', default=False)\n agent.add_argument('--share-encoders', type='bool', default=True)\n parser.add_argument(\n '--share-word-embeddings',\n type='bool',\n default=True,\n help='Share word embeddings table for candidate and context'\n 'in the memory network',\n )\n agent.add_argument(\n '--learn-embeddings', type='bool', default=True, help='learn embeddings'\n )\n agent.add_argument(\n '--data-parallel',\n type='bool',\n default=False,\n help='use model in data parallel, requires ' 'multiple gpus',\n )\n agent.add_argument(\n '--reduction-type',\n type=str,\n default='mean',\n choices=['first', 'max', 'mean'],\n help='Type of reduction at the end of transformer',\n )\n\n parser.set_defaults(learningrate=0.0001, optimizer='adamax', truncate=1024)\n cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt)\n\n return agent\n\n def _score(self, output, cands):\n if cands.dim() == 2:\n return torch.matmul(output, cands.t())\n elif cands.dim() == 3:\n return torch.bmm(output.unsqueeze(1), cands.transpose(1, 2)).squeeze(1)\n else:\n raise RuntimeError(\n 'Unexpected candidate dimensions {}' 
''.format(cands.dim())\n )\n\n def build_model(self, states=None):\n \"\"\"\n Build and return model.\n \"\"\"\n model = MixerModel(self.opt, self.dict)\n if self.opt['embedding_type'] != 'random':\n self._copy_embeddings(model.embeddings.weight, self.opt['embedding_type'])\n return model\n\n def batchify(self, obs_batch, sort=False):\n \"\"\"\n Override so that we can add memories to the Batch object.\n \"\"\"\n batch = super().batchify(obs_batch, sort)\n if self.opt['use_memories']:\n valid_obs = [(i, ex) for i, ex in enumerate(obs_batch) if self.is_valid(ex)]\n valid_inds, exs = zip(*valid_obs)\n mems = None\n if any('memory_vecs' in ex for ex in exs):\n mems = [ex.get('memory_vecs', None) for ex in exs]\n batch.memory_vecs = mems\n return batch\n\n def _vectorize_memories(self, obs):\n # TODO: move this to Torch Ranker Agent\n raise NotImplementedError(\n 'Abstract class: user must implement this function to use memories'\n )\n\n def vectorize(self, *args, **kwargs):\n \"\"\"\n Override to include vectorization of memories.\n \"\"\"\n kwargs['add_start'] = False\n kwargs['add_end'] = False\n obs = super().vectorize(*args, **kwargs)\n if self.opt['use_memories']:\n obs = self._vectorize_memories(obs)\n return obs\n\n def encode_candidates(self, padded_cands):\n \"\"\"\n Encode candidates.\n \"\"\"\n _, cands = self.model(xs=None, mems=None, cands=padded_cands)\n\n return cands\n\n def score_candidates(self, batch, cand_vecs, cand_encs=None):\n \"\"\"\n Score candidates.\n \"\"\"\n # convoluted check that not all memories are empty\n if (\n self.opt['use_memories']\n and batch.memory_vecs is not None\n and sum(len(m) for m in batch.memory_vecs)\n ):\n mems = padded_3d(batch.memory_vecs, pad_idx=self.NULL_IDX)\n else:\n mems = None\n\n if cand_encs is not None:\n # we pre-encoded the candidates, do not re-encode here\n cand_vecs = None\n\n context_h, cands_h = self.model(xs=batch.text_vec, mems=mems, cands=cand_vecs)\n\n if cand_encs is not None:\n cands_h = cand_encs\n scores = self._score(context_h, cands_h)\n\n return scores\n\n\nclass TransformerGeneratorAgent(TorchGeneratorAgent):\n \"\"\"\n TransformerGeneratorAgent.\n\n Implementation of TorchGeneratorAgent, where the model is a Transformer\n \"\"\"\n\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n \"\"\"\n Add command-line arguments specifically for this agent.\n \"\"\"\n agent = parser.add_argument_group('Transformer Arguments')\n add_common_cmdline_args(agent)\n cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt)\n\n super().add_cmdline_args(parser, partial_opt=partial_opt)\n return agent\n\n def build_model(self, states=None):\n \"\"\"\n Build and return model.\n \"\"\"\n model = MixerGeneratorModel(self.opt, self.dict)\n if self.opt['embedding_type'] != 'random':\n self._copy_embeddings(\n model.encoder.embeddings.weight, self.opt['embedding_type']\n )\n return model\n\n def _resize_token_embeddings(self, state_dict, msg=None):\n \"\"\"\n Resize the token embeddings when are adding extra special tokens.\n \"\"\"\n # map extra special tokens carefully\n new_size = self.model.embeddings.weight.size()[0]\n orig_size = state_dict['embeddings.weight'].size()[0]\n logging.info(f'Resizing token embeddings from {orig_size} to {new_size}')\n if new_size <= orig_size:\n # new size should be greater than original size,\n # as we are adding special tokens\n raise RuntimeError(msg)\n\n for emb_weights in [\n 'embeddings.weight',\n 
'encoder.embeddings.weight',\n 'decoder.embeddings.weight',\n ]:\n # get new_embs\n old_embs = state_dict[emb_weights]\n new_embs = recursive_getattr(self.model, emb_weights).to(old_embs.device)\n # copy over old weights\n new_embs.data[:orig_size, :] = old_embs.data[:orig_size, :]\n # reset in state dict\n state_dict[emb_weights] = new_embs\n\n return state_dict\n\n\nclass TransformerClassifierAgent(TorchClassifierAgent):\n \"\"\"\n Classifier based on Transformer.\n \"\"\"\n\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n TransformerRankerAgent.add_cmdline_args(\n parser, partial_opt=partial_opt\n ) # add transformer args\n super().add_cmdline_args(parser, partial_opt=partial_opt)\n parser.add_argument(\n '--load-from-pretrained-ranker',\n type='bool',\n default=False,\n help='load model from base transformer ranking model '\n '(used for pretraining)',\n )\n parser.set_defaults(reduction_type='first')\n return parser\n\n def build_model(self):\n num_classes = len(self.class_list)\n self.base_model = MixerModel(self.opt, self.dict)\n return TransformerLinearWrapper(self.base_model.context_encoder, num_classes)\n\n def vectorize(self, *args, **kwargs):\n \"\"\"\n Add the start and end token to the text.\n \"\"\"\n kwargs['add_start'] = True\n kwargs['add_end'] = True\n obs = super().vectorize(*args, **kwargs)\n return obs\n\n def _set_text_vec(self, *args, **kwargs):\n \"\"\"\n Add the start and end token to the text.\n \"\"\"\n obs = super()._set_text_vec(*args, **kwargs)\n\n if 'text_vec' in obs and 'added_start_end' not in obs:\n obs.force_set(\n 'text_vec', self._add_start_end_tokens(obs['text_vec'], True, True)\n )\n obs['added_start_end'] = True\n\n # check truncation after adding start end tokens\n if obs.get('text_vec') is not None:\n truncated_vec = self._check_truncate(\n obs['text_vec'], self.text_truncate, True\n )\n obs.force_set('text_vec', torch.LongTensor(truncated_vec))\n\n return obs\n\n def score(self, batch):\n return self.model(batch.text_vec)\n\n def load_state_dict(self, state_dict):\n \"\"\"\n Load the state dict into model.\n\n This is easily overridable to facilitate transfer of state dicts.\n \"\"\"\n if self.is_finetune and self.opt['load_from_pretrained_ranker']:\n self.base_model.load_state_dict(state_dict, strict=False)\n else:\n self.model.load_state_dict(state_dict)\n\n\n" ]
[ [ "torch.LongTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ReyesDeJong/Deep-SVDD-PyTorch
[ "1fc7eae1474556f869d5c5422da74fd4fe2f1aed" ]
[ "src/datasets/hits_dataset.py" ]
[ "import os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data import Subset\nfrom torch.utils.data.dataset import Dataset # For custom datasets\nfrom torchvision import transforms\n\nPROJECT_PATH = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', '..'))\nsys.path.append(PROJECT_PATH)\n\nfrom src.base.torchvision_dataset import TorchvisionDataset\nfrom src.datasets.preprocessing import get_target_label_idx\nfrom src.datasets.data_splitter import DatasetDivider\nfrom src.datasets.data_set_generic import Dataset\n\n\nclass HitsDataset(TorchvisionDataset):\n def __init__(self, root: str, normal_class=1):\n super().__init__(root)\n\n self.n_classes = 2 # 0: normal, 1: outlier\n self.normal_classes = tuple([normal_class])\n self.outlier_classes = list(range(0, 2))\n self.outlier_classes.remove(normal_class)\n\n self.data_dict = pd.read_pickle(self.root)\n # hardcoded selected channel\n images = self.normalize_by_image(self.data_dict['images'])[..., 3][\n ..., np.newaxis]\n labels = np.array(self.data_dict['labels'])\n\n dataset = Dataset(data_array=images, data_label=labels, batch_size=50)\n data_splitter = DatasetDivider(test_size=0.3, validation_size=0.1)\n data_splitter.set_dataset_obj(dataset)\n train_dataset, test_dataset, val_dataset = \\\n data_splitter.get_train_test_val_set_objs()\n\n transform = transforms.Compose([transforms.ToTensor()])\n target_transform = transforms.Lambda(\n lambda x: int(x in self.outlier_classes))\n\n train_set = Hits(train_dataset.data_array, train_dataset.data_label,\n transform=transform, target_transform=target_transform)\n train_idx_normal = get_target_label_idx(\n np.array(train_set.label_arr), self.normal_classes)\n self.train_set = Subset(train_set, train_idx_normal)\n print(self.train_set.__len__())\n\n self.val_all_set = Hits(val_dataset.data_array, val_dataset.data_label,\n transform=transform,\n target_transform=target_transform)\n val_idx_normal = get_target_label_idx(\n np.array(self.val_all_set.label_arr), self.normal_classes)\n self.val_normal_set = Subset(self.val_all_set, val_idx_normal)\n print(self.val_normal_set.__len__())\n\n self.test_set = Hits(test_dataset.data_array, test_dataset.data_label,\n transform=transform,\n target_transform=target_transform)\n\n def normalize_by_image(self, images):\n images -= np.nanmin(images, axis=(1, 2))[:, np.newaxis, np.newaxis, :]\n images = images / np.nanmax(images, axis=(1, 2))[\n :, np.newaxis, np.newaxis, :]\n return images\n\n\nclass Hits(Dataset):\n def __init__(self, images, labels, transform, target_transform):\n \"\"\"\n \"\"\"\n # Transforms\n self.transform = transform\n self.target_transform = target_transform\n\n self.image_arr = images\n self.label_arr = labels\n print(self.image_arr.shape)\n self.data_len = self.label_arr.shape[0]\n\n def __getitem__(self, index):\n single_image = self.image_arr[index]\n single_image_label = self.label_arr[index]\n\n if self.transform is not None:\n img = self.transform(single_image)\n\n if self.target_transform is not None:\n target = self.target_transform(single_image_label)\n\n return img, target, index # only line changed\n\n def __len__(self):\n return self.data_len\n" ]
[ [ "numpy.nanmax", "pandas.read_pickle", "numpy.nanmin", "torch.utils.data.Subset", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Amitdedhia6/DrugDiscovery
[ "c70dec96cee4d0d643a8b9de30530b6871fdf05e" ]
[ "generate_embeddings.py" ]
[ "import torch\nimport torch.nn as nn\nimport os\nfrom common import base_data_path\nfrom typing import List\nimport pandas as pd\n\n\nCONTEXT_SIZE = 1 # 1 words to the left, 1 to the right\nEMDEDDING_DIM = 3\nword_to_ix = {}\nix_to_word = {}\n\n\ndef make_context_vector(context, word_to_ix):\n idxs = [word_to_ix[w] for w in context]\n return torch.tensor(idxs, dtype=torch.long)\n\n\ndef get_index_of_max(input):\n index = 0\n for i in range(1, len(input)):\n if input[i] > input[index]:\n index = i\n return index\n\n\ndef get_max_prob_result(input, ix_to_word):\n return ix_to_word[get_index_of_max(input)]\n\n\ndef split_smiles_repr(smile_repr: str) -> List[str]:\n element_list = []\n skip_next = False\n for i in range(len(smile_repr)):\n if skip_next:\n skip_next = False\n continue\n\n element = smile_repr[i]\n if (i < (len(smile_repr) - 1)) and (smile_repr[i].isalpha()):\n possible_element = element + smile_repr[i+1]\n if possible_element in word_to_ix:\n element = possible_element\n skip_next = True\n\n if element in word_to_ix:\n element_list.append(element)\n else:\n raise ValueError('Inappropriate argument to function get_elements_from_smiles_data of Vocab class')\n return element_list\n\n\ndef get_data(sequence_list: List[str]):\n _sequence_list = []\n sequence_elements_list = []\n\n for s in sequence_list:\n split_elements = split_smiles_repr(s)\n _sequence_list.append(s)\n sequence_elements_list.append(split_elements)\n\n return sequence_elements_list\n\n\nfilepath = os.path.join(base_data_path, \"vocab.txt\")\nf = open(filepath, \"r\")\nelements_list = f.read().splitlines()\nelements_list.append(' ')\nf.close()\n\nvocab = elements_list\nvocab_size = len(elements_list)\n\nfor i, word in enumerate(vocab):\n word_to_ix[word] = i\n ix_to_word[i] = word\n\nfilepath = os.path.join(base_data_path, \"dataset_v1.csv\")\ndf = pd.read_csv(filepath, sep=\",\", header=0)\nsmiles_data = get_data(df.SMILES.tolist())\n\n\nclass CBOW(torch.nn.Module):\n def __init__(self, vocab_size, embedding_dim):\n super(CBOW, self).__init__()\n\n self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.linear1 = nn.Linear(embedding_dim, 128)\n self.activation_function1 = nn.ReLU()\n self.linear2 = nn.Linear(128, vocab_size)\n self.activation_function2 = nn.LogSoftmax(dim=-1)\n\n def forward(self, inputs):\n embeds = sum(self.embeddings(inputs)).view(1, -1)\n out = self.linear1(embeds)\n out = self.activation_function1(out)\n out = self.linear2(out)\n out = self.activation_function2(out)\n return out\n\n def get_word_emdedding(self, word):\n word = torch.LongTensor([word_to_ix[word]])\n return self.embeddings(word).view(1, -1)\n\n\nmodel = CBOW(vocab_size, EMDEDDING_DIM)\nloss_function = nn.NLLLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n\n\nfor epoch in range(50):\n total_loss = 0\n for smiles_element_list in smiles_data:\n for i in range(1, len(smiles_element_list) - 1):\n context = [smiles_element_list[i - 1], smiles_element_list[i + 1]]\n target = smiles_element_list[i]\n context_vector = make_context_vector(context, word_to_ix)\n model.zero_grad()\n log_probs = model(context_vector)\n loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))\n total_loss += loss.item()\n loss.backward()\n optimizer.step()\n print(f\"Epoch - {epoch}, Loss - {total_loss}\")\n" ]
[ [ "torch.nn.NLLLoss", "pandas.read_csv", "torch.nn.LogSoftmax", "torch.LongTensor", "torch.nn.Embedding", "torch.tensor", "torch.nn.Linear", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ehsanul/brick
[ "291c0783f3b062cf73887cb3581dd92342891165" ]
[ "heuristic/train/nn/train-nn.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport sys\nimport pathlib\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nEPOCHS = 1000\n\n# The patience parameter is the amount of epochs to check for improvement\nEARLY_STOP = keras.callbacks.EarlyStopping(monitor='val_loss', patience=30)\n\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [cost]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label = 'Val Error')\n plt.ylim([0,5])\n plt.legend()\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$cost^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n plt.ylim([0,20])\n plt.legend()\n plt.show()\n\n# we hard-code the values instead of using stats so that integration with\n# predictor using the model is easier\nscaling = pd.DataFrame(data={\n 'min': [-10000, -10000, -10000, -2300, -2300, -2300, -6.0, -6.0, -6.0, -3.2, -3.2, -3.2],\n 'max': [ 10000, 10000, 10000, 2300, 2300, 2300, 6.0, 6.0, 6.0, 3.2, 3.2, 3.2],\n}, index=[ 'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw'])\n\n# scale to range [0, 1]\n# TODO try polar coordinates. for velocity: https://math.stackexchange.com/questions/2444965/relationship-between-cartesian-velocity-and-polar-velocity\ndef scale(x):\n return (x - scaling['min']) / (scaling['max'] - scaling['min'])\n\ndef build_model():\n model = keras.Sequential([\n layers.Dense(128, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),\n layers.Dense(128, activation=tf.nn.relu),\n\n # these extra layers seem to hurt more than they help!\n #layers.Dropout(0.01),\n #layers.Dense(64, activation=tf.nn.relu),\n\n # this doesn't work as well as a single 64-wide layer\n #layers.Dense(12, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),\n #layers.Dense(12, activation=tf.nn.relu),\n #layers.Dense(12, activation=tf.nn.relu),\n #layers.Dense(12, activation=tf.nn.relu),\n #layers.Dense(12, activation=tf.nn.relu),\n\n layers.Dense(1)\n ])\n #optimizer = tf.keras.optimizers.RMSprop(0.001)\n optimizer = tf.train.AdamOptimizer(0.001)\n model.compile(loss='mean_squared_error',\n optimizer=optimizer,\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\n\n# should be the time.csv from generate-data's time binary\ndataset_path = sys.argv[1]\n\ncolumn_names = ['cost', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw']\nraw_dataset = pd.read_csv(dataset_path, names=column_names,\n na_values = \"\", #comment='\\t',\n sep=\",\", skipinitialspace=True)\n\n\n# visualize the data!\npos_plot = sns.pairplot(raw_dataset[[\"cost\", \"x\", \"y\", \"z\"]], diag_kind=\"kde\")\npos_plot.savefig(\"./pos.fig.png\")\nvel_plot = sns.pairplot(raw_dataset[[\"cost\", \"vx\", \"vy\", \"vz\"]], diag_kind=\"kde\")\nvel_plot.savefig(\"./vel.fig.png\")\navel_plot = sns.pairplot(raw_dataset[[\"cost\", \"avx\", \"avy\", \"avz\"]], diag_kind=\"kde\")\navel_plot.savefig(\"./avel.fig.png\")\nrot_plot = 
sns.pairplot(raw_dataset[[\"cost\", \"roll\", \"pitch\", \"yaw\"]], diag_kind=\"kde\")\nrot_plot.savefig(\"./rot.fig.png\")\npos_rot_plot = sns.pairplot(raw_dataset[[\"cost\", \"x\", \"y\", \"yaw\"]], diag_kind=\"kde\")\npos_rot_plot.savefig(\"./pos_rot.fig.png\")\n\ndataset = raw_dataset.copy()\ndataset.tail()\n\n# we don't have missing data\n# dataset.isna().sum()\n# dataset = dataset.dropna()\n\n# split into training vs test datasets\ntrain_dataset = dataset.sample(frac=0.95,random_state=0)\ntest_dataset = dataset.drop(train_dataset.index)\n\n# using stats from full dataset\nstats = raw_dataset.describe()\nstats.pop(\"cost\")\nstats = stats.transpose()\nstats\n\ntrain_labels = train_dataset.pop('cost')\ntest_labels = test_dataset.pop('cost')\n\nscaled_train_dataset = scale(train_dataset)\nscaled_test_dataset = scale(test_dataset)\n\n# build and train moddel\nmodel = build_model()\nmodel.summary()\nhistory = model.fit(scaled_train_dataset, train_labels, epochs=EPOCHS,\n validation_split = 0.2, verbose=0, callbacks=[EARLY_STOP, PrintDot()])\nplot_history(history)\n\n# check against test set\nloss, mae, mse = model.evaluate(scaled_test_dataset, test_labels, verbose=0)\nprint(\"Testing set Mean Abs Error: {:5.2f} cost\".format(mae))\n\n# plot all test predictions\ntest_predictions = model.predict(scaled_test_dataset).flatten()\nplt.scatter(test_labels, test_predictions)\nplt.xlabel('True Values [cost]')\nplt.ylabel('Predictions [cost]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0,plt.xlim()[1]])\nplt.ylim([0,plt.ylim()[1]])\nplt.plot([-100, 100], [-100, 100])\nplt.show()\n\n# error distribution\nerror = test_predictions - test_labels\nplt.hist(error, bins = 25)\nplt.xlabel(\"Prediction Error [cost]\")\nplt.ylabel(\"Count\")\nplt.show()\n\nmodel.save('./simple_throttle_cost_model.h5')\nsaved_model_path = tf.contrib.saved_model.save_keras_model(model, \"./simple_throttle_cost_saved_model\")\n\n" ]
[ [ "tensorflow.contrib.saved_model.save_keras_model", "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.scatter", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylim", "tensorflow.keras.layers.Dense", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "tensorflow.train.AdamOptimizer", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "tensorflow.keras.callbacks.EarlyStopping", "matplotlib.pyplot.hist", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
juanpablordz/moog.github.io
[ "d7995d3563492378d0877ce8d16f5ca9a8031794", "d7995d3563492378d0877ce8d16f5ca9a8031794" ]
[ "moog/action_spaces/joystick.py", "moog/state_initialization/sprite_generators.py" ]
[ "\"\"\"Joystick action space for controlling agent avatars.\"\"\"\n\nfrom . import abstract_action_space\nfrom dm_env import specs\nimport numpy as np\n\n\nclass Joystick(abstract_action_space.AbstractActionSpace):\n \"\"\"Joystick action space.\"\"\"\n\n def __init__(self, scaling_factor=1., action_layers='agent',\n constrained_lr=False, control_velocity=False, momentum=0.):\n \"\"\"Constructor.\n \n Args:\n scaling_factor: Scalar. Scaling factor multiplied to the action.\n agent_layer: String or iterable of strings. Elements (or itself if\n string) must be keys in the environment state. All sprites in\n these layers will be acted upon by this action space.\n control_velocity: Bool. Whether to control velocity (True) or force\n (False).\n constrained_lr: Bool. If True, joystick is contrained to actions\n parallel to the x-axis, by zeroing out the y-axis (component 1)\n of the action.\n momentum: Float in [0, 1]. Discount factor for previous action. This\n should be zero if control_velocity is False, because imparting\n forces automatically gives momentum to the agent(s) being\n controlled. If control_velocity is True, setting this greater\n than zero gives the controlled agent(s) momentum. However, the\n velocity is clipped at scaling_factor, so the agent only retains\n momentum when stopping or changing direction and does not\n accelerate.\n \"\"\"\n self._scaling_factor = scaling_factor\n if not isinstance(action_layers, (list, tuple)):\n action_layers = (action_layers,)\n self._action_layers = action_layers\n self._constrained_lr = constrained_lr\n self._control_velocity = control_velocity\n self._momentum = momentum\n\n self._action_spec = specs.BoundedArray(\n shape=(2,), dtype=np.float32, minimum=-1, maximum=1)\n\n def step(self, state, action):\n \"\"\"Apply action to environment state.\n\n Args:\n state: OrderedDict. Environment state.\n action: Numpy float array of size (2) in [-1, 1]. 
Force to apply.\n \"\"\"\n if self._constrained_lr:\n action[1] = 0.\n\n self._action *= self._momentum\n self._action += self._scaling_factor * action\n self._action = np.clip(\n self._action, -self._scaling_factor, self._scaling_factor)\n \n for action_layer in self._action_layers:\n for sprite in state[action_layer]:\n if self._control_velocity:\n sprite.velocity = self._action / sprite.mass\n else:\n sprite.velocity += self._action / sprite.mass\n\n def reset(self, state):\n \"\"\"Reset action space at start of new episode.\"\"\"\n del state\n self._action = np.zeros(2)\n \n def random_action(self):\n \"\"\"Return randomly sampled action.\"\"\"\n return np.random.uniform(-1., 1., size=(2,))\n \n def action_spec(self):\n return self._action_spec\n", "# This file was forked and modified from the file here:\n# https://github.com/deepmind/spriteworld/blob/master/spriteworld/sprite_generators.py\n# Here is the license header for that file:\n\n# Copyright 2019 DeepMind Technologies Limited.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Generators for producing lists of sprites based on factor distributions.\"\"\"\n\nimport itertools\nimport numpy as np\nfrom moog import sprite\n\n\ndef generate_sprites(factor_dist,\n num_sprites=1,\n max_recursion_depth=int(1e4),\n fail_gracefully=False):\n \"\"\"Create callable that samples sprites from a factor distribution.\n\n Example usage:\n ```python\n sprite_factors = distribs.Product(\n [distribs.Continuous('x', 0.2, 0.8),\n distribs.Continuous('y', 0.2, 0.8),\n distribs.Continuous('x_vel', -0.03, 0.03),\n distribs.Continuous('y_vel', -0.03, 0.03)],\n shape='circle, scale=0.1, c0=255, c1=0, c2=0,\n )\n sprite_gen = sprite_generators.generate_sprites(\n sprite_factors, num_sprites=lambda: np.random.randint(3, 6))\n \n def _state_initializer():\n ...\n other_sprites = ...\n ...\n sprites = sprite_gen(\n disjount=True, without_overlapping=other_sprites)\n state = collections.OrderedDict([\n ('other_sprites', other_sprites),\n ('sprites', sprites),\n ])\n ```\n\n Args:\n factor_dist: The factor distribution from which to sample. Should be an\n instance of spriteworld.factor_distributions.AbstractDistribution.\n num_sprites: Int or callable returning int. Number of sprites to\n generate per call.\n max_recursion_depth: Int. Maximum recursion depth when rejection\n sampling to generate sprites without overlap.\n fail_gracefully: Bool. Whether to return a list of sprites or raise\n RecursionError if max_recursion_depth is exceeded.\n\n Returns:\n _generate: Callable that returns a list of Sprites.\n \"\"\"\n def _overlaps(s, other_sprites):\n \"\"\"Whether s overlaps any sprite in other_sprites.\"\"\"\n if len(other_sprites) == 0:\n return False\n else:\n overlaps = [s.overlaps_sprite(x) for x in other_sprites]\n return any(overlaps)\n\n def _generate(disjoint=False, without_overlapping=[]):\n \"\"\"Return a list of sprites.\n \n Args:\n disjoint: Boolean. 
If true, all generated sprites will be disjoint.\n without_overlapping: Optional iterable of ../sprite/Sprite\n instances. If specified, all generated sprites will not overlap\n any sprites in without_overlapping.\n \"\"\"\n n = num_sprites() if callable(num_sprites) else num_sprites\n sprites = []\n for _ in range(n):\n s = sprite.Sprite(**factor_dist.sample())\n count = 0\n while _overlaps(s, without_overlapping):\n if count > max_recursion_depth:\n if fail_gracefully:\n return sprites\n else:\n raise RecursionError(\n 'max_recursion_depth exceeded trying to initialize '\n 'a non-overlapping sprite.')\n count += 1\n s = sprite.Sprite(**factor_dist.sample())\n sprites.append(s)\n if disjoint:\n without_overlapping = without_overlapping + [s]\n \n return sprites\n\n return _generate\n\n\ndef chain_generators(*sprite_generators):\n \"\"\"Chain generators by concatenating output sprite sequences.\n\n Essentially an 'AND' operation over sprite generators. This is useful when\n one wants to control the number of samples from the modes of a multimodal\n sprite distribution.\n\n Note that factor_distributions.Mixture provides weighted mixture\n distributions, so chain_generators() is typically only used when one wants\n to forces the different modes to each have a non-zero number of sprites.\n\n Args:\n *sprite_generators: Callable sprite generators.\n\n Returns:\n _generate: Callable returning a list of sprites.\n \"\"\"\n\n def _generate(*args, **kwargs):\n return list(itertools.chain(*[generator(*args, **kwargs)\n for generator in sprite_generators]))\n\n return _generate\n\n\ndef sample_generator(sprite_generators, p=None):\n \"\"\"Sample one element from a set of sprite generators.\n\n Essential an 'OR' operation over sprite generators. This returns a callable\n that samples a generator from sprite_generators and calls it.\n\n Note that if sprite_generators each return 1 sprite, this functionality can\n be achieved with spriteworld.factor_distributions.Mixture, so\n sample_generator is typically used when sprite_generators each return\n multiple sprites. Effectively it allows dependant sampling from a multimodal\n factor distribution.\n\n Args:\n sprite_generators: Iterable of callable sprite generators.\n p: Probabilities associated with each generator. If None, assumes\n uniform distribution.\n\n Returns:\n _generate: Callable sprite generator.\n \"\"\"\n\n def _generate(*args, **kwargs):\n sampled_generator = np.random.choice(sprite_generators, p=p)\n return sampled_generator(*args, **kwargs)\n\n return _generate\n\n\ndef shuffle(sprite_generator):\n \"\"\"Randomize the order of sprites sample from sprite_generator.\n\n This is useful because sprites are z-layered with occlusion according to\n their order, so if sprite_generator is the output of chain_generators(),\n then sprites from some component distributions will always be behind sprites\n from others.\n\n An alternate design would be to let the environment handle sprite ordering,\n but this design is preferable because the order can be controlled more\n finely. For example, this allows the user to specify one sprite (e.g. 
the\n agent's body) to always be in the foreground while all the others are\n randomly ordered.\n\n Args:\n sprite_generator: Callable return a list of sprites.\n\n Returns:\n _generate: Callable sprite generator.\n \"\"\"\n\n def _generate(*args, **kwargs):\n sprites = sprite_generator(*args, **kwargs)\n order = np.arange(len(sprites))\n np.random.shuffle(order)\n return [sprites[i] for i in order]\n\n return _generate\n" ]
[ [ "numpy.random.uniform", "numpy.zeros", "numpy.clip" ], [ "numpy.random.shuffle", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yyliu01/TraCoCo
[ "eecbc92c961d393deaa31726739a94b7f495d893" ]
[ "Code/VnetLA/validate.py" ]
[ "import os\nimport math\nimport torch\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom medpy import metric\nimport torch.nn.functional as F\nfrom Configs.config import config\nfrom Model.Vnet import VNet as Vnet\nfrom cc3d import connected_components\nfrom Dataloader.dataset import LAHeartDataset\n\n\"\"\"\n# https://github.com/kleinzcy/SASSnet/blob/master/code/test_util.py\ndef getLargestCC(segmentation):\n # from skimage.measure import label as sm_label\n labels = sm_label(segmentation)\n assert (labels.max() != 0) # assume at least 1 CC\n largestCC = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1\n return largestCC\n\"\"\"\n\n\ndef cct(pseudo_label):\n labels_out, N = connected_components(pseudo_label, connectivity=26, return_N=True)\n for segid in range(1, N + 1):\n extracted_image = labels_out * (labels_out == segid)\n if extracted_image.sum() < 8000:\n pseudo_label[labels_out == segid] = 0\n return pseudo_label\n\n\ndef test_all_case(net, val_set, num_classes, patch_size=(112, 112, 80), stride_xy=18, stride_z=4,\n post_process=False, visual=False):\n\n total_metric = 0.0\n assert val_set.aug is False, \">> no augmentation for test set\"\n dataloader = iter(val_set)\n tbar = range(len(val_set))\n tbar = tqdm(tbar, ncols=135)\n for (idx, _) in enumerate(tbar):\n image, label = next(dataloader)\n prediction, score_map = test_single_case(net, image, stride_xy, stride_z, patch_size,\n num_classes=num_classes,\n post_process=post_process)\n\n if np.sum(prediction) == 0:\n single_metric = (0, 0, 0, 0)\n else:\n single_metric = calculate_metric_percase(np.array(prediction),\n np.array(label[:]))\n\n total_metric += np.asarray(single_metric)\n\n if visual:\n # import nibabel as nib\n # struggle for where to save; modify it if you need.\n raise NotImplementedError\n\n avg_metric = total_metric / len(val_set)\n print(\"|dice={:.4f}|mIoU={:.4f}|95HD={:.4f}|ASD={:.4f}|\".format(avg_metric[0], avg_metric[1],\n avg_metric[3], avg_metric[2]))\n\n return avg_metric\n\n\ndef test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1,\n post_process=False):\n\n image = image.squeeze()\n w, h, d = image.shape\n\n # if the size of image is less than patch_size, then padding it\n add_pad = False\n if w < patch_size[0]:\n w_pad = patch_size[0] - w\n add_pad = True\n else:\n w_pad = 0\n if h < patch_size[1]:\n h_pad = patch_size[1] - h\n add_pad = True\n else:\n h_pad = 0\n if d < patch_size[2]:\n d_pad = patch_size[2] - d\n add_pad = True\n else:\n d_pad = 0\n wl_pad, wr_pad = w_pad // 2, w_pad - w_pad // 2\n hl_pad, hr_pad = h_pad // 2, h_pad - h_pad // 2\n dl_pad, dr_pad = d_pad // 2, d_pad - d_pad // 2\n if add_pad:\n image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad), (dl_pad, dr_pad)], mode='constant',\n constant_values=0)\n ww, hh, dd = image.shape\n\n sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1\n sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1\n sz = math.ceil((dd - patch_size[2]) / stride_z) + 1\n score_map = np.zeros((num_classes,) + image.shape).astype(np.float32)\n cnt = np.zeros(image.shape).astype(np.float32)\n\n for x in range(0, sx):\n xs = min(stride_xy * x, ww - patch_size[0])\n for y in range(0, sy):\n ys = min(stride_xy * y, hh - patch_size[1])\n for z in range(0, sz):\n zs = min(stride_z * z, dd - patch_size[2])\n test_patch = image[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]]\n test_patch = np.expand_dims(np.expand_dims(test_patch, axis=0), axis=0).astype(np.float32)\n test_patch = 
torch.from_numpy(test_patch).cuda(non_blocking=True)\n y1, _ = net(test_patch)\n y = F.softmax(y1, dim=1)\n y = y.cpu().data.numpy()\n y = y[0, :, :, :, :]\n score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] \\\n = score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + y\n cnt[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] \\\n = cnt[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + 1\n score_map = score_map / np.expand_dims(cnt, axis=0)\n label_map = np.argmax(score_map, axis=0)\n\n if post_process:\n label_map = cct(label_map)\n # label_map = getLargestCC(label_map) feel free to change the post-process approach\n\n if add_pad:\n label_map = label_map[wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d]\n score_map = score_map[:, wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d]\n\n return label_map, score_map\n\n\ndef calculate_metric_percase(pred, gt):\n dice = metric.binary.dc(pred, gt)\n jc = metric.binary.jc(pred, gt)\n hd = metric.binary.hd95(pred, gt)\n asd = metric.binary.asd(pred, gt)\n\n return dice, jc, hd, asd\n\n\ndef test_calculate_metric(ckpt_path, vis=False, post=False):\n net = Vnet(n_channels=1, n_classes=2,\n normalization='batchnorm', has_dropout=True).cuda()\n net.load_state_dict(torch.load(ckpt_path))\n net.eval()\n val_dataset = LAHeartDataset(os.path.join(config.code_path, \"Dataloader\"),\n config.data_path,\n split=\"eval\", config=config)\n\n # follows the previous works' setting\n avg_metric = test_all_case(net, val_dataset, num_classes=2,\n patch_size=(112, 112, 80), stride_xy=18, stride_z=4,\n post_process=post, visual=vis)\n\n return avg_metric\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Medical Semi-supervised Semantic Segmentation (valid)')\n parser.add_argument(\"--env_name\", default=\"traCoCo(8-label,spatial_weight(kl)=0.3,hyp=0.1,iters=9000)\",\n type=str, help=\"your environment folder name for training\")\n\n parser.add_argument(\"--visual\", action=\"store_true\",\n help=\"your environment folder name for training\")\n\n parser.add_argument(\"--post\", action=\"store_true\",\n help=\"implement post process or not\")\n\n cmd_line = parser.parse_args()\n default_path = os.path.join(config.code_path, \"saved\", cmd_line.env_name)\n ckpt = os.listdir(default_path)\n ckpt = [i for i in ckpt if \".pth\" in str(i)][0]\n print(\"validate {} for LA dataset ...\".format(str(ckpt)))\n metric = test_calculate_metric(os.path.join(default_path, ckpt), vis=cmd_line.visual,\n post=cmd_line.post)\n" ]
[ [ "torch.nn.functional.softmax", "numpy.expand_dims", "numpy.pad", "torch.load", "numpy.asarray", "torch.from_numpy", "numpy.argmax", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dcstrandberg/aspect-sentiment
[ "0177888d4fe96d49b78e44f5bd24be619c93bf00", "0177888d4fe96d49b78e44f5bd24be619c93bf00" ]
[ "aspect_sentiment.py", "tweet_handlers.py" ]
[ "import spacy\nfrom textblob import TextBlob\nimport pandas as pd\n\n# Import functions from other files\nfrom tweet_handlers import pullTweetsFromCSV, tweetPulls\n\n### Declare functions to standardize, identify, and analyze input text\n# Will ultimately take in a list of tweets and return:\n# - Word counts\n# - Split of positive / negative aspects\n# - Brand identification?\n\n#visualizeText() is a funtion to diagram sentences for help troubleshooting\n# Inputs: \n# - nlp: an NLP object, \n# - txt = a string containing the sentence to be diagramed, \n# - writeFilename: a string containing the filename to write the HTML diagram to\n# Returns:\n# - writeFilename: the path of the file that contains the HTML diagram\ndef visualizeText(nlp, txt, writeFilename):\n doc = nlp(txt)\n html = spacy.displacy.render(doc, style='dep')\n\n filePath = './' + writeFilename + '.html'\n\n with open(filePath, 'w') as f:\n f.write(html)\n \n return filePath\n\n\n#extractDescriptors() is a funtion to pull aspects and descriptors from a list of sentences\n# Inputs: \n# - nlp: an NLP object, \n# - sentenceList: a list of strinsg containing the sentences to be analyzed\n# Outputs: \n# - list of dictionaries containing 'aspect' and 'description' -- not broken by tweet\n\ndef extractDescriptors(nlp, sentenceList):\n #We'll ultimately return this aspects list\n aspects = []\n aspects_lemma = []\n attributes = []\n attributes_lemma = []\n\n\n #We will iterate through the sentences\n for i, aSentence in enumerate( sentenceList ):\n if i % 100 == 0: print(\"Tweet# \", str(i))\n doc = nlp(aSentence)\n \n for token in doc:\n\n ###TODO: \n # Currently there's no standardization that makes it a 1:1 Noun + Adjective, so that needs to be fixed\n # Also need to add in a case that checks for pronoun resolution and sees what we can do about that\n\n # We need to identify each noun, and find its descendants that are (pos_ == 'ADJ' or pos_ == 'VERB') and (dep_ == 'amod' or dep_ == 'acl')\n\n # Modifying rule to examine ALL nouns, not just the subject of the sentence\n #if token.dep_ == 'nsubj' and token.pos_ == 'NOUN':\n if (token.pos_ == 'ADJ' or token.pos_ == 'VERB') and (token.dep_ == 'amod' or token.dep_ == 'acl'):\n\n #Now append the things\n aspects.append (token.head.text)\n aspects_lemma.append(token.head.lemma_)\n\n attributes.append( token.text )\n attributes_lemma.append( token.lemma_ )\n\n\n return ( aspects , attributes, aspects_lemma, attributes_lemma ) \n\n# Need a function that pulls attributes for each keyword in the tweet DF, since we need them to be kept separate\n# extractTweetAttributes: \n# Takes a DF of tweets, keywords, etc. 
and pulls out adjectives for each\n# Inputs:\n# - nlp: an NLP object,\n# - tweet_df: pandas dataframe containing colums:\n# - Tweet \n# - Keyword\n# - Spanish\n# - Date\n# Returns:\n# - attribute_df: dataframe containing the list of...\n# ...aspects & attributes for each keyword / spanish pair\ndef extractTweetAttributes(nlp, tweet_df):\n #define return df\n attribute_df = pd.DataFrame( columns = [\n 'Keyword',\n 'Spanish',\n 'aspect',\n 'attribute',\n 'aspect_lemma',\n 'attribute_lemma'\n ])\n\n # Now create a set for the different keywords and spanish words\n keySet = set( tweet_df['Keyword'] )\n \n for aKey in keySet:\n print(\"Extracting \", aKey)\n spanishWord = tweet_df.loc[ tweet_df['Keyword'] == aKey ]['Spanish'].iloc[0]\n\n # And this is where we actually add the various analyses\n ( aspectList , attributeList, aspectList_lemma, attributeList_lemma ) = extractDescriptors( nlp, tweet_df[ tweet_df['Keyword'] == aKey ]['tweet'] ) \n\n\n # Now that we've got the data, create lookup lists for the Keyword & Spanish words\n keyList = [aKey] * len(aspectList)\n spanishList = [spanishWord] * len(aspectList)\n\n temp_df = pd.DataFrame({\n 'Keyword': keyList,\n 'Spanish': spanishList,\n 'aspect': aspectList,\n 'attribute': attributeList,\n 'aspect_lemma': aspectList_lemma,\n 'attribute_lemma': attributeList_lemma\n })\n\n # Finally, append the data for this keyword to the attribute dataframe\n attribute_df = attribute_df.append( temp_df )\n \n return attribute_df\n\ndef countAttributes( aspect_df ):\n\n temp_df = pd.DataFrame({\n 'Keyword': aspect_df['Keyword'],\n 'Spanish': aspect_df['Spanish'],\n 'aspect': aspect_df['aspect_lemma'],\n 'attribute': aspect_df['attribute_lemma']\n })\n\n return temp_df.value_counts()\n\n# In the main, this is where the tweet files are loaded...\n# ...and routed through the analysis functions\nif __name__ == \"__main__\":\n print(\"In the main\")\n \n # Create the NLP object that will be used for all the text processing\n #nlp = spacy.load(\"en_core_web_sm\")\n # We're actually using a spanish NLP object instead of an English one\n nlp = spacy.load(\"es_core_news_sm\")\n\n # Pull in CSV files that hold all the tweets\n tweetFileList = [\n './tweet_data/tweet_db_08.27.2021.csv'\n ]\n\n # Create the DF of tweets from the CSV File\n tweet_df = pullTweetsFromCSV( tweetFileList )#, fileEncoding='ANSI' )\n\n # Instead of pulling tweets from a file, we're going to get new tweets\n # First we need to designate a list of english + spanish keywords to search for\n keyword_df = pd.read_csv('./keyword_list.csv')\n\n #tweet_df = tweetPulls( keyword_df )\n\n #Save the tweet-df because of errors\n #tweet_df.to_csv('./tweet_data/tweet_db_08.27.2021.csv')#, encoding='ANSI')\n\n # Run the tweets through the attribute extractor\n aspect_df = extractTweetAttributes ( nlp, tweet_df)\n\n \n # Run the aspects & attributes through a modified version of the wordcount function\n count_df = countAttributes( aspect_df )\n # - Not to mention run some sort of pronoun resolution\n \n count_df.to_csv('./tweet_data/aspect_count_08.27.2021.csv')", "from datetime import date\nfrom numpy.lib.npyio import save\nimport pandas as pd\nimport twint\nfrom multiprocessing import Process, Queue, Pool, Manager, Lock\n\n\n# tweetPulls:\n# Runs tweet pulling function in multiprocess and returns a simplified tweet_df\ndef tweetPulls( key_spanish_df ):\n \n m = Manager()\n q = m.Queue() \n \n p = {}\n qcount = 0\n\n tweet_df=pd.DataFrame( columns = [\n 'Keyword',\n 'Spanish',\n 'Date',\n 'Tweet'\n ])\n\n for 
i, aKeyword in enumerate(key_spanish_df['keyword']): \n print(\"starting process: \", aKeyword)\n p[i] = Process(target=get_tweets, args=(aKeyword, key_spanish_df['english'][i], q))\n p[i].start()\n\n \n # join should be done in seperate for loop \n # reason being that once we join within previous for loop, join for p1 will start working\n # and hence will not allow the code to run after one iteration till that join is complete, ie.\n # the thread which is started as p1 is completed, so it essentially becomes a serial work instead of \n # parallel\n for i in range(len(key_spanish_df['keyword'])):\n p[i].join()\n print(\"#\" + str(i) + \" joined\")\n while q.empty() is not True:\n qcount = qcount+1\n queue_top = q.get()\n\n tweet_df = tweet_df.append(queue_top[0])\n \n print(\"Q Count \" + str(qcount) + \" pulled\")\n \n #print(q.get())\n \n return tweet_df\n\n\n# get_tweets:\n# Takes a keyword (and english version of that word), a queue object and pipes all the tweet DFs back through that queue\n# filterTweets: An optional agrument that potentially filters the returned DF with only tweets that contain the keyword\ndef get_tweets(keyword, englishWord, q, filterTweets = None):\n all = []\n \n #Create the TWINT config object\n c = twint.Config()\n \n c.Pandas = True\n c.Popular_tweets = True\n\n\n c.Hide_output = True\n c.Search = keyword\n c.Limit = 100000\n c.Since = '2019-01-01'\n\n twint.run.Search(c)\n\n #Get the tweets\n temp_df = twint.storage.panda.Tweets_df\n \n #If filter parameter == True, remove any entries that don't hvae the actual keyword in the text\n if filterTweets is not None:\n temp_df = temp_df.loc[ temp_df['tweet'].str.contains(filterTweets, case=False) ]\n temp_df = temp_df.reset_index()\n\n #append on the keyword column\n tempKeywordColumn = [ keyword ] * len( temp_df['tweet'] )\n tempEnglishColumn = [ englishWord ] * len( temp_df['tweet'] )\n\n temp_df['Keyword'] = tempEnglishColumn\n temp_df['Spanish'] = tempKeywordColumn\n\n print(keyword, \" is length \", str(len(temp_df['tweet'])))\n\n\n all.append( temp_df )\n\n q.put(all)\n print(\"Put \", keyword)\n \n return \n\n\n\n# pullTweetsFromCSV: \n# Take in filenames of CSVs and return a dataframe of Date, Tweet, Keyword, and Spanish Word \ndef pullTweetsFromCSV( tweetFileList, returnColumns = None, fileEncoding = 'UTF-8' ):\n # First we're going to use this space to declare the columns we want to keep. \n # These will be default columns, unless given others\n if returnColumns is None:\n returnColumns = [\n 'Date',\n 'Tweet',\n 'Keyword',\n 'Spanish'\n ]\n\n # Declare the df to append, then return\n tweet_df = pd.DataFrame( columns = returnColumns)\n\n if type(tweetFileList) == type(''): tweetFileList = [tweetFileList] \n\n for aFile in tweetFileList:\n temp_df = pd.read_csv(aFile, encoding=fileEncoding)\n\n tweet_df = tweet_df.append( temp_df, ignore_index=True )\n\n return tweet_df\n\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ], [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
williamhowardsnyder/OnClass
[ "07b2917dbdf01a1de54771de3383bbaa4bb2f283", "07b2917dbdf01a1de54771de3383bbaa4bb2f283" ]
[ "utils.py", "script/run_marker_gene_based_prediction.py" ]
[ "from anndata import read_h5ad\nimport sys\nfrom time import time\nfrom scipy import stats, sparse\nimport numpy as np\nimport collections\nimport pickle\nfrom sklearn.preprocessing import normalize\nimport os\nfrom collections import Counter\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score,accuracy_score,precision_recall_fscore_support, cohen_kappa_score, auc, average_precision_score,f1_score,precision_recall_curve\nimport time\nimport umap\nimport copy\nfrom sklearn import preprocessing\nfrom fbpca import pca\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scanorama import VERBOSE, KNN, ALPHA, APPROX, SIGMA\n#from libs import *\nfrom scanorama import find_alignments,merge_datasets,process_data,transform,vstack\nfrom sklearn.utils.graph_shortest_path import graph_shortest_path\nfrom scipy.sparse.linalg import svds, eigs\n\nnn_nhidden = [1000]\nrsts = [0.5,0.6,0.7,0.8]\ndfs_depth = 1\nco_dim = 5\nkeep_prob = 1.0\nuse_diagonal = True\nmax_iter = 20\nniter = 5\ndef translate_paramter(ps):\n\ts = []\n\tfor p in ps:\n\t\tif isinstance(p, list):\n\t\t\tp = [str(i) for i in p]\n\t\t\tp = '.'.join(p)\n\t\t\ts.append(p)\n\t\telse:\n\t\t\ts.append(str(p))\n\ts = '_'.join(s)\n\treturn s\npname = translate_paramter([max_iter])\n\ndef make_folder(folder):\n\tif not os.path.exists(folder):\n\t\tos.makedirs(folder)\n\treturn folder\n\ndef create_propagate_networks(dname, l2i, onto_net, cls2cls, ontology_nlp_file, rsts = [0.5,0.6,0.7,0.8], diss=[2,3], thress=[1,0.8]):\n\tncls = np.shape(cls2cls)[0]\n\tif dname != 'allen':\n\t\tonto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs = create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file)\n\t\t#network = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls)\n\t\tnetwork = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls, diss = diss, thress = thress)\n\telse:\n\t\tstack_net_bin = np.zeros((ncls,ncls))\n\t\tfor n1 in onto_net:\n\t\t\tfor n2 in onto_net[n1]:\n\t\t\t\tif n1==n2:\n\t\t\t\t\tcontinue\n\t\t\t\tstack_net_bin[n1,n2] = 1\n\t\t\t\tstack_net_bin[n2,n1] = 1\n\t\tnetwork = [RandomWalkRestart(stack_net_bin, rst) for rst in rsts]\n\treturn network\n\n\ndef fine_nearest_co_using_nlp(sentences,co2emb,obo_file,nlp_mapping_cutoff=0.8):\n\tco2name, name2co = get_ontology_name(obo_file = obo_file)\n\tfrom sentence_transformers import SentenceTransformer\n\tmodel = SentenceTransformer('bert-base-nli-mean-tokens')\n\tsentences = np.array([sentence.lower() for sentence in sentences])\n\tsentence_embeddings = model.encode(sentences)\n\tco_embeddings = []\n\tcos = []\n\tfor co in co2emb:\n\t\tco_embeddings.append(co2emb[co])\n\t\tcos.append(co)\n\tco_embeddings = np.array(co_embeddings)\n\tsent2co = {}\n\tfor sentence, embedding, ind in zip(sentences, sentence_embeddings, range(len(sentences))):\n\t\tscs = cosine_similarity(co_embeddings, embedding.reshape(1,-1))\n\n\t\tco_id = np.argmax(scs)\n\t\tsc = scs[co_id]\n\t\tif sc>nlp_mapping_cutoff:\n\t\t\tsent2co[sentence.lower()] = cos[co_id]\n\t\t\tnames = set()\n\t\t\tfor name in name2co:\n\t\t\t\tif name2co[name].upper() == cos[co_id]:\n\t\t\t\t\tnames.add(name)\n\t\t\t#print (sentence, cos[co_id], sc, co2name[cos[co_id]],names)\n\treturn sent2co\n\n\ndef ImputeUnseenCls(y_vec, y_raw, cls2cls, nseen, knn=1):\n\tnclass = np.shape(cls2cls)[0]\n\tseen2unseen_sim = cls2cls[:nseen, 
nseen:]\n\tnngh = np.argsort(seen2unseen_sim*-1, axis = 0)[0,:]\n\tncell = len(y_vec)\n\ty_mat = np.zeros((ncell, nclass))\n\ty_mat[:,:nseen] = y_raw[:, :nseen]\n\tfor i in range(ncell):\n\t\tif y_vec[i] == -1:\n\t\t\t#kngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn]\n\t\t\t#if len(kngh) == 0:\n\t\t\t#\tcontinue\n\t\t\ty_mat[i,nseen:] = y_mat[i,nngh]\n\t\t\ty_mat[i,:nseen] -= 1000000\n\treturn y_mat\n\n\ndef ImputeUnseenCls_Backup(y_vec, y_raw, cls2cls, nseen, knn=1):\n\tnclass = np.shape(cls2cls)[0]\n\tseen2unseen_sim = cls2cls[:nseen, nseen:]\n\tncell = len(y_vec)\n\ty_mat = np.zeros((ncell, nclass))\n\ty_mat[:,:nseen] = y_raw[:, :nseen]\n\tfor i in range(ncell):\n\t\tif y_vec[i] == -1:\n\t\t\tkngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn]\n\t\t\tif len(kngh) == 0:\n\t\t\t\tcontinue\n\t\t\ty_mat[i,:nseen] -= 1000000\n\t\t\ty_mat[i,nseen:] = np.dot(y_raw[i,kngh], seen2unseen_sim[kngh,:])\n\treturn y_mat\n\ndef find_gene_ind(genes, common_genes):\n\tgid = []\n\tfor g in common_genes:\n\t\tgid.append(np.where(genes == g)[0][0])\n\tgid = np.array(gid)\n\treturn gid\n\ndef RandomWalkOntology(onto_net, l2i, ontology_nlp_file, ontology_nlp_emb_file, rst = 0.7):\n\tncls = len(l2i)\n\tonto_net_nlp, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file)\n\tonto_net_nlp = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb)\n\tonto_net_mat = np.zeros((ncls, ncls))\n\tfor n1 in onto_net:\n\t\tfor n2 in onto_net[n1]:\n\t\t\tif n1==n2:\n\t\t\t\tcontinue\n\t\t\tonto_net_mat[n1,n2] = onto_net_nlp[n1, n2]\n\t\t\tonto_net_mat[n2,n1] = onto_net_nlp[n2, n1]\n\tonto_net_rwr = RandomWalkRestart(onto_net_mat, rst)\n\treturn onto_net_rwr\n\ndef process_expression(c2g_list):\n\t#this data process function is motivated by ACTINN, please check ACTINN for more information.\n\tc2g = np.vstack(c2g_list)\n\tc2g = c2g.T\n\t#print ('onclass d0',np.shape(c2g))\n\tc2g = c2g[np.sum(c2g, axis=1)>0, :]\n\t#print (c2g)\n\t#print ('onclass d1',np.shape(c2g))\n\tc2g = np.divide(c2g, np.sum(c2g, axis=0, keepdims=True)) * 10000\n\tc2g = np.log2(c2g+1)\n\texpr = np.sum(c2g, axis=1)\n\t#total_set = total_set[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]\n\n\tc2g = c2g[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]\n\t#print (c2g)\n\t#print ('onclass d2',np.shape(c2g))\n\tcv = np.std(c2g, axis=1) / np.mean(c2g, axis=1)\n\tc2g = c2g[np.logical_and(cv >= np.percentile(cv, 1), cv <= np.percentile(cv, 99)),]\n\t#print (c2g)\n\t#print ('onclass d3',np.shape(c2g))\n\tc2g = c2g.T\n\t#print (c2g)\n\t#print ('onclass d4',np.shape(c2g))\n\tc2g_list_new = []\n\tindex = 0\n\tfor c in c2g_list:\n\t\tncell = np.shape(c)[0]\n\t\tc2g_list_new.append(c2g[index:index+ncell,:])\n\t\tindex = ncell\n\treturn c2g_list_new\n\ndef read_ontology_file(dname, data_folder):\n\tif 'allen' in dname:\n\t\tcell_type_network_file = data_folder + 'allen.ontology'\n\t\tcell_type_nlp_emb_file = None\n\t\tcl_obo_file = None\n\t\tif not os.path.isfile(cell_type_network_file):\n\t\t\tsys.error(cell_type_network_file + ' not found!')\n\telse:\n\t\tcell_type_network_file = data_folder + 'cl.ontology'\n\t\tcell_type_nlp_emb_file = data_folder + 'cl.ontology.nlp.emb'\n\t\tcl_obo_file = data_folder + 'cl.obo'\n\t\tif not os.path.isfile(cell_type_nlp_emb_file):\n\t\t\tsys.exit(cell_type_nlp_emb_file + ' not found!')\n\t\tif not os.path.isfile(cell_type_network_file):\n\t\t\tsys.exit(cell_type_network_file + ' not found!')\n\t\tif not 
os.path.isfile(cl_obo_file):\n\t\t\tsys.exit(cl_obo_file + ' not found!')\n\treturn cell_type_nlp_emb_file, cell_type_network_file, cl_obo_file\n\ndef read_data_file(dname, data_dir):\n\n\tif 'microcebus' in dname:\n\t\ttech = '10x'\n\t\tfeature_file = data_dir + 'Lemur/' + dname +'.h5ad'\n\t\tfilter_key={'method':tech }\n\t\tlabel_file = None\n\t\tgene_file = ''\n\t\tlabel_key = 'cell_ontology_class'\n\telif 'muris' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfeature_file = data_dir + 'Tabula_Muris_Senis/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad'\n\t\tfilter_key = {}\n\t\tlabel_file = None\n\t\tgene_file = ''\n\t\tbatch_key = ''\n\t\tlabel_key = 'cell_ontology_class'\n\telif 'sapiens' in dname:\n\t\tfeature_file = data_dir + 'sapiens/' + 'Pilot1_Pilot2_decontX_Oct2020.h5ad'\n\t\tfilter_key = {}\n\t\tlabel_file = None\n\t\tgene_file = ''\n\t\tbatch_key = ''\n\t\tlabel_key = 'cell_ontology_type'\n\telif 'allen' in dname:\n\t\tfeature_file = data_dir + '/Allen_Brain/features.pkl'\n\t\tlabel_file = data_dir + '/Allen_Brain/labels.pkl'\n\t\tgene_file = data_dir + '/Allen_Brain/genes.pkl'\n\t\tlabel_key = ''\n\t\tfilter_key = {}\n\telif 'krasnow' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfeature_file = data_dir + '/HLCA/'+tech+'_features.pkl'\n\t\tlabel_file = data_dir + '/HLCA/'+tech+'_labels.pkl'\n\t\tgene_file = data_dir + '/HLCA/'+tech+'_genes.pkl'\n\t\tlabel_key = ''\n\t\tfilter_key = {}\n\telse:\n\t\tsys.exit('wrong dname '+dname)\n\tif feature_file.endswith('.pkl'):\n\t\treturn feature_file, filter_key, label_key, label_file, gene_file\n\telif feature_file.endswith('.h5ad'):\n\t\treturn feature_file, filter_key, label_key, label_file, gene_file\n\tsys.exit('wrong file suffix')\n\ndef read_singlecell_data(dname, data_dir, ontology_dir, nsample = 500000000, read_tissue = False, exclude_non_leaf_ontology = True):\n\tif 'microcebus' in dname:\n\t\ttech = '10x'\n\t\t#file = data_dir + 'TMS_official_060520/' + 'tabula-microcebus_smartseq2-10x_combined_annotated_filtered_gene-labels-correct.h5ad'\n\t\tfile = data_dir + 'TMS_official_060520/' + dname +'.h5ad'\n\t\tfilter_key={'method':tech }\n\t\tbatch_key = ''#original_channel\n\t\tontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'\n\t\tontology_file = ontology_dir + '/cell_ontology/cl.ontology'\n\t\tcl_obo_file = ontology_dir + '/cell_ontology/cl.obo'\n\t\tif not read_tissue:\n\t\t\tfeature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\t\telse:\n\t\t\tfeature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\telif 'muris' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfile = data_dir + 'TMS_official_060520/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad'\n\t\tfilter_key = {}\n\t\tbatch_key = ''\n\t\tontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'\n\t\tontology_file = ontology_dir + '/cell_ontology/cl.ontology'\n\t\tcl_obo_file = ontology_dir + '/cell_ontology/cl.obo'\n\t\tif not read_tissue:\n\t\t\tfeature, label, genes = parse_h5ad(file, nsample = nsample, 
read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\t\telse:\n\t\t\tfeature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\telif 'allen_part' in dname:\n\t\tfeature_file = data_dir + 'Allen/matrix_part.csv'\n\t\tlabel_file = data_dir + 'Allen/metadata.csv'\n\t\tontology_file = data_dir + 'Allen/cell_type_ontology'\n\t\tontology_nlp_file = None\n\t\tfeature, label, genes = parse_csv(feature_file, label_file, nsample = nsample, label_key='cell_type_accession_label', exclude_non_ontology = True, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)\n\telif 'allen' in dname:\n\t\tfeature_file = data_dir + 'Allen/features.pkl'\n\t\tlabel_file = data_dir + 'Allen/labels.pkl'\n\t\tgene_file = data_dir + 'Allen/genes.pkl'\n\t\tontology_file = data_dir + 'Allen/cell_type_ontology'\n\t\tontology_nlp_file = None\n\t\tfeature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)\n\telif 'krasnow' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfeature_file = data_dir + 'Krasnow/'+tech+'_features.pkl'\n\t\tlabel_file = data_dir + 'Krasnow/'+tech+'_labels.pkl'\n\t\tgene_file = data_dir + 'Krasnow/'+tech+'_genes.pkl'\n\t\tontology_file = ontology_dir + '/cell_ontology/cl.ontology'\n\t\tontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'\n\t\tcl_obo_file = ontology_dir + '/cell_ontology/cl.obo'\n\t\tfeature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)\n\telse:\n\t\tsys.exit('wrong dname '+dname)\n\tif read_tissue:\n\t\treturn feature, label, genes, tissues, ontology_nlp_file, ontology_file\n\telse:\n\t\treturn feature, label, genes, ontology_nlp_file, ontology_file\n\n\n\ndef parse_krasnow(feature_file, label_file, gene_file, seed = 1, nsample = 1000,exclude_non_leaf_ontology = True, exclude_non_ontology = True, cell_ontology_file=None):\n\tnp.random.seed(seed)\n\n\tif feature_file.endswith('.pkl'):\n\t\tfeatures = pickle.load(open(feature_file, 'rb'))\n\t\tlabels = pickle.load(open(label_file, 'rb'))\n\t\tgenes = pickle.load(open(gene_file, 'rb'))\n\t\tncell, ngene = np.shape(features)\n\t\tassert(ncell == len(labels))\n\t\tassert(ngene == len(genes))\n\t\tindex = np.random.choice(ncell,min(nsample,ncell),replace=False)\n\t\tfeatures = features[index, :]\n\t\tlabels = labels[index]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeatures = features[new_ids, :]\n\t\tlabels = labels[new_ids]\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\treturn features, labels, genes\n\ndef parse_pkl(feature_file, label_file, gene_file, seed = 1, nsample = 10000000,exclude_non_leaf_ontology = True, cell_ontology_file=None):\n\tnp.random.seed(seed)\n\tif feature_file.endswith('.pkl'):\n\t\tfeatures = pickle.load(open(feature_file, 'rb'))\n\t\tlabels = 
pickle.load(open(label_file, 'rb'))\n\t\tgenes = pickle.load(open(gene_file, 'rb'))\n\t\tncell, ngene = np.shape(features)\n\t\tassert(ncell == len(labels))\n\t\tassert(ngene == len(genes))\n\t\tindex = np.random.choice(ncell,ncell,replace=False)\n\t\tfeatures = features[index, :]\n\t\tlabels = labels[index]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeatures = features[new_ids, :]\n\t\tlabels = labels[new_ids]\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\treturn features, labels, genes\n\ndef select_high_var_genes(train_X, test_X, ngene = 200):\n\tmat = np.vstack((train_X, test_X))\n\t#mat = mat.todense()\n\tgstd = np.std(mat, axis=0)\n\tbest_genes = np.argsort(gstd*-1)\n\tbest_genes = best_genes[:ngene]\n\treturn train_X[:, best_genes], test_X[:, best_genes]\n\ndef emb_cells(train_X, test_X, dim=20):\n\tif dim==-1:\n\t\treturn np.log1p(train_X.todense()), np.log1p(test_X.todense())\n\ttrain_X = np.log1p(train_X)\n\ttest_X = np.log1p(test_X)\n\ttrain_X = preprocessing.normalize(train_X, axis=1)\n\ttest_X = preprocessing.normalize(test_X, axis=1)\n\tntrain = np.shape(train_X)[0]\n\tmat = sparse.vstack((train_X, test_X))\n\tU, s, Vt = pca(mat, k=dim) # Automatically centers.\n\tX = U[:, range(dim)] * s[range(dim)]\n\treturn X[:ntrain,:], X[ntrain:,:]\n\ndef write_markers(fname, markers):\n\t## Write marker genes to file\n\tfmarker_genes = open(fname,'w')\n\tfor t in markers:\n\t\tfmarker_genes.write(t+'\\t')\n\t\tg2pv = sorted(markers[t].items(), key=lambda item: item[1])\n\t\tfor g,pv in g2pv:\n\t\t\tfmarker_genes.write(g+'(pv:'+'{:.2e}'.format(pv)+')\\t')\n\t\tfmarker_genes.write('\\n')\n\tfmarker_genes.close()\n\n\ndef calculate_markers(cell2term, cell2gene, genes, terms, topk_cells=500, only_over_expressed = True, return_k_genes = 100):\n\tncell, nterm = np.shape(cell2term)\n\tngene = np.shape(cell2gene)[1]\n\tassert(ncell == np.shape(cell2gene)[0])\n\tmarkers = collections.defaultdict(dict)\n\tfor t in range(nterm):\n\t\tscs = np.argsort(cell2term[:,t])\n\t\tk_bot_cells = scs[:topk_cells]\n\t\tk_top_cells = scs[ncell-topk_cells:]\n\t\tpv = scipy.stats.ttest_ind(cell2gene[k_top_cells,:], cell2gene[k_bot_cells,:], axis=0)[1] #* ngene\n\t\ttop_mean = np.mean(cell2gene[k_top_cells,:],axis=0)\n\t\tbot_mean = np.mean(cell2gene[k_bot_cells,:],axis=0)\n\t\tif only_over_expressed:\n\t\t\tfor g in range(ngene):\n\t\t\t\tif top_mean[g] < bot_mean[g]:\n\t\t\t\t\tpv[g] = 1.\n\t\tpv_sort = list(np.argsort(pv))\n\t\t#for i in range(return_k_genes):\n\t\t#markers[terms[t]][genes[pv_sort[i]]] = pv[pv_sort[i]]\n\t\tmarkers[terms[t]] = pv\n\t\tfor i,p in enumerate(pv):\n\t\t\tif np.isnan(p):\n\t\t\t\tpv[i] = 1.\n\t\t\t#markers[terms[t]][str(pv_sort[i])] = pv[pv_sort[i]]\n\treturn markers\n\ndef peak_h5ad(file):\n\t'''\n\tpeak the number of cells, classes, genes in h5ad file\n\t'''\n\tx = read_h5ad(file)\n\t#print (np.shape(x.X))\n\t#print (x.X[:10][:10])\n\t#print (x.obs.keys())\n\tncell, ngene = np.shape(x.X)\n\tnclass = len(np.unique(x.obs['free_annotation']))\n\t#print (np.unique(x.obs['free_annotation']))\n\tf2name = {}\n\tsel_cell = 0.\n\tfor i in range(ncell):\n\t\tif x.obs['method'][i]!='10x':\n\t\t\tcontinue\n\n\t\tfree = x.obs['free_annotation'][i]\n\t\tname = x.obs['cell_ontology_class'][i]\n\t\tf2name[free] = name\n\t\tsel_cell += 1\n\t#return f2name\n\t#for key in x.obs.keys():\n\t#\tprint (key, np.unique(x.obs[key]))\n\treturn 
sel_cell, ngene, nclass\n\t#for i in range(10):\n\t#\tprint (x.obs['method'][i], x.obs['channel_no_10x'][i])\n\t#for key in x.obs.keys():\n\t#\tprint (key, np.unique(x.obs[key]))\n\t#return index\n\n\ndef get_onotlogy_parents(GO_net, g):\n\tterm_valid = set()\n\tngh_GO = set()\n\tngh_GO.add(g)\n\twhile len(ngh_GO) > 0:\n\t\tfor GO in list(ngh_GO):\n\t\t\tfor GO1 in GO_net[GO]:\n\t\t\t\tngh_GO.add(GO1)\n\t\t\tngh_GO.remove(GO)\n\t\t\tterm_valid.add(GO)\n\treturn term_valid\n\ndef exclude_non_ontology_term(cl_obo_file, labels, label_key):\n\tco2name, name2co = get_ontology_name(cl_obo_file)\n\tnew_labs = []\n\tnew_ids = []\n\tif label_key!='cell_ontology_class' and label_key!='cell_ontology_id':\n\t\tuse_co = False\n\t\tfor kk in np.unique(labels):\n\t\t\tif kk.lower().startswith('cl:'):\n\t\t\t\tuse_co = True\n\t\t\t\tbreak\n\telse:\n\t\tif label_key == 'cell_ontology_class':\n\t\t\tuse_co = False\n\t\telse:\n\t\t\tuse_co = True\n\tfor i in range(len(labels)):\n\t\tl = labels[i]\n\t\tif not use_co:\n\t\t\tif l.lower() in name2co.keys():\n\t\t\t\tnew_labs.append(name2co[l.lower()])\n\t\t\t\tnew_ids.append(i)\n\t\telse:\n\t\t\tif l.lower() in co2name.keys():\n\t\t\t\tnew_labs.append(l.lower())\n\t\t\t\tnew_ids.append(i)\n\tnew_labs = np.array(new_labs)\n\tnew_ids = np.array(new_ids)\n\treturn new_ids, new_labs\n\n\ndef parse_raw_h5ad(file,seed=1,nsample=1e10,tissue_key='tissue',label_key='cell_ontology_class', read_tissue = True, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None):\n\tnp.random.seed(seed)\n\tx = read_h5ad(file)\n\n\tncell = np.shape(x.raw.X)[0]\n\tselect_cells = set(range(ncell))\n\tfor key in filter_key:\n\t\tvalue = filter_key[key]\n\t\tselect_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])\n\tselect_cells = sorted(select_cells)\n\tfeature = x.raw.X[select_cells, :]\n\tlabels = np.array(x.obs[label_key].tolist())[select_cells]\n\tif read_tissue:\n\t\ttissues = np.array(x.obs[tissue_key].tolist())[select_cells]\n\tif batch_key=='' or batch_key not in x.obs.keys():\n\t\tbatch_labels = np.ones(len(labels))\n\telse:\n\t\tbatch_labels = np.array(x.obs[batch_key].tolist())[select_cells]\n\tgenes = x.var.index\n\tncell = len(select_cells)\n\tif exclude_non_ontology:\n\t\tnew_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key)\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\t\tlabels = labels[new_ids]\n\t\tif read_tissue:\n\t\t\ttissues = tissues[new_ids]\n\tncell = len(labels)\n\tindex = np.random.choice(ncell,min(nsample,ncell),replace=False)\n\tbatch_labels = batch_labels[index]\n\tfeature = feature[index, :] # cell by gene matrix\n\tlabels = labels[index]\n\tif read_tissue:\n\t\ttissues = tissues[index]\n\tgenes = x.var.index\n\tcorrected_feature = run_scanorama_same_genes(feature, batch_labels)\n\tcorrected_feature = corrected_feature.toarray()\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\tif read_tissue:\n\t\tassert(len(tissues) == len(labels))\n\t\treturn corrected_feature, labels, genes, tissues\n\telse:\n\t\treturn corrected_feature, labels, genes\n\ndef select_cells_based_on_keys(x, features, tissues = None, labels = None, filter_key = 
None):\n\tncell = np.shape(x.X)[0]\n\tselect_cells = set(range(ncell))\n\tfor key in filter_key:\n\t\tvalue = filter_key[key]\n\t\tselect_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])\n\tselect_cells = sorted(select_cells)\n\tfeatures = features[select_cells,: ]\n\tif labels is not None:\n\t\tlabels = labels[select_cells]\n\tif tissues is not None:\n\t\ttissues = tissues[select_cells]\n\tx = x[select_cells,:]\n\treturn features, labels, tissues, x\n\ndef find_marker_genes(train_X, pred_Y_all, genes, i2l, topk = 50):\n\tcor = corr2_coeff(pred_Y_all[:,:].T, train_X[:,:].T)\n\tcor = np.nan_to_num(cor) # cell type to gene\n\tnl = len(i2l)\n\tc2g = {}\n\tfor i in range(nl):\n\t\tgl = np.argsort(cor[i,:]*-1)\n\t\tc2g[i2l[i]] = {}\n\t\tfor j in range(topk):\n\t\t\tc2g[i2l[i]][genes[gl[j]]] = cor[i, gl[j]]\n\treturn c2g, cor\n\n\ndef use_pretrained_model(OnClass, genes, test_X, models = []):\n\tlast_l2i = {}\n\tlast_i2l = {}\n\n\tpred_Y_all_models = 0.\n\tngene = len(genes)\n\tfor model in models:\n\t\tOnClass.BuildModel(OnClass.co2emb, ngene = ngene, use_pretrain = model)\n\t\tprint ('Build model finished for ',model)\n\t\tpred_Y_seen, pred_Y_all, pred_label = OnClass.Predict(test_X, test_genes = genes)\n\t\tprint ('Predict for ',model)\n\t\tpred_Y_all = pred_Y_all.T / (pred_Y_all.T.sum(axis=1)[:, np.newaxis] + 1)\n\t\tpred_Y_all = pred_Y_all.T\n\t\tif len(last_l2i)>0:\n\t\t\tnew_ct_ind = []\n\t\t\tfor i in range(len(last_i2l)):\n\t\t\t\tl = last_i2l[i]\n\t\t\t\tnew_ct_ind.append(OnClass.co2i[l])\n\t\t\tpred_Y_all = pred_Y_all[:, np.array(new_ct_ind)]\n\t\t\tpred_Y_all_models += pred_Y_all\n\t\telse:\n\t\t\tlast_l2i = OnClass.co2i\n\t\t\tlast_i2l = OnClass.i2co\n\t\t\tpred_Y_all_models = pred_Y_all\n\treturn pred_Y_all_models\n\n\ndef read_data(feature_file, cell_ontology_ids, exclude_non_leaf_ontology = False, ct_mapping_key = {}, tissue_key = None, seed = 1, filter_key = None, AnnData_label_key=None, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, label_file=None, cl_obo_file = None, cell_ontology_file = None):\n\tnp.random.seed(seed)\n\tx = read_h5ad(feature_file)\n\tncell = np.shape(x.X)[0]\n\tdataset = x.X.toarray()\n\tgenes = np.array([x.upper() for x in x.var.index])\n\n\tif tissue_key is not None:\n\t\ttissues = np.array(x.obs[tissue_key].tolist())\n\telse:\n\t\ttissues = None\n\tif AnnData_label_key is None and label_file is None:\n\t\tprint ('no label file is provided')\n\t\tlabels = None\n\t\tdataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key)\n\t\treturn dataset, genes, labels, tissues, x\n\tif AnnData_label_key is not None:\n\t\tlabels = x.obs[AnnData_label_key].tolist()\n\telse:\n\t\tfin = open(label_file)\n\t\tlabels = []\n\t\tfor line in fin:\n\t\t\tlabels.append(line.strip())\n\t\tfin.close()\n\tlabels = np.array(labels)\n\tdataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key)\n\tind, labels, unfound_labs = map_and_select_labels(labels, cell_ontology_ids, cl_obo_file, ct_mapping_key = ct_mapping_key, nlp_mapping = nlp_mapping, co2emb = co2emb, nlp_mapping_cutoff = nlp_mapping_cutoff, cl_obo_file = cl_obo_file)\n\tif tissue_key is not None:\n\t\ttissues = tissues[ind]\n\tdataset = dataset[ind, :]\n\tx = x[ind, :]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\ttissues = tissues[new_ids]\n\t\tdataset = dataset[new_ids, 
:]\n\t\tlabels = labels[new_ids]\n\t\tx = x[new_ids, :]\n\n\tncell = np.shape(dataset)[0]\n\tindex = np.random.choice(ncell,ncell,replace=False)\n\tdataset = dataset[index, :] # cell by gene matrix\n\tlabels = labels[index]\n\tif tissue_key is not None:\n\t\ttissues = tissues[index]\n\treturn dataset, genes, labels, tissues, x\n\n\n\n\ndef exact_match_co_name_2_co_id(labels, lab2co, cl_obo_file = None):\n\tif cl_obo_file is None:\n\t\treturn lab2co\n\tco2name, name2co = get_ontology_name(obo_file = cl_obo_file)\n\tfor label in labels:\n\t\tif label.lower() in name2co:\n\t\t\tlab2co[label.lower()] = name2co[label.lower()]\n\tfor name in name2co:\n\t\tlab2co[name.lower()] = name2co[name]\n\treturn lab2co\n\n\ndef map_and_select_labels(labels, cell_ontology_ids, obo_file, ct_mapping_key = {}, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, cl_obo_file = None):\n\tlab2co = {}\n\tif nlp_mapping:\n\t\tif co2emb is None:\n\t\t\tsys.exit('Please provide cell type embedding to do NLP-based mapping.')\n\t\tlab2co = fine_nearest_co_using_nlp(np.unique(labels), co2emb, obo_file,nlp_mapping_cutoff = nlp_mapping_cutoff)\n\tlab2co = exact_match_co_name_2_co_id(np.unique(labels), lab2co, cl_obo_file = cl_obo_file)\n\tfor ct in ct_mapping_key:\n\t\tlab2co[ct_mapping_key[ct]] = lab2co[ct]\n\tind = []\n\tlab_id = []\n\tunfound_labs = set()\n\tfor i,l in enumerate(labels):\n\t\tif l in cell_ontology_ids:\n\t\t\tind.append(i)\n\t\t\tlab_id.append(l)\n\t\telif l.lower() in lab2co:\n\t\t\tind.append(i)\n\t\t\tlab_id.append(lab2co[l.lower()])\n\t\telse:\n\t\t\tunfound_labs.add(l)\n\tfrac = len(ind) * 1. / len(labels)\n\tind = np.array(ind)\n\tlabels = np.array(lab_id)\n\tunfound_labs = set(unfound_labs)\n\twarn_message = 'Warning: Only: %f precentage of labels are in the Cell Ontology. The remaining cells are excluded! 
Consider using NLP mapping and choose a small mapping cutoff (nlp_mapping_cutoff)' % (frac * 100)\n\tif frac < 0.5:\n\t\tprint (warn_message)\n\t\tprint ('Here are unfound labels:',unfound_labs)\n\treturn ind, labels, unfound_labs\n\ndef parse_h5ad(file,seed=1,nsample=1e10,label_key='cell_ontology_class', read_tissue = False, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None):\n\t'''\n\tread h5ad file\n\tfeature: cell by gene expression\n\tlabel: cell ontology class\n\tgenes: gene names HGNC\n\t'''\n\tnp.random.seed(seed)\n\tx = read_h5ad(file)\n\tncell = np.shape(x.X)[0]\n\tselect_cells = set(range(ncell))\n\tfor key in filter_key:\n\t\tvalue = filter_key[key]\n\t\tselect_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])\n\tselect_cells = sorted(select_cells)\n\tfeature = x.X[select_cells, :]\n\tlabels = np.array(x.obs[label_key].tolist())[select_cells]\n\tif read_tissue:\n\t\ttissues = np.array(x.obs['tissue'].tolist())[select_cells]\n\tif batch_key=='' or batch_key not in x.obs.keys():\n\t\tbatch_labels = np.ones(len(labels))\n\telse:\n\t\tbatch_labels = np.array(x.obs[batch_key].tolist())[select_cells]\n\tgenes = x.var.index\n\tncell = len(select_cells)\n\n\tif exclude_non_ontology:\n\t\tnew_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key)\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\t\tlabels = labels[new_ids]\n\t\tif read_tissue:\n\t\t\ttissues = tissues[new_ids]\n\tncell = len(labels)\n\tindex = np.random.choice(ncell,min(nsample,ncell),replace=False)\n\tbatch_labels = batch_labels[index]\n\tfeature = feature[index, :] # cell by gene matrix\n\tlabels = labels[index]\n\tif read_tissue:\n\t\ttissues = tissues[index]\n\tgenes = x.var.index\n\t#corrected_feature = run_scanorama_same_genes(feature, batch_labels)\n\tcorrected_feature = feature.toarray()\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\tif read_tissue:\n\t\tassert(len(tissues) == len(labels))\n\t\treturn corrected_feature, labels, genes, tissues\n\telse:\n\t\treturn corrected_feature, labels, genes\n\n\ndef exclude_parent_child_nodes(cell_ontology_file,labels):\n\tuniq_labels = np.unique(labels)\n\texcludes = set()\n\tnet = collections.defaultdict(dict)\n\tfin = open(cell_ontology_file)\n\tfor line in fin:\n\t\ts,p = line.strip().split('\\t')\n\t\tnet[s][p] = 1 #p is parent\n\tfin.close()\n\tfor n in list(net.keys()):\n\t\tngh = get_ontology_parents(net, n)\n\t\tfor n1 in ngh:\n\t\t\tnet[n][n1] = 1\n\tfor l1 in uniq_labels:\n\t\tfor l2 in uniq_labels:\n\t\t\tif l1 in net[l2] and l1!=l2: #l1 is l2 parent\n\t\t\t\texcludes.add(l1)\n\t#print (excludes)\n\tnew_ids = []\n\tfor i in range(len(labels)):\n\t\tif labels[i] not in excludes:\n\t\t\tnew_ids.append(i)\n\tnew_ids = np.array(new_ids)\n\treturn new_ids, excludes\n\ndef corr2_coeff(A, B):\n # Rowwise mean of input arrays & subtract from input arrays themeselves\n A_mA = A - A.mean(1)[:, None]\n B_mB = B - B.mean(1)[:, None]\n\n # Sum of squares across rows\n ssA = (A_mA**2).sum(1)\n ssB = (B_mB**2).sum(1)\n\n # Finally get corr coeff\n return np.dot(A_mA, B_mB.T) / np.sqrt(np.dot(ssA[:, None],ssB[None]))\n\ndef 
extract_data_based_on_class(feats, labels, sel_labels):\n\tind = []\n\tfor l in sel_labels:\n\t\tid = np.where(labels == l)[0]\n\t\tind.extend(id)\n\tnp.random.shuffle(ind)\n\tX = feats[ind,:]\n\tY = labels[ind]\n\treturn X, Y, ind\n\ndef SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10):\n\tnp.random.seed(random_state)\n\n\tcls = np.unique(all_Y)\n\tcls2ct = Counter(all_Y)\n\tncls = len(cls)\n\ttest_cls = list(np.random.choice(cls, int(ncls * nfold_cls), replace=False))\n\tfor c in cls2ct:\n\t\tif cls2ct[c] < nmin_size:\n\t\t\ttest_cls.append(c)\n\ttest_cls = np.unique(test_cls)\n\t#add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True)\n\ttrain_cls = [x for x in cls if x not in test_cls]\n\ttrain_cls = np.array(train_cls)\n\ttrain_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)\n\ttest_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)\n\tif all_tissues is not None:\n\t\ttrain_tissues = all_tissues[train_ind]\n\t\ttest_tissues = all_tissues[test_ind]\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(\n\t \ttrain_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\t\ttest_tissues = np.concatenate((test_tissues, train_tissues_test))\n\t\ttrain_tissues = train_tissues_train\n\telse:\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(\n\t \ttrain_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\ttest_X = np.vstack((test_X, train_X_test))\n\ttest_Y = np.concatenate((test_Y, train_Y_test))\n\ttrain_X = train_X_train\n\ttrain_Y = train_Y_train\n\tif all_tissues is not None:\n\t\treturn train_X, train_Y, train_tissues, test_X, test_Y, test_tissues\n\telse:\n\t\treturn train_X, train_Y, test_X, test_Y\n\n'''\ndef SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10):\n\tnp.random.seed(random_state)\n\n\tcls = np.unique(all_Y)\n\tcls2ct = Counter(all_Y)\n\tncls = len(cls)\n\trare_cls = []\n\tnot_rare_cls = []\n\tfor c in cls2ct:\n\t\tif cls2ct[c] < 2:\n\t\t\tcontinue\n\t\telif cls2ct[c] < nmin_size:\n\t\t\trare_cls.append(c)\n\t\telse:\n\t\t\tnot_rare_cls.append(c)\n\tcls = np.concatenate((rare_cls, not_rare_cls))\n\tncls = len(cls)\n\trare_cls = np.array(rare_cls)\n\tnot_rare_cls = np.array(not_rare_cls)\n\ttrain_non_rare_cls = list(np.random.choice(not_rare_cls, int(len(not_rare_cls) * (1 - nfold_cls)), replace=False))\n\ttrain_cls = np.concatenate((train_non_rare_cls, rare_cls))\n\ttest_cls = [x for x in cls if x not in train_cls]\n\ttest_cls = np.array(test_cls)\n\tassert(len(test_cls) + len(train_cls) == ncls)\n\tassert(len(set(test_cls) & set(train_cls)) == 0)\n\t#add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True)\n\ttrain_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)\n\ttest_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)\n\n\tif all_tissues is not None:\n\t\ttrain_tissues = all_tissues[train_ind]\n\t\ttest_tissues = all_tissues[test_ind]\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(\n\t \ttrain_X, train_Y, train_tissues, test_size=nfold_sample, stratify = 
train_Y,random_state=random_state)\n\t\ttest_tissues = np.concatenate((test_tissues, train_tissues_test))\n\t\ttrain_tissues = train_tissues_train\n\telse:\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(\n\t \ttrain_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\ttest_X = np.vstack((test_X, train_X_test))\n\ttest_Y = np.concatenate((test_Y, train_Y_test))\n\ttrain_X = train_X_train\n\ttrain_Y = train_Y_train\n\tif all_tissues is not None:\n\t\treturn train_X, train_Y, train_tissues, test_X, test_Y, test_tissues\n\telse:\n\t\treturn train_X, train_Y, test_X, test_Y\n'''\n\ndef LeaveOneOutTrainTest(all_X, all_Y, test_Y, all_tissues = None, random_state=10, nfold_sample = 0.2, nmin_size=10):\n\tnp.random.seed(random_state)\n\n\tcls = np.unique(all_Y)\n\tcls2ct = Counter(all_Y)\n\tncls = len(cls)\n\ttest_cls = [test_Y]\n\ttest_cls = np.unique(test_cls)\n\t#add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True)\n\ttrain_cls = [x for x in cls if x not in test_cls]\n\ttrain_cls = np.array(train_cls)\n\ttrain_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)\n\ttest_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)\n\tif all_tissues is not None:\n\t\ttrain_tissues = all_tissues[train_ind]\n\t\ttest_tissues = all_tissues[test_ind]\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(\n\t \ttrain_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\t\ttest_tissues = np.concatenate((test_tissues, train_tissues_test))\n\t\ttrain_tissues = train_tissues_train\n\telse:\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(\n\t \ttrain_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\ttest_X = np.vstack((test_X, train_X_test))\n\ttest_Y = np.concatenate((test_Y, train_Y_test))\n\ttrain_X = train_X_train\n\ttrain_Y = train_Y_train\n\tif all_tissues is not None:\n\t\treturn train_X, train_Y, train_tissues, test_X, test_Y, test_tissues\n\telse:\n\t\treturn train_X, train_Y, test_X, test_Y\n\ndef renorm(X):\n\tY = X.copy()\n\tY = Y.astype(float)\n\tngene,nsample = Y.shape\n\ts = np.sum(Y, axis=0)\n\t#print s.shape()\n\tfor i in range(nsample):\n\t\tif s[i]==0:\n\t\t\ts[i] = 1\n\t\t\tif i < ngene:\n\t\t\t\tY[i,i] = 1\n\t\t\telse:\n\t\t\t\tfor j in range(ngene):\n\t\t\t\t\tY[j,i] = 1. 
/ ngene\n\t\tY[:,i] = Y[:,i]/s[i]\n\treturn Y\n\ndef RandomWalkRestart(A, rst_prob, delta = 1e-4, reset=None, max_iter=50,use_torch=False,return_torch=False):\n\tif use_torch:\n\t\tdevice = torch.device(\"cuda:0\")\n\tnnode = A.shape[0]\n\t#print nnode\n\tif reset is None:\n\t\treset = np.eye(nnode)\n\tnsample,nnode = reset.shape\n\t#print nsample,nnode\n\tP = renorm(A)\n\tP = P.T\n\tnorm_reset = renorm(reset.T)\n\tnorm_reset = norm_reset.T\n\tif use_torch:\n\t\tnorm_reset = torch.from_numpy(norm_reset).float().to(device)\n\t\tP = torch.from_numpy(P).float().to(device)\n\tQ = norm_reset\n\n\tfor i in range(1,max_iter):\n\t\t#Q = gnp.garray(Q)\n\t\t#P = gnp.garray(P)\n\t\tif use_torch:\n\t\t\tQ_new = rst_prob*norm_reset + (1-rst_prob) * torch.mm(Q, P)#.as_numpy_array()\n\t\t\tdelta = torch.norm(Q-Q_new, 2)\n\t\telse:\n\t\t\tQ_new = rst_prob*norm_reset + (1-rst_prob) * np.dot(Q, P)#.as_numpy_array()\n\t\t\tdelta = np.linalg.norm(Q-Q_new, 'fro')\n\t\tQ = Q_new\n\t\t#print (i,Q)\n\t\tsys.stdout.flush()\n\t\tif delta < 1e-4:\n\t\t\tbreak\n\tif use_torch and not return_torch:\n\t\tQ = Q.cpu().numpy()\n\treturn Q\n\ndef DCA_vector(Q, dim):\n\tnnode = Q.shape[0]\n\talpha = 1. / (nnode **2)\n\tQ = np.log(Q + alpha) - np.log(alpha);\n\n\t#Q = Q * Q';\n\t[U, S, V] = svds(Q, dim);\n\tS = np.diag(S)\n\tX = np.dot(U, np.sqrt(S))\n\tY = np.dot(np.sqrt(S), V)\n\tY = np.transpose(Y)\n\treturn X,U,S,V,Y\n\ndef read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file):\n\tncls = len(l2i)\n\tnet = np.zeros((ncls, ncls))\n\tbin_net = np.zeros((ncls, ncls))\n\tfin = open(ontology_nlp_file)\n\tfor line in fin:\n\t\ts,p,wt = line.upper().strip().split('\\t')\n\t\twt = float(wt)\n\t\tnet[l2i[s], l2i[p]] = np.exp(wt)\n\t\tnet[l2i[p], l2i[s]] = np.exp(wt)\n\t\tbin_net[l2i[s], l2i[p]] = 1\n\t\tbin_net[l2i[p], l2i[s]] = 1\n\tfin.close()\n\n\tl2vec = {}\n\tfin = open(ontology_nlp_emb_file)\n\tfor line in fin:\n\t\tw = line.upper().strip().split('\\t')\n\t\tl2vec[w[0]] = []\n\t\tdim = len(w)-1\n\t\tfor i in range(1,len(w)):\n\t\t\tl2vec[w[0]].append(float(w[i]))\n\tfin.close()\n\n\tl2vec_mat = np.zeros((ncls, dim))\n\tfor l in l2vec:\n\t\tif l.upper() not in l2i:\n\t\t\tcontinue\n\t\tl2vec_mat[l2i[l.upper()],:] = l2vec[l]\n\n\t'''\n\tnet_sum = np.sum(net,axis=0)\n\tfor i in range(ncls):\n\t\tif net_sum[i] == 0:\n\t\t\tnet[i,i] = 1.\n\t\tnet[:,i] /= np.sum(net[:,i])\n\t#net = net / net.sum(axis=1)[:, np.newaxis]\n\t'''\n\treturn net, bin_net, l2vec_mat\n\n\ndef GetReverseNet(onto_net):\n\tonto_net_rev = collections.defaultdict(dict)\n\tfor a in onto_net:\n\t\tfor b in onto_net[a]:\n\t\t\tonto_net_rev[b][a] = 1\n\treturn onto_net_rev\n\n\ndef ParseCLOnto(train_Y, ontology_nlp_file, ontology_file, co_dim=5, co_mi=3, dfs_depth = 1, combine_unseen = False, add_emb_diagonal = True, use_pretrain = None, use_seen_only = True):#\n\tunseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat = create_labels(train_Y, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth, combine_unseen = combine_unseen)\n\tY_emb = emb_ontology(i2l, ontology_nlp_file, ontology_file, dim = co_dim, mi=co_mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l)\n\tif add_emb_diagonal:\n\t\tY_emb = np.column_stack((np.eye(len(i2l)), Y_emb))\n\treturn unseen_l, l2i, i2l, onto_net, Y_emb, onto_net_mat\n\n\n\ndef graph_embedding(A, i2l, mi=0, dim=20,use_seen_only=True,unseen_l=None):\n\tnl = np.shape(A)[0]\n\tif use_seen_only:\n\t\tseen_ind = []\n\t\tunseen_ind = []\n\t\tfor i in range(nl):\n\t\t\tif i2l[i] in 
unseen_l:\n\t\t\t\tunseen_ind.append(i)\n\t\t\telse:\n\t\t\t\tseen_ind.append(i)\n\t\tseen_ind = np.array(seen_ind)\n\t\tunseen_ind = np.array(unseen_ind)\n\n\t#if len(seen_ind) * 0.8 < dim:\n\t#\tdim = int(len(seen_ind) * 0.8)\n\tif mi==0 or mi == 1:\n\t\tsp = graph_shortest_path(A,method='FW',directed =False)\n\telse:\n\t\tsp = RandomWalkRestart(A, 0.8)\n\tif use_seen_only:\n\t\tsp = sp[seen_ind, :]\n\t\tsp = sp[:,seen_ind]\n\tX = np.zeros((np.shape(sp)[0],dim))\n\tsvd_dim = min(dim, np.shape(sp)[0]-1)\n\tif mi==0 or mi == 2:\n\t\tX[:,:svd_dim] = svd_emb(sp, dim=svd_dim)\n\telse:\n\t\tX[:,:svd_dim] = DCA_vector(sp, dim=svd_dim)[0]\n\tif use_seen_only:\n\t\tX_ret = np.zeros((nl, dim))\n\t\tX_ret[seen_ind,:] = X\n\telse:\n\t\tX_ret = X\n\tif mi==2 or mi == 3:\n\t\tsp *= -1\n\treturn sp, X_ret\n\ndef cal_ontology_emb(ontology_nlp_file, ontology_file, dim=20, mi=3, use_pretrain = None, use_seen_only = True, unseen_l = None):\n\tif use_pretrain is None or not os.path.isfile(use_pretrain+'X.npy') or not os.path.isfile(use_pretrain+'sp.npy'):\n\t\tcl_nlp = collections.defaultdict(dict)\n\t\tif ontology_nlp_file is not None:\n\t\t\tfin = open(ontology_nlp_file)\n\t\t\tfor line in fin:\n\t\t\t\ts,p,wt = line.upper().strip().split('\\t')\n\t\t\t\tcl_nlp[s][p] = float(wt)\n\t\t\t\tcl_nlp[p][s] = float(wt)\n\t\t\tfin.close()\n\n\t\tfin = open(ontology_file)\n\t\tlset = set()\n\t\ts2p = {}\n\t\tfor line in fin:\n\t\t\tw = line.strip().split('\\t')\n\t\t\ts = w[0]\n\t\t\tp = w[1]\n\t\t\tif len(w)==2:\n\t\t\t\tif p in cl_nlp and s in cl_nlp[p]:\n\t\t\t\t\twt = cl_nlp[p][s]\n\t\t\t\telse:\n\t\t\t\t\twt = 1.\n\t\t\telse:\n\t\t\t\twt = float(w[2])\n\t\t\tif s not in s2p:\n\t\t\t\ts2p[s] = {}\n\t\t\ts2p[s][p] = wt\n\t\t\tlset.add(s)\n\t\t\tlset.add(p)\n\t\tfin.close()\n\t\tlset = np.sort(list(lset))\n\t\tnl = len(lset)\n\t\tl2i = dict(zip(lset, range(nl)))\n\t\ti2l = dict(zip(range(nl), lset))\n\t\tA = np.zeros((nl, nl))\n\t\tfor s in s2p:\n\t\t\tfor p in s2p[s]:\n\t\t\t\tA[l2i[s], l2i[p]] = s2p[s][p]\n\t\t\t\tA[l2i[p], l2i[s]] = s2p[s][p]\n\t\tsp, X = graph_embedding(A, i2l, mi=mi, dim=dim, use_seen_only=use_seen_only, unseen_l=unseen_l)\n\t\tif use_pretrain is not None:\n\t\t\ti2l_file = use_pretrain+'i2l.npy'\n\t\t\tl2i_file = use_pretrain+'l2i.npy'\n\t\t\tX_file = use_pretrain+'X.npy'\n\t\t\tsp_file = use_pretrain+'sp.npy'\n\t\t\tnp.save(X_file, X)\n\t\t\tnp.save(i2l_file, i2l)\n\t\t\tnp.save(l2i_file, l2i)\n\t\t\tnp.save(sp_file, sp)\n\telse:\n\t\ti2l_file = use_pretrain+'i2l.npy'\n\t\tl2i_file = use_pretrain+'l2i.npy'\n\t\tX_file = use_pretrain+'X.npy'\n\t\tsp_file = use_pretrain+'sp.npy'\n\t\tX = np.load(X_file)\n\t\ti2l = np.load(i2l_file,allow_pickle=True).item()\n\t\tl2i = np.load(l2i_file,allow_pickle=True).item()\n\t\tsp = np.load(sp_file,allow_pickle=True)\n\treturn X, l2i, i2l, sp\n\ndef merge_26_datasets(datanames_26datasets, scan_dim = 50):\n\tdatasets, genes_list, n_cells = load_names(datanames_26datasets,verbose=False,log1p=True)\n\tdatasets, genes = merge_datasets(datasets, genes_list)\n\tdatasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim)\n\tdatasets_dimred, expr_datasets = my_assemble(datasets_dimred, ds_names=datanames_26datasets, expr_datasets = datasets, sigma=150)\n\tdatasets_dimred = sparse.vstack(expr_datasets).toarray()\n\treturn datasets_dimred, genes\n\ndef emb_ontology(i2l, ontology_nlp_file, ontology_file, dim=20, mi=0, use_pretrain = None, use_seen_only = True, unseen_l = None):\n\tX, ont_l2i, ont_i2l, A = cal_ontology_emb( ontology_nlp_file, 
ontology_file, dim=dim, mi=mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l)\n\n\ti2emb = np.zeros((len(i2l),dim))\n\tnl = len(i2l)\n\tfor i in range(nl):\n\t\tant = i2l[i]\n\t\tif ant not in ont_l2i:\n\t\t\tprint (ant, ont_l2i)\n\t\t\tassert('xxx' in ant.lower() or 'nan' in ant.lower())\n\t\t\tcontinue\n\t\ti2emb[i,:] = X[ont_l2i[ant],:]\n\t'''\n\tAA = np.zeros((nl, nl))\n\tfor i in range(nl):\n\t\tfor j in range(nl):\n\t\t\tanti, antj = i2l[i], i2l[j]\n\t\t\tif anti in ont_l2i and antj in ont_l2i:\n\t\t\t\tAA[i,j] = A[ont_l2i[anti],ont_l2i[antj]]\n\t'''\n\treturn i2emb\n'''\ndef get_ontology_parents(GO_net, g):\n\tterm_valid = set()\n\tngh_GO = set()\n\tngh_GO.add(g)\n\twhile len(ngh_GO) > 0:\n\t\tfor GO in list(ngh_GO):\n\t\t\tfor GO1 in GO_net[GO]:\n\t\t\t\tngh_GO.add(GO1)\n\t\t\tngh_GO.remove(GO)\n\t\t\tterm_valid.add(GO)\n\treturn term_valid\n'''\n\ndef get_ontology_parents(GO_net, g, dfs_depth=100):\n\tterm_valid = set()\n\tngh_GO = set()\n\tngh_GO.add(g)\n\tdepth = {}\n\tdepth[g] = 0\n\twhile len(ngh_GO) > 0:\n\t\tfor GO in list(ngh_GO):\n\t\t\tfor GO1 in GO_net[GO]:\n\t\t\t\tngh_GO.add(GO1)\n\t\t\t\tdepth[GO1] = depth[GO] + 1\n\t\t\tngh_GO.remove(GO)\n\t\t\tif depth[GO] < dfs_depth:\n\t\t\t\tterm_valid.add(GO)\n\treturn term_valid\n\ndef create_labels(train_Y, ontology_nlp_file, ontology_file, combine_unseen = False, dfs_depth = 1000):\n\n\tfin = open(ontology_file)\n\tlset = set()\n\tfor line in fin:\n\t\ts,p = line.strip().split('\\t')\n\t\tlset.add(s)\n\t\tlset.add(p)\n\tfin.close()\n\n\tseen_l = sorted(np.unique(train_Y))\n\tunseen_l = sorted(lset - set(train_Y))\n\tys = np.concatenate((seen_l, unseen_l))\n\n\ti2l = {}\n\tl2i = {}\n\tfor l in ys:\n\t\tnl = len(i2l)\n\t\tcol = l\n\t\tif combine_unseen and l in unseen_l:\n\t\t\tnl = len(seen_l)\n\t\t\tl2i[col] = nl\n\t\t\ti2l[nl] = col\n\t\t\tcontinue\n\t\tl2i[col] = nl\n\t\ti2l[nl] = col\n\ttrain_Y = [l2i[y] for y in train_Y]\n\ttrain_X2Y = ConvertLabels(train_Y, ncls = len(i2l))\n\tonto_net, onto_net_mat = read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth)\n\treturn unseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat\n\ndef query_depth_ontology(net, node, root='cl:0000000'):\n\tdepth = 0\n\twhile node != root:\n\t\tif len(net[node]) == 0:\n\t\t\tprint (node)\n\t\tnode = sorted(list(net[node].keys()))[0]\n\t\tdepth += 1\n\t\tif depth>100:\n\t\t\tsys.error('root not found')\n\treturn depth\n\n\ndef read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = 1000):\n\tnl = len(l2i)\n\tnet = collections.defaultdict(dict)\n\tnet_mat = np.zeros((nl,nl))\n\tfin = open(ontology_file)\n\tfor line in fin:\n\t\ts,p = line.strip().split('\\t')\n\t\tsi = l2i[s]\n\t\tpi = l2i[p]\n\t\tnet[si][pi] = 1\n\t\tnet_mat[si][pi] = 1\n\tfin.close()\n\tfor n in range(nl):\n\t\tngh = get_ontology_parents(net, n, dfs_depth = dfs_depth)\n\t\tnet[n][n] = 1\n\t\tfor n1 in ngh:\n\t\t\tnet[n][n1] = 1\n\treturn net, net_mat\n\ndef extract_label_propagate_tree(onto_net, ncls):\n\ttree = np.zeros((ncls,ncls))\n\tfor n1 in onto_net:\n\t\tfor n2 in onto_net[n1]:\n\t\t\ttree[n1,n2] = 1\n\treturn tree\n\ndef ConvertLabels(labels, ncls=-1):\n\tncell = np.shape(labels)[0]\n\tif len(np.shape(labels)) ==1 :\n\t\t#bin to mat\n\t\tif ncls == -1:\n\t\t\tncls = np.max(labels)\n\t\tmat = np.zeros((ncell, ncls))\n\t\tfor i in range(ncell):\n\t\t\tmat[i, labels[i]] = 1\n\t\treturn mat\n\telse:\n\t\tif ncls == -1:\n\t\t\tncls = np.shape(labels)[1]\n\t\tvec = np.zeros(ncell)\n\t\tfor i in range(ncell):\n\t\t\tind = 
np.where(labels[i,:]!=0)[0]\n\t\t\tassert(len(ind)<=1) # not multlabel classification\n\t\t\tif len(ind)==0:\n\t\t\t\tvec[i] = -1\n\t\t\telse:\n\t\t\t\tvec[i] = ind[0]\n\t\treturn vec\n\ndef MapLabel2CL(test_Y, l2i):\n\ttest_Y_new = np.array([l2i[y] for y in test_Y])\n\treturn test_Y_new\n\ndef get_ontology_name(obo_file, lower=True):\n\tfin = open(obo_file)\n\tco2name = {}\n\tname2co = {}\n\ttag_is_syn = {}\n\tfor line in fin:\n\t\tif line.startswith('id: '):\n\t\t\tco = line.strip().split('id: ')[1]\n\t\tif line.startswith('name: '):\n\t\t\tif lower:\n\t\t\t\tname = line.strip().lower().split('name: ')[1]\n\t\t\telse:\n\t\t\t\tname = line.strip().split('name: ')[1]\n\t\t\tco2name[co] = name\n\t\t\tname2co[name] = co\n\t\tif line.startswith('synonym: '):\n\t\t\tif lower:\n\t\t\t\tsyn = line.strip().lower().split('synonym: \"')[1].split('\" ')[0]\n\t\t\telse:\n\t\t\t\tsyn = line.strip().split('synonym: \"')[1].split('\" ')[0]\n\t\t\tif syn in name2co:\n\t\t\t\tcontinue\n\t\t\tname2co[syn] = co\n\tfin.close()\n\treturn co2name, name2co\n\ndef knn_ngh(Y2Y):\n\tind = np.argsort(Y2Y*-1, axis=1)\n\treturn ind\n\ndef extend_prediction_2unseen_normalize(pred_Y_seen, onto_net_rwr, nseen, ratio=200):\n\tsys.exit(-1)#NOT USED\n\tncls = np.shape(onto_net_rwr)[0]\n\tonto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (ncls, 1))\n\tpred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis]\n\tpred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:])\n\tpred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1)\n\tpred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio\n\treturn pred_Y_all\n\ndef create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file, ontology_nlp_emb_file):\n\tncls = np.shape(cls2cls)[0]\n\t_, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file = ontology_nlp_file, ontology_nlp_emb_file = ontology_nlp_emb_file)\n\tonto_net_nlp_all_pairs = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb)\n\tonto_net_nlp = np.zeros((ncls, ncls))\n\tonto_net_bin = np.zeros((ncls, ncls))\n\tstack_net_bin = np.zeros((ncls, ncls))\n\tstack_net_nlp = np.zeros((ncls, ncls))\n\n\tfor n1 in onto_net:\n\t\tfor n2 in onto_net[n1]:\n\t\t\tif n1==n2:\n\t\t\t\tcontinue\n\t\t\tstack_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]\n\t\t\tstack_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]\n\t\t\tstack_net_bin[n1,n2] = 1\n\t\t\tstack_net_bin[n2,n1] = 1\n\tfor n1 in range(ncls):\n\t\tfor n2 in range(ncls):\n\t\t\tif cls2cls[n1,n2] == 1 or cls2cls[n2,n1] == 1:\n\t\t\t\tonto_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]\n\t\t\t\tonto_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]\n\t\t\t\tonto_net_bin[n1,n2] = 1\n\t\t\t\tonto_net_bin[n2,n1] = 1\n\treturn onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs\n\n\ndef create_consensus_networks(rsts, onto_net_mat, onto_net_nlp_all_pairs, cls2cls, diss=[2,3], thress=[1,0.8]):\n\tcls2cls_sp = graph_shortest_path(cls2cls,method='FW',directed =False)\n\tncls = np.shape(onto_net_mat)[0]\n\tnetworks = []\n\tfor rst in rsts:\n\t\tfor dis in diss:\n\t\t\tfor thres in thress:\n\t\t\t\tuse_net = np.copy(onto_net_mat)\n\t\t\t\tuse_net[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] = onto_net_nlp_all_pairs[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)]\n\t\t\t\tonto_net_rwr = RandomWalkRestart(use_net, rst)\n\t\t\t\tnetworks.append(onto_net_rwr)\n\treturn networks\n\ndef 
extend_prediction_2unseen(pred_Y_seen, networks, nseen, ratio=200, use_normalize=False):\n\tif not isinstance(networks, list):\n\t\tnetworks = [networks]\n\tpred_Y_all_totoal = 0.\n\tfor onto_net_rwr in networks:\n\t\tif use_normalize:\n\t\t\tonto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (np.shape(onto_net_rwr)[0], 1))\n\t\tpred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis]\n\t\tpred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:])\n\t\tpred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1)\n\t\tpred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio\n\t\tpred_Y_all_totoal += pred_Y_all\n\treturn pred_Y_all_totoal\n\ndef my_auprc(y_true, y_pred):\n\tprecision, recall, thresholds = precision_recall_curve(y_true, y_pred)\n\tarea = auc(recall, precision)\n\treturn area\n\ndef sampled_auprc(truths,preds):\n\tpos = np.where(truths == 1)[0]\n\tneg = np.where(truths == 0)[0]\n\tassert(len(pos) + len(neg) == len(truths))\n\tnneg = len(neg)\n\tnpos = len(pos)\n\tselect_neg = np.random.choice(nneg, npos*3, replace = True)\n\tselect_ind = np.concatenate((pos, select_neg))\n\treturn average_precision_score(truths[select_ind], preds[select_ind])\n\ndef evaluate(Y_pred_mat, Y_truth_vec, unseen_l, nseen, Y_truth_bin_mat = None, Y_pred_vec = None, Y_ind=None, Y_net = None, Y_net_mat = None, write_screen = True, write_to_file = None, combine_unseen = False, prefix='', metrics = ['AUROC(seen)','AUPRC(seen)','AUROC','AUPRC','AUROC(unseen)', 'AUPRC(unseen)','Accuracy@3','Accuracy@5']):\n\t#preprocess scores\n\tunseen_l = np.array(list(unseen_l))\n\tncell,nclass = np.shape(Y_pred_mat)\n\tnseen = nclass - len(unseen_l)\n\tif Y_ind is not None:\n\t\tnon_Y_ind = np.array(list(set(range(nclass)) - set(Y_ind)))\n\t\tif len(non_Y_ind)>0:\n\t\t\tY_pred_mat[:,non_Y_ind] = -1 * np.inf\n\tif Y_pred_vec is None:\n\t\tY_pred_vec = np.argmax(Y_pred_mat, axis=1)\n\tif Y_truth_bin_mat is None:\n\t\tY_truth_bin_mat = ConvertLabels(Y_truth_vec, nclass)\n\n\tY_pred_bin_mat = ConvertLabels(Y_pred_vec, nclass)\n\t#class-based metrics\n\tclass_auc_macro = np.full(nclass, np.nan)\n\tclass_auprc_macro = np.full(nclass, np.nan)\n\tclass_f1 = np.full(nclass, np.nan)\n\tfor i in range(nclass):\n\t\tif len(np.unique(Y_truth_bin_mat[:,i]))==2 and np.sum(Y_truth_bin_mat[:,i])>=10:\n\t\t\tclass_auc_macro[i] = roc_auc_score(Y_truth_bin_mat[:,i], Y_pred_mat[:,i])\n\t\t\tclass_auprc_macro[i] = sampled_auprc(Y_truth_bin_mat[:,i], Y_pred_mat[:,i])\n\t\t\tclass_f1[i] = f1_score(Y_truth_bin_mat[:,i], Y_pred_bin_mat[:,i])\n\n\n\t#sample-based metrics\n\textend_acc, extend_Y = extend_accuracy(Y_truth_vec, Y_pred_vec, Y_net, unseen_l)\n\tkappa = cohen_kappa_score(Y_pred_vec, Y_truth_vec)\n\textend_kappa = cohen_kappa_score(extend_Y, Y_truth_vec)\n\taccuracy = accuracy_score(Y_truth_vec, Y_pred_vec)\n\tprec_at_k_3 = precision_at_k(Y_pred_mat, Y_truth_vec, 3)\n\tprec_at_k_5 = precision_at_k(Y_pred_mat, Y_truth_vec, 5)\n\n\t#print ([(x,np.sum(Y_truth_bin_mat[:,unseen_l[i]])) for i,x in enumerate(class_auprc_macro[unseen_l]) if not np.isnan(x)])\n\tseen_auc_macro = np.nanmean(class_auc_macro[:nseen])\n\tseen_auprc_macro = np.nanmean(class_auprc_macro[:nseen])\n\tseen_f1 = np.nanmean(class_f1[:nseen])\n\tif len(unseen_l) == 0:\n\t\tunseen_auc_macro = 0\n\t\tunseen_auprc_macro = 0\n\t\tunseen_f1 = 0\n\telse:\n\t\tunseen_auc_macro = np.nanmean(class_auc_macro[unseen_l])\n\t\t#unseen_auprc_macro = np.nanmean([x for i,x in enumerate(class_auprc_macro[unseen_l]) if 
np.sum(Y_truth_bin_mat[:,unseen_l[i]])>100])#\n\t\tunseen_auprc_macro = np.nanmean(class_auprc_macro[unseen_l])\n\t\tunseen_f1 = np.nanmean(class_f1[unseen_l])\n\n\t#metrics = ['AUROC','AUPRC','unseen_AUROC', 'unseen_AUPRC','Cohens Kappa','Accuracy@3','Accuracy@5']\n\t#res_v = [seen_auc_macro, seen_auprc_macro, np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), extend_kappa, prec_at_k_3, prec_at_k_5, unseen_auc_macro, unseen_auprc_macro]\n\tall_v = {'AUROC':np.nanmean(class_auc_macro), 'AUPRC': np.nanmean(class_auprc_macro), 'AUROC(seen)':seen_auc_macro, 'AUPRC(seen)': seen_auprc_macro, 'AUROC(unseen)':unseen_auc_macro, 'AUPRC(unseen)': unseen_auprc_macro, 'Cohens Kappa':extend_kappa, 'Accuracy@3':prec_at_k_3, 'Accuracy@5':prec_at_k_5}\n\tres_v = {}\n\tfor metric in metrics:\n\t\tres_v[metric] = all_v[metric]\n\t#res_v = [seen_auc_macro, seen_auprc_macro, seen_f1, np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), np.nanmean(class_f1), unseen_auc_macro, unseen_auprc_macro, unseen_f1]\n\tif write_screen:\n\t\tprint (prefix, end='\\t')\n\t\tfor v in metrics:\n\t\t\tprint ('%.4f'%res_v[v], end='\\t')\n\t\tprint ('')\n\t\tsys.stdout.flush()\n\tif write_to_file is not None:\n\t\twrite_to_file.write(prefix+'\\t')\n\t\tfor v in metrics:\n\t\t\twrite_to_file.write('%.2f\\t'%res_v[v])\n\t\twrite_to_file.write('\\n')\n\t\twrite_to_file.flush()\n\treturn res_v\n\ndef precision_at_k(pred,truth,k):\n\tncell, nclass = np.shape(pred)\n\thit = 0.\n\tfor i in range(ncell):\n\t\tx = np.argsort(pred[i,:]*-1)\n\t\trank = np.where(x==truth[i])[0][0]\n\t\tif rank < k:\n\t\t\thit += 1.\n\tprec = hit / ncell\n\treturn prec\n\ndef write_anndata_data(test_label, test_AnnData, cl_obo_file, label_name):\n\tif len(np.shape(test_label))==2:\n\t\ttest_label = np.argmax(test_label, axis = 1)\n\tco2name, name2co = get_ontology_name(cl_obo_file)\n\tx = test_AnnData\n\tncell = np.shape(x.X)[0]\n\tprint (ncell, len(test_label))\n\tassert(ncell == len(test_label))\n\ttest_name = []\n\ttest_label_id = []\n\tfor i in range(ncell):\n\t\txx = i2tp[test_label[i]]\n\t\ttest_label_id.append(xx)\n\t\ttest_name.append(co2name[xx])\n\ttest_name = np.array(test_name)\n\ttest_label_id = np.array(test_label_id)\n\tx.obs['OnClass_annotation_ontology_ID'] = test_label\n\tx.obs['OnClass_annotation_ontology_name'] = test_name\n\treturn x\n\n\ndef read_type2genes(g2i, marker_gene,cl_obo_file):\n\tco2name, name2co = get_ontology_name(cl_obo_file)\n\n\tc2cnew = {}\n\tc2cnew['cd4+ t cell'] = 'CD4-positive, CXCR3-negative, CCR6-negative, alpha-beta T cell'.lower()\n\tc2cnew['chromaffin cells (enterendocrine)'] = 'chromaffin cell'.lower()\n\n\n\tc2cnew['mature NK T cell'] = 'mature NK T cell'.lower()\n\tc2cnew['cd8+ t cell'] = 'CD8-positive, alpha-beta cytotoxic T cell'.lower()\n\tfin = open(marker_gene)\n\tfin.readline()\n\ttp2genes = {}\n\tunfound = set()\n\tfor line in fin:\n\t\tw = line.strip().split('\\t')\n\t\tc1 = w[1].lower()\n\t\tc2 = w[2].lower()\n\t\tgenes = []\n\t\tfor ww in w[8:]:\n\t\t\tif ww.upper() in g2i:\n\t\t\t\tgenes.append(ww.upper())\n\t\tif len(genes)==0:\n\t\t\tcontinue\n\t\tif c1.endswith('s') and c1[:-1] in name2co:\n\t\t\tc1 = c1[:-1]\n\t\tif c2.endswith('s') and c2[:-1] in name2co:\n\t\t\tc2 = c2[:-1]\n\t\tif c1 + ' cell' in name2co:\n\t\t\tc1 +=' cell'\n\t\tif c2 + ' cell' in name2co:\n\t\t\tc2 +=' cell'\n\t\tif c1 in c2cnew:\n\t\t\tc1 = c2cnew[c1]\n\t\tif c2 in c2cnew:\n\t\t\tc2 = c2cnew[c2]\n\t\tif c1 in name2co:\n\t\t\ttp2genes[name2co[c1]] = genes\n\t\telse:\n\t\t\tunfound.add(c1)\n\t\tif c2 in 
name2co:\n\t\t\ttp2genes[name2co[c2]] = genes\n\t\telse:\n\t\t\tunfound.add(c2)\n\tfin.close()\n\n\treturn tp2genes\n\n\n\n\ndef extend_accuracy(test_Y, test_Y_pred_vec, Y_net, unseen_l):\n\tunseen_l = set(unseen_l)\n\tn = len(test_Y)\n\tacc = 0.\n\tntmp = 0.\n\tnew_pred = []\n\tfor i in range(n):\n\t\tif test_Y[i] in unseen_l and test_Y_pred_vec[i] in unseen_l:\n\t\t\tif test_Y_pred_vec[i] in Y_net[test_Y[i]] and Y_net[test_Y[i]][test_Y_pred_vec[i]] == 1:\n\t\t\t\tacc += 1\n\t\t\t\tntmp += 1\n\t\t\t\tnew_pred.append(test_Y[i])\n\t\t\telse:\n\t\t\t\tnew_pred.append(test_Y_pred_vec[i])\n\t\telse:\n\t\t\tif test_Y[i] == test_Y_pred_vec[i]:\n\t\t\t\tacc += 1\n\t\t\tnew_pred.append(test_Y_pred_vec[i])\n\tnew_pred = np.array(new_pred)\n\treturn acc/n, new_pred\n\n\ndef run_scanorama_multiply_datasets(datasets, genes, scan_dim = 100):\n\tsparse_datasets = []\n\tfor dataset in datasets:\n\t\tsparse_datasets.append(sparse.csr_matrix(dataset))\n\tdatasets, genes = merge_datasets(sparse_datasets, genes)\n\tdatasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim)\n\tdatasets_dimred, sparse_dataset_correct = my_assemble(datasets_dimred, expr_datasets = datasets, sigma=150)\n\tdataset_correct = []\n\tfor sp in sparse_dataset_correct:\n\t\tdataset_correct.append(np.power(sp.todense(), 2))\n\treturn datasets_dimred, dataset_correct\n\n\ndef run_scanorama_same_genes(features, batch_labels, scan_dim = 100):\n\tbatchs = np.unique(batch_labels)\n\tnbatch = len(batchs)\n\tif nbatch == 1:\n\t\treturn features\n\tncell, ngene = np.shape(features)\n\tassert(ncell == len(batch_labels))\n\tgenes = []\n\tdatasets = []\n\tindexs = []\n\tfor i in range(nbatch):\n\t\tgenes.append(np.array(range(ngene)))\n\t\tindex = np.where(batch_labels == batchs[i])[0]\n\t\tdataset = features[index,:]\n\t\tprint (batchs[i], np.shape(dataset))\n\t\tdatasets.append(dataset)\n\t\tindexs.append(index)\n\t_, dataset_correct = run_scanorama_multiply_datasets(datasets, genes, scan_dim = scan_dim)\n\tassert(len(dataset_correct)) == nbatch\n\tfor i in range(nbatch):\n\t\tfeatures[indexs[i],:] = dataset_correct[i]\n\treturn features\n\n\ndef my_assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,\n\t\t\t sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,\n\t\t\t ds_names=None, batch_size=None,\n\t\t\t geosketch=False, geosketch_max=20000, alignments=None, matches=None): # reimplement part of scanorama to return the corrected expression (instead of low-d vectors)\n\t#this code is copy and paste from scanorama in order to output the expression. 
Please check their tool and cite their paper if you used this function.\n\tif len(datasets) == 1:\n\t\treturn datasets\n\n\tif alignments is None and matches is None:\n\t\talignments, matches = find_alignments(\n\t\t\tdatasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,\n\t\t)\n\n\tds_assembled = {}\n\tpanoramas = []\n\tct = 0\n\tfor i, j in alignments:\n\t\tct += 1\n\t\tprint (ct)\n\t\tsys.stdout.flush()\n\t\tif verbose:\n\t\t\tif ds_names is None:\n\t\t\t\tprint('Processing datasets {}'.format((i, j)))\n\t\t\telse:\n\t\t\t\tprint('Processing datasets {} <=> {}'.\n\t\t\t\t\t format(ds_names[i], ds_names[j]))\n\n\t\t# Only consider a dataset a fixed amount of times.\n\t\tif not i in ds_assembled:\n\t\t\tds_assembled[i] = 0\n\t\tds_assembled[i] += 1\n\t\tif not j in ds_assembled:\n\t\t\tds_assembled[j] = 0\n\t\tds_assembled[j] += 1\n\t\tif ds_assembled[i] > 3 and ds_assembled[j] > 3:\n\t\t\tcontinue\n\n\t\t# See if datasets are involved in any current panoramas.\n\t\tpanoramas_i = [ panoramas[p] for p in range(len(panoramas))\n\t\t\t\t\t\tif i in panoramas[p] ]\n\t\tassert(len(panoramas_i) <= 1)\n\t\tpanoramas_j = [ panoramas[p] for p in range(len(panoramas))\n\t\t\t\t\t\tif j in panoramas[p] ]\n\t\tassert(len(panoramas_j) <= 1)\n\n\t\tif len(panoramas_i) == 0 and len(panoramas_j) == 0:\n\t\t\tif datasets[i].shape[0] < datasets[j].shape[0]:\n\t\t\t\ti, j = j, i\n\t\t\tpanoramas.append([ i ])\n\t\t\tpanoramas_i = [ panoramas[-1] ]\n\n\t\t# Map dataset i to panorama j.\n\t\tif len(panoramas_i) == 0:\n\t\t\tcurr_ds = datasets[i]\n\t\t\tcurr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n\t\t\tmatch = []\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_j[0]:\n\t\t\t\tif i < p and (i, p) in matches:\n\t\t\t\t\tmatch.extend([ (a, b + base) for a, b in matches[(i, p)] ])\n\t\t\t\telif i > p and (p, i) in matches:\n\t\t\t\t\tmatch.extend([ (b, a + base) for a, b in matches[(p, i)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\n\t\t\tds_ind = [ a for a, _ in match ]\n\t\t\tref_ind = [ b for _, b in match ]\n\n\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t batch_size=batch_size)\n\t\t\tdatasets[i] = curr_ds + bias\n\n\t\t\tif expr_datasets:\n\t\t\t\tcurr_ds = expr_datasets[i]\n\t\t\t\tcurr_ref = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t\tfor p in panoramas_j[0] ])\n\t\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n\t\t\t\t\t\t\t\t sigma=sigma, cn=True, batch_size=batch_size)\n\t\t\t\texpr_datasets[i] = curr_ds + bias\n\n\t\t\tpanoramas_j[0].append(i)\n\n\t\t# Map dataset j to panorama i.\n\t\telif len(panoramas_j) == 0:\n\t\t\tcurr_ds = datasets[j]\n\t\t\tcurr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n\n\t\t\tmatch = []\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tif j < p and (j, p) in matches:\n\t\t\t\t\tmatch.extend([ (a, b + base) for a, b in matches[(j, p)] ])\n\t\t\t\telif j > p and (p, j) in matches:\n\t\t\t\t\tmatch.extend([ (b, a + base) for a, b in matches[(p, j)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\n\t\t\tds_ind = [ a for a, _ in match ]\n\t\t\tref_ind = [ b for _, b in match ]\n\n\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t batch_size=batch_size)\n\t\t\tdatasets[j] = curr_ds + bias\n\n\t\t\tif expr_datasets:\n\t\t\t\tcurr_ds = expr_datasets[j]\n\t\t\t\tcurr_ref = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t\tfor p in panoramas_i[0] ])\n\t\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t\t cn=True, 
batch_size=batch_size)\n\t\t\t\texpr_datasets[j] = curr_ds + bias\n\n\t\t\tpanoramas_i[0].append(j)\n\n\t\t# Merge two panoramas together.\n\t\telse:\n\t\t\tcurr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n\t\t\tcurr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n\t\t\t# Find base indices into each panorama.\n\t\t\tbase_i = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tif p == i: break\n\t\t\t\tbase_i += datasets[p].shape[0]\n\t\t\tbase_j = 0\n\t\t\tfor p in panoramas_j[0]:\n\t\t\t\tif p == j: break\n\t\t\t\tbase_j += datasets[p].shape[0]\n\n\t\t\t# Find matching indices.\n\t\t\tmatch = []\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tif p == i and j < p and (j, p) in matches:\n\t\t\t\t\tmatch.extend([ (b + base, a + base_j)\n\t\t\t\t\t\t\t\t for a, b in matches[(j, p)] ])\n\t\t\t\telif p == i and j > p and (p, j) in matches:\n\t\t\t\t\tmatch.extend([ (a + base, b + base_j)\n\t\t\t\t\t\t\t\t for a, b in matches[(p, j)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_j[0]:\n\t\t\t\tif p == j and i < p and (i, p) in matches:\n\t\t\t\t\tmatch.extend([ (a + base_i, b + base)\n\t\t\t\t\t\t\t\t for a, b in matches[(i, p)] ])\n\t\t\t\telif p == j and i > p and (p, i) in matches:\n\t\t\t\t\tmatch.extend([ (b + base_i, a + base)\n\t\t\t\t\t\t\t\t for a, b in matches[(p, i)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\n\t\t\tds_ind = [ a for a, _ in match ]\n\t\t\tref_ind = [ b for _, b in match ]\n\n\t\t\t# Apply transformation to entire panorama.\n\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t batch_size=batch_size)\n\t\t\tcurr_ds += bias\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tn_cells = datasets[p].shape[0]\n\t\t\t\tdatasets[p] = curr_ds[base:(base + n_cells), :]\n\t\t\t\tbase += n_cells\n\n\t\t\tif not expr_datasets is None:\n\t\t\t\tcurr_ds = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t for p in panoramas_i[0] ])\n\t\t\t\tcurr_ref = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t\tfor p in panoramas_j[0] ])\n\t\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n\t\t\t\t\t\t\t\t sigma=sigma, cn=True, batch_size=batch_size)\n\t\t\t\tcurr_ds += bias\n\t\t\t\tbase = 0\n\t\t\t\tfor p in panoramas_i[0]:\n\t\t\t\t\tn_cells = expr_datasets[p].shape[0]\n\t\t\t\t\texpr_datasets[p] = curr_ds[base:(base + n_cells), :]\n\t\t\t\t\tbase += n_cells\n\n\t\t\t# Merge panoramas i and j and delete one.\n\t\t\tif panoramas_i[0] != panoramas_j[0]:\n\t\t\t\tpanoramas_i[0] += panoramas_j[0]\n\t\t\t\tpanoramas.remove(panoramas_j[0])\n\n\t\t# Visualize.\n\t\tif view_match:\n\t\t\tplot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)\n\n\treturn datasets, expr_datasets\n", "import sys\nfrom scipy import stats, sparse\nimport numpy as np\nimport os\nfrom collections import Counter, defaultdict\nfrom OnClass.OnClassModel import OnClassModel\nfrom sklearn.metrics import roc_auc_score\nfrom utils import read_ontology_file, ConvertLabels, MapLabel2CL, read_data, read_data_file, read_data, parse_pkl, SplitTrainTest, read_type2genes, corr2_coeff\nfrom config import ontology_data_dir, scrna_data_dir, result_dir, intermediate_dir\n\ndnames = ['muris_facs','muris_droplet','microcebusBernard','microcebusStumpy','microcebusMartine','microcebusAntoine']\n\nmarker_gene_file = scrna_data_dir + 'gene_marker_expert_curated.txt'\nhas_truth_our_auc_mat = defaultdict(dict)\nhas_truth_base_auc_mat = defaultdict(dict)\nno_truth_our_auc_mat = defaultdict(dict)\nfor dnamei, dname1 in 
enumerate(dnames):\n\tcor_file = intermediate_dir + '/Marker_genes/' +dname1+ 'cor.released.npy'\n\tcell_type_nlp_emb_file, cell_type_network_file, cl_obo_file = read_ontology_file(dname1, ontology_data_dir)\n\tOnClass_train_obj = OnClassModel(cell_type_nlp_emb_file = cell_type_nlp_emb_file, cell_type_network_file = cell_type_network_file)\n\tfeature_file, filter_key, label_key, label_file, gene_file = read_data_file(dname1, scrna_data_dir)\n\tif feature_file.endswith('.pkl'):\n\t\tfeature, label, genes = parse_pkl(feature_file, label_file, gene_file, exclude_non_leaf_ontology = True, cell_ontology_file = cell_type_network_file)\n\telif feature_file.endswith('.h5ad'):\n\t\tfeature, genes, label, _, _ = read_data(feature_file, cell_ontology_ids = OnClass_train_obj.cell_ontology_ids,\n\t\texclude_non_leaf_ontology = True, tissue_key = 'tissue', filter_key = filter_key, AnnData_label_key=label_key,\n\t\tnlp_mapping = False, cl_obo_file = cl_obo_file, cell_ontology_file = cell_type_network_file, co2emb = OnClass_train_obj.co2vec_nlp)\n\n\tOnClass_train_obj.EmbedCellTypes(label)\n\tg2i1 = {g : i for i,g in enumerate(genes)}\n\ti2g1 = {i : g for i,g in enumerate(genes)}\n\ttp2genes_base = read_type2genes(g2i1, marker_gene_file, cl_obo_file)\n\tcor = np.load(cor_file)\n\texist_y = np.unique(label)\n\n\tfor dnamej, dname2 in enumerate(dnames):\n\t\tif dname1 == dname2:\n\t\t\tcontinue\n\t\tfeature_file, filter_key, label_key, label_file, gene_file = read_data_file(dname2, scrna_data_dir)\n\t\tif feature_file.endswith('.pkl'):\n\t\t\tfeature2, label2, genes2 = parse_pkl(feature_file, label_file, gene_file, exclude_non_leaf_ontology = True, cell_ontology_file = cell_type_network_file)\n\t\telif feature_file.endswith('.h5ad'):\n\t\t\tfeature2, genes2, label2, _, _ = read_data(feature_file, cell_ontology_ids = OnClass_train_obj.cell_ontology_ids,\n\t\t\texclude_non_leaf_ontology = True, tissue_key = 'tissue', filter_key = filter_key, AnnData_label_key=label_key,\n\t\t\tnlp_mapping = False, cl_obo_file = cl_obo_file, cell_ontology_file = cell_type_network_file, co2emb = OnClass_train_obj.co2vec_nlp)\n\t\tOnClass_test_obj = OnClassModel(cell_type_nlp_emb_file = cell_type_nlp_emb_file, cell_type_network_file = cell_type_network_file)\n\t\tOnClass_test_obj.EmbedCellTypes(label2)\n\t\tnseen = OnClass_test_obj.nseen\n\t\tg2i2 = {g : i for i,g in enumerate(genes2)}\n\t\tcommon_genes = list(set(genes) & set(genes2))\n\n\t\thas_truth_our_auc = []\n\t\thas_truth_tp_base_auc = []\n\t\tno_truth_our_auc = []\n\t\ttopk = 50\n\t\tthres = 0.4\n\t\tsub_cor = np.where(np.abs(cor)>thres, 1, 0)\n\t\tnmarkers = []\n\t\tlabel_mat2 = ConvertLabels(MapLabel2CL(label2, OnClass_test_obj.co2i), len(OnClass_test_obj.i2co))\n\n\t\tfor i in range(nseen):\n\t\t\ttp = OnClass_test_obj.i2co[i]\n\t\t\tour_marker = np.where(sub_cor[OnClass_train_obj.co2i[tp],:]==1)[0]#np.argsort(cor[l2i1[tp],:]*-1)[:topk]\n\t\t\tour_marker_id = [g2i2[i2g1[gi]] for gi in our_marker if i2g1[gi] in g2i2]\n\t\t\tnmarkers.append(len(our_marker_id))\n\t\t\tif len(our_marker_id) ==0:\n\t\t\t\tour_marker = np.argsort(cor[OnClass_train_obj.co2i[tp],:]*-1)[:topk]\n\t\t\t\tour_marker_id = [g2i2[i2g1[gi]] for gi in our_marker if i2g1[gi] in g2i2]\n\t\t\tY_true = label_mat2[:,i]\n\t\t\tY_pred_our = np.mean(feature2[:, our_marker_id], axis=1)\n\t\t\tour_auc = roc_auc_score(Y_true, Y_pred_our)\n\t\t\tif tp in tp2genes_base and len([g2i2[g] for g in tp2genes_base[tp] if g in g2i2])!=0:\n\t\t\t\tbase_marker = tp2genes_base[tp]\n\t\t\t\tbase_marker_id = [g2i2[g] for g 
in base_marker if g in g2i2]\n\t\t\t\tif len(base_marker_id) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tY_pred_base = np.mean(feature2[:, base_marker_id], axis=1)\n\t\t\t\tbase_auc = roc_auc_score(Y_true, Y_pred_base)#roc_auc_score(Y_true, Y_pred_base)\n\t\t\t\thas_truth_our_auc.append(our_auc)\n\t\t\t\thas_truth_tp_base_auc.append(base_auc)\n\t\t\telse:\n\t\t\t\tno_truth_our_auc.append(our_auc)\n\t\tpv = stats.ttest_rel(has_truth_our_auc, has_truth_tp_base_auc)[1] / 2.\n\t\tprint ('%f %s %s %f seen(our,base,length,pv):%f %f %d %d %e' % (thres, dname1,dname2,np.mean(no_truth_our_auc),\n\t\tnp.mean(has_truth_our_auc), np.mean(has_truth_tp_base_auc), len(has_truth_tp_base_auc), np.median(nmarkers), pv))\n\t\thas_truth_our_auc_mat[dname1][dname2] = has_truth_our_auc\n\t\thas_truth_base_auc_mat[dname1][dname2] = has_truth_tp_base_auc\n\t\tno_truth_our_auc_mat[dname1][dname2] = no_truth_our_auc\n\n'''\nndname = len(dnames)\nheat_mat = np.zeros((ndname,ndname))\ngroup_l = []\nfor i,dname1 in enumerate(dnames):\n\tgroup_l.append(dname2keyword[dname1])\n\tfor j,dname2 in enumerate(dnames):\n\t\tif dname1 == dname2:\n\t\t\tcontinue\n\t\theat_mat[i,j] = np.mean(no_truth_our_auc_mat[dname1][dname2])\nplot_heatmap_cross_dataset(heat_mat, group_l, file_name = fig_dir + '.pdf', ylabel = 'AUROC', title='AUROC')\n'''\n" ]
[ [ "numpy.diag", "numpy.dot", "sklearn.metrics.roc_auc_score", "numpy.sqrt", "numpy.nan_to_num", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.nanmean", "scipy.sparse.vstack", "sklearn.metrics.f1_score", "numpy.exp", "numpy.where", "numpy.unique", "sklearn.utils.graph_shortest_path.graph_shortest_path", "numpy.eye", "sklearn.metrics.cohen_kappa_score", "sklearn.metrics.precision_recall_curve", "numpy.full", "numpy.save", "numpy.std", "numpy.argmax", "numpy.copy", "numpy.log1p", "numpy.load", "numpy.zeros", "numpy.log", "numpy.random.choice", "numpy.isnan", "sklearn.metrics.pairwise.cosine_similarity", "sklearn.model_selection.train_test_split", "scipy.sparse.csr_matrix", "scipy.sparse.linalg.svds", "numpy.transpose", "numpy.argsort", "sklearn.metrics.auc", "numpy.array", "numpy.sum", "numpy.log2", "numpy.random.seed", "numpy.linalg.norm", "numpy.random.shuffle", "numpy.percentile", "sklearn.preprocessing.normalize", "numpy.shape", "sklearn.metrics.average_precision_score", "numpy.vstack", "sklearn.metrics.accuracy_score" ], [ "scipy.stats.ttest_rel", "sklearn.metrics.roc_auc_score", "numpy.abs", "numpy.unique", "numpy.median", "numpy.mean", "numpy.argsort", "numpy.load", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
HongLabTHU/dual-mVEPs
[ "f387584865a45a7257d8203fcb9522820e1311de" ]
[ "Offline/dataset.py" ]
[ "import glob\nimport os\nimport warnings\nfrom datetime import datetime\nfrom copy import deepcopy\n\nimport numpy as np\nimport pyedflib\nimport scipy.io as sio\n\nfrom config import cfg\nfrom thirdparty.cerebus import NsxFile, NevFile\nfrom thirdparty.nex import Reader as NexReader\nfrom .utils import find_nearest_time\n\n\ndef _load_neuracle(data_dir):\n \"\"\"\n neuracle file loader\n :param data_dir: root data dir for the experiment\n :return:\n data: ndarray, (channels, timesteps)\n ch_name: list, name of channels\n timestamp: list, index of trigger\n \"\"\"\n f = {\n 'data': os.path.join(data_dir, 'data.bdf'),\n 'evt': os.path.join(data_dir, 'evt.bdf')\n }\n # read data\n f_data = pyedflib.EdfReader(f['data'])\n ch_names = f_data.getSignalLabels()\n data = np.array([f_data.readSignal(i) for i in range(f_data.signals_in_file)])\n\n # sample frequiencies\n sfreq = f_data.getSampleFrequencies()\n assert np.unique(sfreq).size == 1\n if cfg.amp_info.samplerate != sfreq[0]:\n warnings.warn('Samplerate in config file does not equal to data file record')\n cfg.amp_info.samplerate = int(sfreq[0])\n\n # read event\n f_evt = pyedflib.EdfReader(f['evt'])\n event, _, _ = f_evt.readAnnotations()\n event = list(map(lambda x: int(x * cfg.amp_info.samplerate), event))\n\n return data, ch_names, event\n\n\ndef _load_usbamp(data_dir):\n \"\"\"\n USBAmp file loader\n :param data_dir: root dir\n :return:\n data: ndarray, (channels, timesteps)\n ch_name: list, name of channels\n timestamp: list, index of trigger\n \"\"\"\n # edf USBAmp\n files = glob.glob(os.path.join(data_dir, '*.edf'))\n assert len(files) == 1\n f = pyedflib.EdfReader(files[0])\n ch_names = f.getSignalLabels()\n # filter channel\n # find trigger channel\n triggers = []\n sig = []\n for i, chan in enumerate(ch_names):\n if 'trigger' in chan:\n triggers.append(i)\n else:\n sig.append(i)\n sigbuf = np.array([f.readSignal(i) for i in range(len(ch_names))])\n ch_names = [ch_names[i] for i in sig]\n trigger = -1\n for ch_ind in triggers:\n if not np.allclose(np.diff(sigbuf[ch_ind]), 0):\n trigger = ch_ind\n break\n diff = np.diff(sigbuf[trigger])\n timestamp = np.nonzero(np.logical_and(diff <= 1, diff >= 0.2))[0].tolist()\n data = sigbuf[sig]\n return data, ch_names, timestamp\n\n\ndef _load_nex(data_dir):\n \"\"\"\n nex file loader\n :param data_dir:\n :return:\n data: ndarray, shape (ch, timesteps)\n ch_names: list, name of each channel\n timestamps: list, stimulation onset\n \"\"\"\n files = glob.glob(os.path.join(data_dir, '*.nex'))\n assert len(files) == 1\n\n reader = NexReader(useNumpy=True)\n data = reader.ReadNexFile(files[0])\n\n var = data['Variables']\n ch_names = []\n trigger_ch = None\n con_data = []\n samplerate = cfg.amp_info.samplerate\n for i, ch in enumerate(var):\n if 'CH' in ch['Header']['Name']:\n ch_names.append(ch['Header']['Name'])\n con_data.append(ch['ContinuousValues'])\n samplerate = ch['Header']['SamplingRate']\n if 'digin' == ch['Header']['Name']:\n trigger_ch = i\n if samplerate != cfg.amp_info.samplerate:\n warnings.warn('Samplerate in config file does not equal to data file record, recorded value is %d' % samplerate)\n assert trigger_ch is not None\n timestamp = np.round(data['Variables'][trigger_ch]['Timestamps'] * samplerate).astype(np.int32).tolist()\n con_data = np.array(con_data)\n return con_data, ch_names, timestamp\n\n\ndef _load_cerebus(data_dir):\n # search data_dir\n nsx_files = glob.glob(os.path.join(data_dir, '*.ns*'))\n nev_files = glob.glob(os.path.join(data_dir, '*.nev'))\n assert 
len(nsx_files) == len(nev_files) == 1\n # loading\n f_data = NsxFile(nsx_files[0])\n f_evt = NevFile(nev_files[0])\n data = f_data.getdata()\n evt = f_evt.getdata()\n\n f_data.close()\n f_evt.close()\n\n # some basic information\n samplerate = data['samp_per_s']\n if cfg.amp_info.samplerate != samplerate:\n warnings.warn('Samplerate in config file does not equal to data file record')\n cfg.amp_info.samplerate = samplerate\n\n timestampresolution = f_evt.basic_header['TimeStampResolution']\n ch_names = []\n for info in f_data.extended_headers:\n ch_names.append(info['ElectrodeLabel'])\n\n event = evt['dig_events']['TimeStamps'][0]\n event = list(map(lambda x: int(x / timestampresolution * cfg.amp_info.samplerate), event))\n return data['data'], ch_names, event\n\n\nclass Dataset:\n \"\"\"\n for loading data and event order.\n \"\"\"\n data_format = {\n 'nex': _load_nex,\n 'ns3': _load_cerebus,\n 'nev': _load_cerebus,\n 'edf': _load_usbamp,\n 'bdf': _load_neuracle\n }\n\n def __init__(self, subject, date=None, loaddata=True):\n self.subject = subject\n self._subj_path = os.path.dirname(__file__) + '/../data/' + subject\n if date is None:\n self._date = find_nearest_time(self._subj_path)\n else:\n if isinstance(date, datetime):\n # convert datetime to str\n self._date = date.strftime(\"%Y-%m-%d-%H-%M-%S\")\n else:\n self._date = date\n print(self._date)\n self.root_dir = os.path.join(self._subj_path, self._date)\n\n # self.montage = OrderedSet(cfg.subj_info.montage)\n self.montage = deepcopy(cfg.subj_info.montage)\n\n # load stim order\n self.events = self.load_event()\n\n if loaddata:\n self.load_all()\n else:\n self.data, self.ch_names, self.timestamp, self.montage_indices, self.events_backup = [None] * 5\n\n def load_all(self):\n # load data and timestamps\n dataarray, ch_names, timestamp = self._load_data()\n timestamp = Dataset.ts_check(timestamp)\n self.data = dataarray\n # list to set\n self.ch_names = ch_names\n self.timestamp = timestamp\n self.montage_indices = self.get_channel_indices(self.montage, self.ch_names)\n\n self.events_backup = self.events.copy()\n if cfg.exp_config.bidir:\n assert 2 * len(timestamp) == self.events.size, print('Dual-directional: ', len(timestamp), self.events.size)\n self.events = self.events[:, ::2]\n else:\n assert len(timestamp) == self.events.size, print('Unidirectional: ', len(timestamp), self.events.size)\n\n def _load_data(self):\n \"\"\"\n Read data according to file format\n :return:\n dataext: str, data file name\n\n \"\"\"\n walk_path = self.root_dir\n loader = None\n for f in os.listdir(walk_path):\n _ext = f.split('.')[-1]\n try:\n loader = Dataset.data_format[_ext]\n break\n except KeyError:\n pass\n if loader is None:\n raise FileNotFoundError('No matching data format found')\n return loader(walk_path)\n\n def load_event(self):\n walk_path = self.root_dir\n file = glob.glob(os.path.join(walk_path, self.subject) + '*')\n assert len(file) == 1\n file = file[0]\n\n if file.endswith('.mat'):\n raw = sio.loadmat(file)\n order = raw['stim_order']\n order -= 1\n return order.reshape((-1, 12))\n else:\n with open(file) as f:\n stim_order = [[int(x) for x in line.split()] for line in f if len(line) > 1]\n return np.array(stim_order)\n\n @staticmethod\n def get_channel_indices(target_channels, channels_in_data):\n \"\"\"\n Get corresponding index number for channels in target channels\n :param target_channels: list, target channel names\n :param channels_in_data: list, all channel names in data source.\n :return:\n \"\"\"\n indices = []\n # build a 
dictionary for indexing\n channel_book = {name: i for i, name in enumerate(channels_in_data)}\n for ch in target_channels:\n try:\n indices.append(channel_book[ch])\n except ValueError as err:\n print(err)\n\n return indices\n\n @staticmethod\n def ts_check(ts):\n # check time stamp intervals.\n # In our experience, sometimes an accidental wrong trigger may appear at the beginning during recording.\n fs = cfg.amp_info.samplerate\n while len(ts) % 12 and (not (fs * 0.1 <= ts[1] - ts[0] <= fs * 0.3)):\n del ts[0]\n return ts\n" ]
[ [ "numpy.logical_and", "numpy.unique", "scipy.io.loadmat", "numpy.round", "numpy.diff", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
pabloserna/SentimentAnalysisinAWS
[ "d94572665442ef6f49deb07ed78f8104654fefc3" ]
[ "train/model_stack.py" ]
[ "import torch.nn as nn\n\nclass LSTMClassifier(nn.Module):\n \"\"\"\n This is the simple RNN model we will be using to perform Sentiment Analysis.\n \"\"\"\n\n def __init__(self, embedding_dim, hidden_dim, vocab_size):\n \"\"\"\n Initialize the model by settingg up the various layers.\n \"\"\"\n super(LSTMClassifier, self).__init__()\n\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\n self.lstmA = nn.LSTM(embedding_dim, hidden_dim)\n self.lstmB = nn.LSTM(hidden_dim, hidden_dim)\n self.dense = nn.Linear(in_features=hidden_dim, out_features=1)\n self.sig = nn.Sigmoid()\n \n self.word_dict = None\n\n def forward(self, x):\n \"\"\"\n Perform a forward pass of our model on some input.\n \"\"\"\n x = x.t()\n lengths = x[0,:]\n reviews = x[1:,:]\n embeds = self.embedding(reviews)\n lstm_out1, _ = self.lstmA(embeds)\n lstm_out, _ = self.lstmB(lstm_out1)\n out = self.dense(lstm_out)\n out = out[lengths - 1, range(len(lengths))]\n return self.sig(out.squeeze())" ]
[ [ "torch.nn.Linear", "torch.nn.Sigmoid", "torch.nn.Embedding", "torch.nn.LSTM" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KlausBSautter/Kratos
[ "1ceb900dbacfab344e27e32285250eafc52093ec" ]
[ "applications/RomApplication/python_scripts/structural_mechanics_analysis_rom.py" ]
[ "import KratosMultiphysics\nimport KratosMultiphysics.RomApplication as romapp\nimport KratosMultiphysics.StructuralMechanicsApplication\nfrom KratosMultiphysics.RomApplication.empirical_cubature_method import EmpiricalCubatureMethod\nfrom KratosMultiphysics.RomApplication import python_solvers_wrapper_rom as solver_wrapper\nfrom KratosMultiphysics.StructuralMechanicsApplication.structural_mechanics_analysis import StructuralMechanicsAnalysis\n\nimport json\nimport numpy as np\n\nclass StructuralMechanicsAnalysisROM(StructuralMechanicsAnalysis):\n\n def __init__(self,model,project_parameters, hyper_reduction_element_selector = None):\n super().__init__(model,project_parameters)\n if hyper_reduction_element_selector != None :\n if hyper_reduction_element_selector == \"EmpiricalCubature\":\n self.hyper_reduction_element_selector = EmpiricalCubatureMethod()\n self.time_step_residual_matrix_container = []\n else:\n err_msg = \"The requested element selection method \\\"\" + hyper_reduction_element_selector + \"\\\" is not in the rom application\\n\"\n err_msg += \"Available options are: \\\"EmpiricalCubature\\\"\"\n raise Exception(err_msg)\n else:\n self.hyper_reduction_element_selector = None\n\n #### Internal functions ####\n def _CreateSolver(self):\n \"\"\" Create the Solver (and create and import the ModelPart if it is not alread in the model) \"\"\"\n ## Solver construction\n with open('RomParameters.json') as rom_parameters:\n rom_settings = KratosMultiphysics.Parameters(rom_parameters.read())\n self.project_parameters[\"solver_settings\"].AddValue(\"rom_settings\", rom_settings[\"rom_settings\"])\n return solver_wrapper.CreateSolverByParameters(self.model, self.project_parameters[\"solver_settings\"],self.project_parameters[\"problem_data\"][\"parallel_type\"].GetString())\n\n def _GetSimulationName(self):\n return \"::[ROM Simulation]:: \"\n\n def ModifyAfterSolverInitialize(self):\n \"\"\"Here is where the ROM_BASIS is imposed to each node\"\"\"\n super().ModifyAfterSolverInitialize()\n computing_model_part = self._solver.GetComputingModelPart()\n with open('RomParameters.json') as f:\n data = json.load(f)\n nodal_dofs = len(data[\"rom_settings\"][\"nodal_unknowns\"])\n nodal_modes = data[\"nodal_modes\"]\n counter = 0\n rom_dofs= self.project_parameters[\"solver_settings\"][\"rom_settings\"][\"number_of_rom_dofs\"].GetInt()\n for node in computing_model_part.Nodes:\n aux = KratosMultiphysics.Matrix(nodal_dofs, rom_dofs)\n for j in range(nodal_dofs):\n Counter=str(node.Id)\n for i in range(rom_dofs):\n aux[j,i] = nodal_modes[Counter][j][i]\n node.SetValue(romapp.ROM_BASIS, aux ) # ROM basis\n counter+=1\n if self.hyper_reduction_element_selector != None:\n if self.hyper_reduction_element_selector.Name == \"EmpiricalCubature\":\n self.ResidualUtilityObject = romapp.RomResidualsUtility(self._GetSolver().GetComputingModelPart(), self.project_parameters[\"solver_settings\"][\"rom_settings\"], self._GetSolver().get_solution_scheme())\n\n def FinalizeSolutionStep(self):\n if self.hyper_reduction_element_selector != None:\n if self.hyper_reduction_element_selector.Name == \"EmpiricalCubature\":\n print('\\n\\n\\n\\nGenerating matrix of residuals')\n ResMat = self.ResidualUtilityObject.GetResiduals()\n NP_ResMat = np.array(ResMat, copy=False)\n self.time_step_residual_matrix_container.append(NP_ResMat)\n super().FinalizeSolutionStep()\n\n def Finalize(self):\n super().Finalize()\n if self.hyper_reduction_element_selector != None:\n if self.hyper_reduction_element_selector.Name == 
\"EmpiricalCubature\":\n OriginalNumberOfElements = self._GetSolver().GetComputingModelPart().NumberOfElements()\n ModelPartName = self._GetSolver().settings[\"model_import_settings\"][\"input_filename\"].GetString()\n self. hyper_reduction_element_selector.SetUp(self.time_step_residual_matrix_container, OriginalNumberOfElements, ModelPartName)\n self.hyper_reduction_element_selector.Run()\n\n\n\n\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sentinel-hub/multi-temporal-super-resolution
[ "5ef642304a980db87bdb935a7a7450bd649f8912" ]
[ "sr/data_loader.py" ]
[ "import os\nfrom collections import OrderedDict\nfrom typing import Tuple, List, Callable\n\nfrom fs_s3fs import S3FS\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom skimage.exposure import match_histograms\nfrom datetime import datetime\nfrom eolearn.core import EOPatch\n\n\ndef augment(\n lr: np.ndarray,\n hr: np.ndarray,\n flip: bool = True,\n rotate: bool = True,\n distribution_shift: bool = False,\n distribution_scale: bool = False,\n permute_timestamps: bool = True,\n max_distribution_shift: float = 0.25,\n max_distribution_scale_diff: float = 0.25,\n proba_of_original: float = 0.67\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Performs a series of image augmentations with specified probability.\n\n :param lr: array of low-resolution images, shape is `CxTxHxW`\n :param hr: array of high-resolution images, shape is `CxHxW`\n :param flip: whether to randomly flip height or width of arrays\n :param rotate: whether to randomly rotate the arrays\n :param distribution_shift: add an offset to the distribution\n :param distribution_scale: scale the channels distribution\n :param permute_timestamps: permute timestamps (not desired for HRN)\n :param max_distribution_shift: set max distribution shift used in distribution shift augmentation\n :param max_distribution_scale_diff: set max distribution scale used in distribution scale augmentation\n :param proba_of_original: set probability of not modifying original patch, e.g. 1 means no augmetnations\n :returns: augmented lr and hr arrays\n \"\"\"\n\n # Base probability which, after `n_aug_conditions`, reduces to `proba_of_original`\n n_aug_conditions = sum(1. for aug_op in (flip, rotate, distribution_shift, distribution_scale, permute_timestamps)\n if aug_op)\n rng_threshold = proba_of_original ** (1. / n_aug_conditions)\n\n if flip and np.random.random() > rng_threshold:\n flip_axis = np.random.choice([-2, -1])\n lr = np.flip(lr, axis=flip_axis)\n hr = np.flip(hr, axis=flip_axis)\n\n if rotate and np.random.random() > rng_threshold:\n k = np.random.choice(np.arange(-2, 3))\n\n lr = np.rot90(lr, k=k, axes=(-2, -1))\n hr = np.rot90(hr, k=k, axes=(-2, -1))\n\n if distribution_shift and np.random.random() > rng_threshold:\n d_shift = (np.random.random() - 0.5) * max_distribution_shift\n\n lr = lr + d_shift\n hr = hr + d_shift\n\n if distribution_scale and np.random.random() > rng_threshold:\n d_scale = 1. 
+ (np.random.random() - 0.5) * max_distribution_scale_diff\n\n lr_mean = np.mean(lr, axis=(-2, -1))[..., None, None]\n hr_mean = np.mean(hr, axis=(-2, -1))[..., None, None]\n\n lr = (lr - lr_mean) * d_scale + lr_mean\n hr = (hr - hr_mean) * d_scale + hr_mean\n\n if permute_timestamps and np.random.random() > rng_threshold:\n # expects lr in `CxTxHxW` shape\n indices = np.random.permutation(lr.shape[1])\n lr = lr[:, indices]\n\n return lr, hr\n\n\ndef pad_to_k(feat: np.ndarray, k: int = 16, pad_to_front: bool = True) -> np.ndarray:\n \"\"\" Create an array with first dimension equal to k, filling with 0s in front or at back \"\"\"\n n_pad = k - len(feat)\n\n if n_pad < 0:\n raise ValueError(f'Can not pad when length of features: {len(feat)} is longer than k: {k}')\n\n (_, h, w, c) = feat.shape\n if pad_to_front:\n feat = np.concatenate((np.zeros(shape=(n_pad, h, w, c)), feat))\n else:\n feat = np.concatenate((feat, np.zeros(shape=(n_pad, h, w, c))))\n\n return feat\n\n\nclass ImageSet(OrderedDict):\n \"\"\"\n An OrderedDict derived class to group the assets of an imageset, with a pretty-print functionality.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ImageSet, self).__init__(*args, **kwargs)\n\n def __repr__(self):\n dict_info = f\"{'name':>10} : {self['name']}\"\n\n for name, v in self.items():\n if hasattr(v, 'shape'):\n dict_info += f\"\\n{name:>10} : {v.shape} {v.__class__.__name__} ({v.dtype})\"\n else:\n dict_info += f\"\\n{name:>10} : {v.__class__.__name__} ({v})\"\n return dict_info\n\n\ndef read_imageset(imset_file: str,\n filesystem: S3FS = None,\n normalize: bool = True,\n country_norm_df: pd.DataFrame = None,\n norm_deimos_npz: np.lib.npyio.NpzFile = None,\n norm_s2_npz: np.lib.npyio.NpzFile = None,\n n_views: int = 16,\n padding: str = 'zeros',\n histogram_matching: bool = False) -> ImageSet:\n \"\"\"\n Retrieves all assets from the given directory.\n\n :param imset_file: name of npz file with sample imageset\n :param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk\n :param normalize: whether to normalize data or not\n :param country_norm_df: S2 median/std normalization factors stored per country\n :param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS\n :param norm_s2_npz: 1st and 99th percentile normalization factors for S2\n :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time\n frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first\n :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. 
Supported\n options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of\n timeframes are taken\n :param histogram_matching: whether to match the histogram between the HR and the corresponding LR image\n \"\"\"\n assert padding in ['zeros', 'repeat']\n\n # Read asset names\n npz = np.load(filesystem.openbin(imset_file), allow_pickle=True) if filesystem else np.load(imset_file,\n allow_pickle=True)\n \n features = npz['features']\n hr = npz['labels']\n\n if normalize:\n country = npz['countries']\n country_stats = country_norm_df[country_norm_df.country == str(country)]\n norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values\n\n norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values\n features = (features - norm_median) / norm_std\n\n deimos_p1 = norm_deimos_npz['p1']\n deimos_p99 = norm_deimos_npz['p99']\n\n s2_p1 = norm_s2_npz['p1']\n s2_p99 = norm_s2_npz['p99']\n\n hr = (hr - deimos_p1) / (deimos_p99 - deimos_p1)\n features = (features - s2_p1) / (s2_p99 - s2_p1)\n\n alphas = np.ones(n_views)\n\n if histogram_matching:\n hr = match_histograms(hr, features[-1], multichannel=True)\n\n n_feature_timestamps = len(features)\n if n_feature_timestamps < n_views:\n if padding == 'zeros':\n features = pad_to_k(features, n_views, pad_to_front=False)\n alphas[n_feature_timestamps:] = 0\n elif padding == 'repeat':\n n_pad = n_views - n_feature_timestamps\n padded = features[-1:].repeat(n_pad, axis=0)\n features = np.concatenate((features, padded))\n else:\n features = features[-n_views:, ...]\n\n # Tensor is `CxTxHxW`\n features = np.moveaxis(features, -1, 0)\n hr = np.moveaxis(hr, 2, 0)\n \n imageset = ImageSet(name=os.path.basename(imset_file),\n timestamp_deimos=str(npz['timetamps_deimos'].item()),\n lr=features,\n hr=hr,\n alphas=alphas)\n return imageset\n\n\nclass ImagesetDataset(Dataset):\n \"\"\" Derived Dataset class for loading many imagesets from a list of directories.\n\n :param imset_dir: name of directory containing files\n :param imset_npz_files: list of filenames that constitute the dataset\n :param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`\n if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS\n (`BxTxCxHxW`)\n :param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk\n :param normalize: whether to normalize data or not\n :param country_norm_df: S2 median/std normalization factors stored per country\n :param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS\n :param norm_s2_npz: 1st and 99th percentile normalization factors for S2\n :param channels_feats: which channels (i.e. indices) are extracted from lrs sequence\n :param channels_labels: which channels (i.e. indices) are extracted from hr image\n :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time\n frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first\n :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. 
Supported\n options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of\n timeframes are taken\n :param transform: function executed on lr and hr arrays as augmentation\n :param histogram_matching: whether to match the histogram between the HR and the corresponding LR image\n \"\"\"\n\n def __init__(\n self,\n imset_dir: str,\n imset_npz_files: list,\n time_first: bool,\n filesystem: object = None,\n normalize: bool = True,\n country_norm_df: object = None,\n norm_deimos_npz: np.ndarray = None,\n norm_s2_npz: np.ndarray = None,\n channels_feats: List[int] = [0, 1, 2, 3],\n channels_labels: List[int] = [0, 1, 2, 3],\n n_views: int = 16,\n padding: str = 'zeros',\n transform: Callable = None,\n histogram_matching: bool = False\n ):\n\n super().__init__()\n self.imset_dir = imset_dir\n self.filesystem = filesystem\n self.imset_npz_files = imset_npz_files\n self.time_first = time_first\n self.normalize = normalize\n self.country_norm_df = country_norm_df\n self.norm_deimos_npz = norm_deimos_npz\n self.norm_s2_npz = norm_s2_npz\n self.channels_feats = channels_feats\n self.channels_labels = channels_labels\n self.n_views = n_views\n self.padding = padding\n self.transform = transform\n self.histogram_matching = histogram_matching\n\n def __len__(self):\n return len(self.imset_npz_files)\n\n def __getitem__(self, index: int) -> ImageSet:\n \"\"\" Returns an ImageSet dict of all assets in the directory of the given index.\"\"\"\n\n if isinstance(index, int):\n imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])\n else:\n raise KeyError('Index must be of type `int`.')\n\n imset = read_imageset(\n imset_file=imset_file,\n filesystem=self.filesystem,\n normalize=self.normalize,\n country_norm_df=self.country_norm_df,\n norm_deimos_npz=self.norm_deimos_npz,\n norm_s2_npz=self.norm_s2_npz,\n n_views=self.n_views,\n padding=self.padding,\n histogram_matching=self.histogram_matching\n )\n\n lr = imset['lr'][self.channels_feats]\n hr = imset['hr'][self.channels_labels]\n\n if self.transform is not None:\n lr, hr = self.transform(lr, hr)\n\n if self.time_first:\n lr = np.swapaxes(lr, 0, 1)\n\n imset['lr'] = torch.from_numpy(lr.copy())\n imset['hr'] = torch.from_numpy(hr.copy())\n imset['alphas'] = torch.from_numpy(imset['alphas'])\n\n return imset\n\n\ndef filter_cloudy_s2(eop, max_cc):\n idxs = [] \n for i, _ in enumerate(eop.timestamp): \n if (eop.mask['CLM'][i, ...].mean() <= max_cc) and (eop.mask['IS_DATA'].mean() == 1): \n idxs.append(i)\n eop.data['BANDS'] = eop.data['BANDS'][idxs, ...]\n eop.data['CLP'] = eop.data['CLP'][idxs, ...]\n eop.mask['CLM'] = eop.mask['CLM'][idxs, ...]\n eop.mask['IS_DATA'] = eop.mask['IS_DATA'][idxs, ...]\n eop.timestamp = list(np.array(eop.timestamp)[idxs])\n return eop \n\n\ndef timestamps_within_date(timestamps, start_date, end_date): \n timestamps = [ts.replace(tzinfo=None) for ts in timestamps] # Remove TZINfo that is present in batch\n return [i for i, ts in enumerate(timestamps) if ts >= start_date and ts < end_date]\n\n\ndef read_imageset_eopatch(imset_file: str,\n start_date: datetime, \n end_date: datetime,\n country: str,\n filesystem: S3FS = None,\n normalize: bool = True,\n country_norm_df: pd.DataFrame = None,\n norm_s2_npz: np.lib.npyio.NpzFile = None,\n n_views: int = 16,\n padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet:\n \"\"\"\n Retrieves all assets from the given directory.\n\n :param imset_file: name of npz file with sample imageset\n :param filesystem: S3 filesystem to 
read files directly from bucket. Default reads from local disk\n :param start_date: specifies the start of the temporal range of the stack of images used for prediction\n :param end_date: specifies the end of the temporal range of the stack of images used for prediction\n :param country: specifies the name of the country so it can be matched with the country_norm_df \n :param normalize: whether to normalize data or not\n :param country_norm_df: S2 median/std normalization factors stored per country\n :param norm_s2_npz: 1st and 99th percentile normalization factors for S2\n :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time\n frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first\n :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported\n options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of\n timeframes are taken\n \"\"\"\n assert padding in ['zeros', 'repeat']\n\n eopatch = EOPatch.load(imset_file, filesystem=filesystem, lazy_loading=True)\n noncloudy = filter_cloudy_s2(eopatch, max_cc=0.1)\n ts_idxs = timestamps_within_date(noncloudy.timestamp, start_date, end_date)\n features = noncloudy.data['BANDS'][ts_idxs, ...] / 10000\n filtered_ts = [eopatch.timestamp[tsi] for tsi in ts_idxs]\n\n\n if normalize:\n country_stats = country_norm_df[country_norm_df.country == str(country)]\n norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values\n norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values\n features = (features - norm_median) / norm_std\n\n s2_p1 = norm_s2_npz['p1']\n s2_p99 = norm_s2_npz['p99']\n features = (features - s2_p1) / (s2_p99 - s2_p1)\n\n alphas = np.ones(n_views)\n if histogram_matching:\n hr = match_histograms(hr, features[-1], multichannel=True)\n\n\n n_feature_timestamps = len(features)\n if n_feature_timestamps < n_views:\n if padding == 'zeros':\n features = pad_to_k(features, n_views, pad_to_front=False)\n alphas[n_feature_timestamps:] = 0\n elif padding == 'repeat':\n n_pad = n_views - n_feature_timestamps\n padded = features[-1:].repeat(n_pad, axis=0)\n features = np.concatenate((features, padded))\n else:\n features = features[-n_views:, ...]\n\n # Tensor is `CxTxHxW`\n features = np.moveaxis(features, -1, 0)\n\n imageset = ImageSet(name=os.path.basename(imset_file),\n lr=features,\n alphas=alphas,\n\t\t\tts=filtered_ts[::-1])\n return imageset\n\n\nclass EopatchPredictionDataset(Dataset):\n \"\"\" Derived Dataset class for loading many imagesets from a list of directories.\n\n :param imset_dir: name of directory containing files\n :param imset_npz_files: list of filenames that constitute the dataset\n :param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`\n if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS\n (`BxTxCxHxW`)\n :param filesystem: S3 filesystem to read files directly from bucket. 
Default reads from local disk\n :param start_date: specifies the start of the temporal range of the stack of images used for prediction\n :param end_date: specifies the end of the temporal range of the stack of images used for prediction\n :param country: specifies the name of the country so it can be matched with the country_norm_df \n :param normalize: whether to normalize data or not\n :param country_norm_df: S2 median/std normalization factors stored per country\n :param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS\n :param norm_s2_npz: 1st and 99th percentile normalization factors for S2\n :param channels_feats: which channels (i.e. indices) are extracted from lrs sequence\n :param channels_labels: which channels (i.e. indices) are extracted from hr image\n :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time\n frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first\n :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported\n options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of\n timeframes are taken\n :param transform: function executed on lr and hr arrays as augmentation\n \"\"\"\n\n def __init__(\n self,\n imset_dir: str,\n imset_npz_files: list,\n time_first: bool,\n start_date: datetime,\n end_date: datetime,\n country: str,\n filesystem: object = None,\n normalize: bool = True,\n country_norm_df: object = None,\n norm_deimos_npz: np.ndarray = None,\n norm_s2_npz: np.ndarray = None,\n channels_feats: List[int] = [0, 1, 2, 3],\n n_views: int = 16,\n padding: str = 'zeros',\n histogram_matching: bool = False\n ):\n\n super().__init__()\n self.imset_dir = imset_dir\n self.filesystem = filesystem\n self.imset_npz_files = imset_npz_files\n self.time_first = time_first\n self.normalize = normalize\n self.country_norm_df = country_norm_df\n self.norm_deimos_npz = norm_deimos_npz\n self.norm_s2_npz = norm_s2_npz\n self.channels_feats = channels_feats\n self.n_views = n_views\n self.padding = padding\n self.start_date = start_date\n self.end_date = end_date\n self.histogram_matching = histogram_matching\n self.country = country\n\n def __len__(self):\n return len(self.imset_npz_files)\n\n def __getitem__(self, index: int) -> ImageSet:\n \"\"\" Returns an ImageSet dict of all assets in the directory of the given index.\"\"\"\n\n if isinstance(index, int):\n imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])\n else:\n raise KeyError('Index must be of type `int`.') \n \n imset = read_imageset_eopatch(\n imset_file=imset_file,\n filesystem=self.filesystem,\n normalize=self.normalize,\n country_norm_df=self.country_norm_df,\n norm_deimos_npz=self.norm_deimos_npz,\n norm_s2_npz=self.norm_s2_npz,\n n_views=self.n_views,\n padding=self.padding,\n start_date=self.start_date,\n end_date=self.end_date,\n country=self.country,\n histogram_matching=self.histogram_matching, \n )\n\n lr = imset['lr'][self.channels_feats]\n \n if self.time_first:\n lr = np.swapaxes(lr, 0, 1)\n\n imset['lr'] = torch.from_numpy(lr.copy())\n imset['alphas'] = torch.from_numpy(imset['alphas'])\n\n return imset\n\n" ]
[ [ "numpy.rot90", "numpy.swapaxes", "numpy.random.random", "numpy.random.choice", "numpy.arange", "torch.from_numpy", "numpy.ones", "numpy.concatenate", "numpy.random.permutation", "numpy.mean", "numpy.moveaxis", "numpy.load", "numpy.array", "numpy.flip", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aaparikh/Intermediate-Python-Practice
[ "6f49bea8f677e7ed500cd1ec91df4c8531832abb" ]
[ "ranndom_m.py" ]
[ "#there are many ways we can do random numbers\n\n#1. import random\n#used to produce pseudo-random numbers. \n# They are called pseudo-random because they are not truly random and can be reproduced.\nimport random\n\na = random.random() #random float between 0 and 1\nb = random.uniform(1,10) #random float between 1 and 10\nc = random.randrange(1,10) #random integer between 1 and 10 (not including 10)\nd = random.randint(1,10) #random integer between 1 and 10 (including 10)\ne = random.choice(['a','b','c']) #random element from a list\n#sample picks one element one time and choices may pick one element multiple times\nf = random.sample(range(1,10),3) #3 random elements from a list\ng = random.choices(range(1,10),k=3) #3 random elements from a list\nh = random.normalvariate(0,1) #random float from normal distribution with mean 0 and standard deviation 1\nrandom.shuffle(['a','b','c']) #shuffle a list in place\nrandom.seed(10) #set the seed for the random number generator to 10 (so that the same sequence of numbers will be generated)\n\n\nimport secrets #secrets — Generate secure random numbers for managing secrets (True randomness)\n# https://docs.python.org/3/library/secrets.html\n#But this is slower than random module as more complex algorithms are used.\n\na = secrets.randbelow(10) #random integer between 0 and 9\nb = secrets.randbits(10) #random integer between 0 and 2**10-1\nc = secrets.choice(['a','b','c']) #random element from a list\nd = secrets.sample(range(1,10),3) #3 random elements from a list\n\n\n#2. import numpy\nimport numpy as np\n#numpy random generator uses a different generator than random module and also has a different seed\nnp.random.seed(10) #set the seed for the random number generator to 10 (so that the same sequence of numbers will be generated)\na = np.random.random() #random float between 0 and 1\nb = np.random.uniform(1,10) #random float between 1 and 10\nc = np.random.randrange(1,10) #random integer between 1 and 10 (not including 10)\nd = np.random.randint(1,10) #random integer between 1 and 10 (including 10)\ne = np.random.choice(['a','b','c']) #random element from a list\nf = np.random.randn(3) #list of 3 random elements " ]
[ [ "numpy.random.random", "numpy.random.seed", "numpy.random.choice", "numpy.random.randrange", "numpy.random.randn", "numpy.random.uniform", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Subarna578/pythainlp
[ "9650a40396719284add17bb09f50e948dea41053" ]
[ "pythainlp/transliterate/thai2rom.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nRomanization of Thai words based on machine-learnt engine (\"thai2rom\")\n\"\"\"\n\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pythainlp.corpus import download, get_corpus_path\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass ThaiTransliterator:\n def __init__(self):\n \"\"\"\n Transliteration of Thai words\n Now supports Thai to Latin (romanization)\n \"\"\"\n # Download the model, if it's not on your machine.\n self.__filemodel = get_corpus_path(\"thai2rom-pytorch-attn\")\n if not self.__filemodel:\n download(\"thai2rom-pytorch-attn\")\n self.__filemodel = get_corpus_path(\"thai2rom-pytorch-attn\")\n\n loader = torch.load(self.__filemodel, map_location=device)\n\n INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader[\"encoder_params\"]\n OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader[\"decoder_params\"]\n\n self._maxlength = 100\n\n self._char_to_ix = loader[\"char_to_ix\"]\n self._ix_to_char = loader[\"ix_to_char\"]\n self._target_char_to_ix = loader[\"target_char_to_ix\"]\n self._ix_to_target_char = loader[\"ix_to_target_char\"]\n\n # encoder/ decoder\n # Restore the model and construct the encoder and decoder.\n self._encoder = Encoder(\n INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT)\n\n self._decoder = AttentionDecoder(\n OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT\n )\n\n self._network = Seq2Seq(\n self._encoder,\n self._decoder,\n self._target_char_to_ix[\"<start>\"],\n self._target_char_to_ix[\"<end>\"],\n self._maxlength,\n ).to(device)\n\n self._network.load_state_dict(loader[\"model_state_dict\"])\n self._network.eval()\n\n def _prepare_sequence_in(self, text: str):\n \"\"\"\n Prepare input sequence for PyTorch\n \"\"\"\n idxs = []\n for ch in text:\n if ch in self._char_to_ix:\n idxs.append(self._char_to_ix[ch])\n else:\n idxs.append(self._char_to_ix[\"<UNK>\"])\n idxs.append(self._char_to_ix[\"<end>\"])\n tensor = torch.tensor(idxs, dtype=torch.long)\n return tensor.to(device)\n\n def romanize(self, text: str) -> str:\n \"\"\"\n :param str text: Thai text to be romanized\n :return: English (more or less) text that spells out how the Thai text\n should be pronounced.\n \"\"\"\n input_tensor = self._prepare_sequence_in(text).view(1, -1)\n input_length = [len(text) + 1]\n\n target_tensor_logits = self._network(input_tensor,\n input_length,\n None, 0)\n\n # Seq2seq model returns <END> as the first token,\n # As a result, target_tensor_logits.size() is torch.Size([0])\n if target_tensor_logits.size(0) == 0:\n target = [\"<PAD>\"]\n else:\n target_tensor = (\n torch.argmax(\n target_tensor_logits.squeeze(1),\n 1).cpu().numpy()\n )\n target = [self._ix_to_target_char[t] for t in target_tensor]\n\n return \"\".join(target)\n\n\nclass Encoder(nn.Module):\n def __init__(self, vocabulary_size, embedding_size,\n hidden_size, dropout=0.5):\n \"\"\"Constructor\"\"\"\n super(Encoder, self).__init__()\n self.hidden_size = hidden_size\n self.character_embedding = nn.Embedding(vocabulary_size,\n embedding_size)\n self.rnn = nn.LSTM(\n input_size=embedding_size,\n hidden_size=hidden_size // 2,\n bidirectional=True,\n batch_first=True,\n )\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, sequences, sequences_lengths):\n\n # sequences: (batch_size, sequence_length=MAX_LENGTH)\n # sequences_lengths: (batch_size)\n\n batch_size = sequences.size(0)\n self.hidden = self.init_hidden(batch_size)\n\n sequences_lengths = 
np.sort(sequences_lengths)[::-1]\n index_sorted = np.argsort(\n -sequences_lengths\n ) # use negation in sort in descending order\n index_unsort = np.argsort(index_sorted) # to unsorted sequence\n\n index_sorted = torch.from_numpy(index_sorted)\n sequences = sequences.index_select(0, index_sorted.to(device))\n\n sequences = self.character_embedding(sequences)\n sequences = self.dropout(sequences)\n\n sequences_packed = nn.utils.rnn.pack_padded_sequence(\n sequences, sequences_lengths.copy(), batch_first=True\n )\n\n sequences_output, self.hidden = self.rnn(sequences_packed,\n self.hidden)\n\n sequences_output, _ = nn.utils.rnn.pad_packed_sequence(\n sequences_output, batch_first=True\n )\n\n index_unsort = torch.from_numpy(index_unsort).to(device)\n sequences_output = sequences_output.index_select(\n 0, index_unsort.clone().detach()\n )\n\n return sequences_output, self.hidden\n\n def init_hidden(self, batch_size):\n h_0 = torch.zeros(\n [2, batch_size, self.hidden_size // 2], requires_grad=True\n ).to(device)\n c_0 = torch.zeros(\n [2, batch_size, self.hidden_size // 2], requires_grad=True\n ).to(device)\n\n return (h_0, c_0)\n\n\nclass Attn(nn.Module):\n def __init__(self, method, hidden_size):\n super(Attn, self).__init__()\n\n self.method = method\n self.hidden_size = hidden_size\n\n if self.method == \"general\":\n self.attn = nn.Linear(self.hidden_size, hidden_size)\n\n elif self.method == \"concat\":\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\n self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))\n\n def forward(self, hidden, encoder_outputs, mask):\n # Calculate energies for each encoder output\n if self.method == \"dot\":\n attn_energies = torch.bmm(encoder_outputs,\n hidden.transpose(1, 2)).squeeze(2)\n elif self.method == \"general\":\n attn_energies = self.attn(\n encoder_outputs.view(-1, encoder_outputs.size(-1))\n ) # (batch_size * sequence_len, hidden_size)\n attn_energies = torch.bmm(\n attn_energies.view(\n *encoder_outputs.size()), hidden.transpose(1, 2)\n ).squeeze(2) # (batch_size, sequence_len)\n elif self.method == \"concat\":\n attn_energies = self.attn(\n torch.cat((\n hidden.expand(*encoder_outputs.size()),\n encoder_outputs\n ), 2)\n ) # (batch_size, sequence_len, hidden_size)\n attn_energies = torch.bmm(\n attn_energies,\n self.other.unsqueeze(0).expand(*hidden.size()).transpose(1, 2),\n ).squeeze(2)\n\n attn_energies = attn_energies.masked_fill(mask == 0, -1e10)\n\n # Normalize energies to weights in range 0 to 1\n return F.softmax(attn_energies, 1)\n\n\nclass AttentionDecoder(nn.Module):\n def __init__(self, vocabulary_size, embedding_size,\n hidden_size, dropout=0.5):\n \"\"\"Constructor\"\"\"\n super(AttentionDecoder, self).__init__()\n self.vocabulary_size = vocabulary_size\n self.hidden_size = hidden_size\n self.character_embedding = nn.Embedding(vocabulary_size,\n embedding_size)\n self.rnn = nn.LSTM(\n input_size=embedding_size + self.hidden_size,\n hidden_size=hidden_size,\n bidirectional=False,\n batch_first=True,\n )\n\n self.attn = Attn(method=\"general\", hidden_size=self.hidden_size)\n self.linear = nn.Linear(hidden_size, vocabulary_size)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, input, last_hidden, encoder_outputs, mask):\n \"\"\"\"Defines the forward computation of the decoder\"\"\"\n\n # input: (batch_size, 1)\n # last_hidden: (batch_size, hidden_dim)\n # encoder_outputs: (batch_size, sequence_len, hidden_dim)\n # mask: (batch_size, sequence_len)\n\n hidden = last_hidden.permute(1, 0, 2)\n attn_weights = 
self.attn(hidden, encoder_outputs, mask)\n\n context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs)\n context_vector = torch.sum(context_vector, dim=1)\n context_vector = context_vector.unsqueeze(1)\n\n embedded = self.character_embedding(input)\n embedded = self.dropout(embedded)\n\n rnn_input = torch.cat((context_vector, embedded), -1)\n\n output, hidden = self.rnn(rnn_input)\n output = output.view(-1, output.size(2))\n\n x = self.linear(output)\n\n return x, hidden[0], attn_weights\n\n\nclass Seq2Seq(nn.Module):\n def __init__(\n self, encoder, decoder, target_start_token,\n target_end_token, max_length\n ):\n super().__init__()\n\n self.encoder = encoder\n self.decoder = decoder\n self.pad_idx = 0\n self.target_start_token = target_start_token\n self.target_end_token = target_end_token\n self.max_length = max_length\n\n assert encoder.hidden_size == decoder.hidden_size\n\n def create_mask(self, source_seq):\n mask = source_seq != self.pad_idx\n return mask\n\n def forward(\n self, source_seq, source_seq_len, target_seq, teacher_forcing_ratio=0.5\n ):\n\n # source_seq: (batch_size, MAX_LENGTH)\n # source_seq_len: (batch_size, 1)\n # target_seq: (batch_size, MAX_LENGTH)\n\n batch_size = source_seq.size(0)\n start_token = self.target_start_token\n end_token = self.target_end_token\n max_len = self.max_length\n target_vocab_size = self.decoder.vocabulary_size\n\n outputs = torch.zeros(max_len,\n batch_size,\n target_vocab_size).to(device)\n\n if target_seq is None:\n assert teacher_forcing_ratio == 0, \"Must be zero during inference\"\n inference = True\n else:\n inference = False\n\n encoder_outputs, encoder_hidden = self.encoder(source_seq,\n source_seq_len)\n\n decoder_input = (\n torch.tensor([[start_token] * batch_size]).view(batch_size,\n 1).to(device)\n )\n\n encoder_hidden_h_t = torch.cat(\n [encoder_hidden[0][0], encoder_hidden[0][1]], dim=1\n ).unsqueeze(dim=0)\n decoder_hidden = encoder_hidden_h_t\n\n max_source_len = encoder_outputs.size(1)\n mask = self.create_mask(source_seq[:, 0:max_source_len])\n\n for di in range(max_len):\n decoder_output, decoder_hidden, _ = self.decoder(\n decoder_input, decoder_hidden, encoder_outputs, mask\n )\n\n topv, topi = decoder_output.topk(1)\n outputs[di] = decoder_output.to(device)\n\n teacher_force = random.random() < teacher_forcing_ratio\n\n decoder_input = (\n target_seq[:, di].reshape(batch_size, 1)\n if teacher_force\n else topi.detach()\n )\n\n if inference and decoder_input == end_token:\n return outputs[:di]\n\n return outputs\n\n_THAI_TO_ROM = ThaiTransliterator()\n\n\ndef romanize(text: str) -> str:\n return _THAI_TO_ROM.romanize(text)\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.nn.LSTM", "torch.load", "torch.cat", "torch.zeros", "torch.sum", "torch.from_numpy", "torch.nn.Embedding", "torch.tensor", "numpy.sort", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "torch.FloatTensor", "torch.cuda.is_available", "numpy.argsort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RaphaelOlivier/speechbrain
[ "142dc6caa4b46ca4c9341b0cd39627f489808749" ]
[ "speechbrain/lobes/models/huggingface_wav2vec.py" ]
[ "\"\"\"This lobe enables the integration of huggingface pretrained wav2vec2/hubert/wavlm models.\n\nReference: https://arxiv.org/abs/2006.11477\nReference: https://arxiv.org/abs/1904.05862\nReference: https://arxiv.org/abs/2110.13900\nTransformer from HuggingFace needs to be installed:\nhttps://huggingface.co/transformers/installation.html\n\nAuthors\n * Titouan Parcollet 2021\n * Boumadane Abdelmoumene 2021\n\"\"\"\n\nimport os\nimport torch\nimport logging\nimport pathlib\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch import nn\nfrom huggingface_hub import model_info\nfrom speechbrain.pretrained.fetching import fetch\n\n# We check if transformers is installed.\ntry:\n import transformers\n from transformers import Wav2Vec2Model, HubertModel, WavLMModel, Data2VecAudioModel\n from transformers import Wav2Vec2Config, HubertConfig, WavLMConfig, Data2VecAudioConfig\n from transformers import Wav2Vec2FeatureExtractor\n from transformers import Wav2Vec2ForPreTraining\n from transformers.models.wav2vec2.modeling_wav2vec2 import (\n _compute_mask_indices,\n )\n\nexcept ImportError:\n MSG = \"Please install transformers from HuggingFace to use wav2vec2 / Hubert\\n\"\n MSG += \"E.G. run: pip install transformers\"\n raise ImportError(MSG)\n\nlogger = logging.getLogger(__name__)\n\nHF_models = {\n \"wav2vec2\": Wav2Vec2Model,\n \"hubert\": HubertModel,\n \"wavlm\": WavLMModel,\n \"data2vec\": Data2VecAudioModel\n}\n\nHF_config = {\n \"wav2vec2\": Wav2Vec2Config,\n \"hubert\": HubertConfig,\n \"wavlm\": WavLMConfig,\n \"data2vec\": Data2VecAudioConfig\n}\n\n\nclass HuggingFaceWav2Vec2(nn.Module):\n \"\"\"This lobe enables the integration of HuggingFace and SpeechBrain\n pretrained wav2vec2.0/Hubert models.\n\n Source paper wav2vec2.0: https://arxiv.org/abs/2006.11477\n Source paper Hubert: https://arxiv.org/abs/2106.07447\n Transformer from HuggingFace needs to be installed:\n https://huggingface.co/transformers/installation.html\n\n The model can be used as a fixed feature extractor or can be finetuned. It\n will download automatically the model from HuggingFace or use a local path.\n\n Arguments\n ---------\n source : str\n HuggingFace hub name: e.g \"facebook/wav2vec2-large-lv60\"\n save_path : str\n Path (dir) of the downloaded model.\n output_norm : bool (default: True)\n If True, a layer_norm (affine) will be applied to the output obtained\n from the wav2vec model.\n freeze : bool (default: True)\n If True, the model is frozen. If False, the model will be trained\n alongside with the rest of the pipeline.\n freeze_feature_extractor : bool (default: False)\n When freeze = False and freeze_feature_extractor True, the featue_extractor module of the model is Frozen. If False\n all the wav2vec model will be trained including featue_extractor module.\n apply_spec_augment : bool (default: False)\n If True, the model will apply spec augment on the output of feature extractor\n (inside huggingface Wav2VecModel() class).\n If False, the model will not apply spec augment. 
We set this to false to prevent from doing it twice.\n Example\n -------\n >>> inputs = torch.rand([10, 600])\n >>> model_hub = \"facebook/wav2vec2-base-960h\"\n >>> save_path = \"savedir\"\n >>> model = HuggingFaceWav2Vec2(model_hub, save_path)\n >>> outputs = model(inputs)\n \"\"\"\n\n def __init__(\n self,\n source,\n save_path,\n output_norm=True,\n freeze=True,\n freeze_feature_extractor=False,\n apply_spec_augment=False,\n load_pretrained_weights=True,\n ):\n super().__init__()\n\n # Download the extractor from HuggingFace.\n # The extractor is only used to retrieve the normalisation information\n self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n source, cache_dir=save_path\n )\n\n # Select specific self-supervised loader (eg. Wav2Vec2, Hubert)\n if \"hubert\" in source:\n config = HF_config.get(\"hubert\")\n model = HF_models.get(\"hubert\")\n elif \"wavlm\" in source:\n config = HF_config.get(\"wavlm\")\n model = HF_models.get(\"wavlm\")\n elif \"data2vec\" in source:\n config = HF_config.get(\"data2vec\")\n model = HF_models.get(\"data2vec\")\n else:\n config = HF_config.get(\"wav2vec2\")\n model = HF_models.get(\"wav2vec2\")\n\n # Download and load the model\n self._from_pretrained(\n source, config=config, model=model, save_path=save_path, load_weights=load_pretrained_weights\n )\n\n # set apply_spec_augment\n self.model.config.apply_spec_augment = apply_spec_augment\n\n # We check if inputs need to be normalized w.r.t pretrained wav2vec2\n self.normalize_wav = self.feature_extractor.do_normalize\n\n self.freeze = freeze\n self.freeze_feature_extractor = freeze_feature_extractor\n self.output_norm = output_norm\n if self.freeze:\n logger.warning(\n \"speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is frozen.\"\n )\n self.model.eval()\n for param in self.model.parameters():\n param.requires_grad = False\n else:\n self.model.train()\n if self.freeze_feature_extractor:\n self.model.feature_extractor._freeze_parameters()\n\n def _from_pretrained(self, source, config, model, save_path, load_weights):\n \"\"\"This function manages the source checking and loading of the params.\n # 1. Is the model from HF or a local path\n # 2. Is the model pretrained with HF or SpeechBrain\n # 3. Download (if appropriate) and load with respect to 1. and 2.\n \"\"\"\n\n is_sb, ckpt_file = self._check_model_source(source)\n if not load_weights:\n config = config.from_pretrained(source, cache_dir=save_path)\n self.model = model(config)\n elif is_sb:\n config = config.from_pretrained(source, cache_dir=save_path)\n self.model = model(config)\n self.model.gradient_checkpointing_disable() # Required by DDP\n # fetch the checkpoint file\n ckpt_full_path = fetch(\n filename=ckpt_file, source=source, savedir=save_path\n )\n # We transfer the parameters from the checkpoint.\n self._load_sb_pretrained_w2v2_parameters(ckpt_full_path)\n else:\n if load_weights:\n self.model = model.from_pretrained(source, cache_dir=save_path)\n else:\n self.model=model()\n\n def _load_sb_pretrained_w2v2_parameters(self, path):\n \"\"\"Loads the parameter of a w2v2 model pretrained with SpeechBrain and the\n HuggingFaceWav2Vec2Pretrain Object. 
It is necessary to perform a custom\n loading because HuggingFace adds a level to the checkpoint when storing\n the model breaking the compatibility between HuggingFaceWav2Vec2Pretrain\n and HuggingFaceWav2Vec2.\n\n In practice a typical HuggingFaceWav2Vec2 checkpoint for a given parameter\n would be: model.conv.weight.data while for HuggingFaceWav2Vec2Pretrain it\n is: model.wav2vec2.weight.data (wav2vec2 must be removed before loading).\n \"\"\"\n\n modified_state_dict = {}\n orig_state_dict = torch.load(path, map_location=\"cpu\")\n\n # We remove the .wav2vec2 in the state dict.\n for key, params in orig_state_dict.items():\n if \"wav2vec2.\" in key:\n save_key = key.replace(\"model.wav2vec2.\", \"\")\n modified_state_dict[save_key] = params\n\n incompatible_keys = self.model.load_state_dict(\n modified_state_dict, strict=False\n )\n for missing_key in incompatible_keys.missing_keys:\n logger.warning(\n f\"During parameter transfer to {self.model} loading from \"\n + f\"{path}, the transferred parameters did not have \"\n + f\"parameters for the key: {missing_key}\"\n )\n for unexpected_key in incompatible_keys.unexpected_keys:\n logger.warning(\n f\"The param with the key: {unexpected_key} is discarded as it \"\n + \"is useless for wav2vec 2.0 finetuning.\"\n )\n\n def _check_model_source(self, path):\n \"\"\"Checks if the pretrained model has been trained with SpeechBrain and\n is hosted locally or on a HuggingFace hub.\n \"\"\"\n checkpoint_filename = \"\"\n source = pathlib.Path(path)\n is_local = True\n is_sb = True\n\n # If path is a huggingface hub.\n if not source.exists():\n is_local = False\n\n if is_local:\n # Test for HuggingFace model\n if any(File.endswith(\".bin\") for File in os.listdir(path)):\n is_sb = False\n return is_sb, checkpoint_filename\n\n # Test for SpeechBrain model and get the filename.\n for File in os.listdir(path):\n if File.endswith(\".ckpt\"):\n checkpoint_filename = os.path.join(path, File)\n is_sb = True\n return is_sb, checkpoint_filename\n else:\n files = model_info(\n path\n ).siblings # get the list of files of the Hub\n\n # Test if it's an HuggingFace model or a SB one\n for File in files:\n if File.rfilename.endswith(\".ckpt\"):\n checkpoint_filename = File.rfilename\n is_sb = True\n return is_sb, checkpoint_filename\n\n for File in files:\n if File.rfilename.endswith(\".bin\"):\n checkpoint_filename = File.rfilename\n is_sb = False\n return is_sb, checkpoint_filename\n\n err_msg = f\"{path} does not contain a .bin or .ckpt checkpoint !\"\n raise FileNotFoundError(err_msg)\n\n def forward(self, wav):\n \"\"\"Takes an input waveform and return its corresponding wav2vec encoding.\n\n Arguments\n ---------\n wav : torch.Tensor (signal)\n A batch of audio signals to transform to features.\n \"\"\"\n\n # If we freeze, we simply remove all grads and features from the graph.\n if self.freeze:\n with torch.no_grad():\n return self.extract_features(wav).detach()\n\n return self.extract_features(wav)\n\n def extract_features(self, wav):\n \"\"\"Takes an input waveform and return its corresponding wav2vec encoding.\n\n Arguments\n ---------\n wav : torch.Tensor (signal)\n A batch of audio signals to transform to features.\n \"\"\"\n\n if self.normalize_wav:\n wav = F.layer_norm(wav, wav.shape)\n\n # Extract wav2vec output\n out = self.model(wav)[0]\n\n # We normalize the output if required\n if self.output_norm:\n out = F.layer_norm(out, out.shape)\n\n return out\n\n\nclass HuggingFaceWav2Vec2Pretrain(nn.Module):\n \"\"\"This lobe enables the integration 
of HuggingFace\n wav2vec2.0 models to be pretrained.\n\n Source paper: https://arxiv.org/abs/2006.11477\n Transformer from HuggingFace needs to be installed:\n https://huggingface.co/transformers/installation.html\n\n The return is an HuggingFace format and the mask indices that contains:\n https://huggingface.co/transformers/model_doc/wav2vec2.html#wav2vec2forpretraining\n\n For instance, it returns the loss that can be accessed with .loss\n\n Arguments\n ---------\n source : str\n HuggingFace hub name: e.g \"facebook/wav2vec2-large-lv60\"\n save_path : str\n Path (dir) of the downloaded model.\n mask_prob : float (default: 0.65)\n Probability of masking a given frame. Default is taken from the paper.\n mask_length : float (default: 10)\n Length (i.e. number of consecutive masked frames). Default is taken from\n the paper.\n Example\n -------\n >>> inputs = torch.rand([10, 32000])\n >>> model_hub = \"facebook/wav2vec2-base-960h\"\n >>> save_path = \"savedir\"\n >>> model = HuggingFaceWav2Vec2Pretrain(model_hub, save_path)\n >>> outputs, _ = model(inputs)\n \"\"\"\n\n def __init__(\n self,\n source,\n save_path,\n mask_prob=0.65,\n mask_length=10,\n normalize_wav=True,\n ):\n super().__init__()\n\n self.mask_prob = mask_prob\n self.mask_length = mask_length\n self.normalize_wav = normalize_wav\n\n # Download the config of the model from HuggingFace.\n self.config = Wav2Vec2Config.from_pretrained(\n source, cache_dir=save_path\n )\n self.config.output_hidden_states = (\n True # We want the hidden states as well!\n )\n\n self.model = Wav2Vec2ForPreTraining(self.config)\n self.model.gradient_checkpointing_disable() # Required by DDP\n self.model.train()\n\n # We check if inputs need to be normalized w.r.t pretrained wav2vec2\n\n def forward(self, wav):\n \"\"\"Takes an input waveform and return its corresponding wav2vec encoding.\n\n Arguments\n ---------\n wav : torch.Tensor (signal)\n A batch of audio signals to transform to features.\n \"\"\"\n batch_size, raw_sequence_length = wav.shape\n\n if self.normalize_wav:\n wav = F.layer_norm(wav, wav.shape)\n\n sequence_length = self.model._get_feat_extract_output_lengths(\n raw_sequence_length\n )\n\n # 1. Compute the indices that will be masked\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length),\n mask_prob=self.mask_prob,\n mask_length=self.mask_length,\n )\n torch_mask_time_indices = torch.tensor(\n mask_time_indices, device=wav.device, dtype=torch.long,\n )\n\n # 2. Sample the negative samples from the entire sequence.\n # Fairseq does it only on the masked indices, but this only work if you\n # have long sentences. For more versatily, we sample on the entire sequence.\n # value.\n full_sentence_indices = np.ones((batch_size, sequence_length))\n\n # print(np.sum(mask_time_indices, axis=1))\n negative_sample_indices = torch.tensor(\n transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices(\n (batch_size, sequence_length),\n num_negatives=self.config.num_negatives,\n mask_time_indices=full_sentence_indices,\n ),\n device=wav.device,\n dtype=torch.long,\n )\n\n return (\n self.model(\n wav,\n mask_time_indices=torch_mask_time_indices,\n sampled_negative_indices=negative_sample_indices,\n ),\n torch_mask_time_indices,\n )\n" ]
[ [ "torch.nn.functional.layer_norm", "torch.load", "torch.tensor", "numpy.ones", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sbrml/pilco
[ "77b6d8b9033ffdb23cae4936b028f42144f37846" ]
[ "pilco/environments/custom/continuous_mountaincar.py" ]
[ "\"\"\"\nOur modification of the OpenAI Gym Continuous Mountain Car by Olivier Sigaud:\nhttps://github.com/openai/gym/blob/master/gym/envs/classic_control/continuous_mountain_car.py\n\nwhich was (ultimately) based on Sutton's implementation:\nhttp://incompleteideas.net/sutton/MountainCar/MountainCar1.cp\n\"\"\"\n\nfrom pilco.errors import EnvironmentError\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nimport numpy as np\n\n\nclass MountainCar(gym.Env):\n\n metadata = {'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30}\n\n def __init__(self):\n\n # State and action bounds\n self.min_action = -1.0\n self.max_action = 1.0\n self.min_position = - 3.0\n self.max_position = 3.0\n self.max_speed = 0.07\n self.goal_position = 0.5\n\n # Force per mass the car can output\n self.power = 0.0015\n\n self.low_state = np.array([self.min_position, -self.max_speed],\n dtype=np.float32)\n\n self.high_state = np.array([self.max_position, self.max_speed],\n dtype=np.float32)\n\n self.viewer = None\n\n # Allowed action space\n self.action_space = spaces.Box(low=self.min_action,\n high=self.max_action,\n shape=(1,),\n dtype=np.float32)\n\n self.seed()\n\n # Temporary hack to work with rest of library\n self.env = self\n\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n\n def step(self, action):\n\n # Check if action is in permissible space\n if not self.action_space.contains(action):\n raise EnvironmentError(f'Expected action in the range of [-1., 1.] '\n f'got action {action}.')\n\n # Unpack positiion and valocity\n position, velocity = self.state\n\n # Increment position by velocity\n position_ = position + velocity\n\n # Increment velocity by Euler rule and clip\n velocity_ = velocity + action * self.power - 0.0025 * np.cos(3 * position)\n velocity_ = np.clip(velocity_, - self.max_speed, self.max_speed)\n\n self.state = np.array([position_, velocity_])\n\n return self.state, None, False, {}\n\n\n def reset(self):\n self.state = np.array([-0.5, 0.])\n return np.array(self.state)\n\n\n def _height(self, xs):\n return 0.55 + 0.45 * np.sin(3 * xs)\n\n def render(self, mode='human'):\n\n # Set picture size\n screen_width = 600\n screen_height = 400\n\n world_width = self.max_position - self.min_position\n scale = screen_width/world_width\n\n # Set car size\n carwidth = 40\n carheight = 20\n\n if self.viewer is None:\n\n from gym.envs.classic_control import rendering\n\n # Car constants\n clearance = 10\n\n # Overall viewer\n self.viewer = rendering.Viewer(screen_width, screen_height)\n\n # Track on which the car moves\n xs = np.linspace(self.min_position, self.max_position, 200)\n ys = self._height(xs)\n xys = list(zip((xs - self.min_position) * scale, ys * scale))\n\n # Add car\n self.track = rendering.make_polyline(xys)\n self.track.set_linewidth(4)\n self.viewer.add_geom(self.track)\n self.cartrans = rendering.Transform()\n\n # Car chasis\n l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0\n car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n car.add_attr(rendering.Transform(translation=(0, clearance)))\n car.add_attr(self.cartrans)\n self.viewer.add_geom(car)\n\n # Front wheel\n frontwheel = rendering.make_circle(carheight / 2.5)\n frontwheel.set_color(.5, .5, .5)\n frontwheel.add_attr(rendering.Transform(translation=(carwidth / 4, clearance)))\n frontwheel.add_attr(self.cartrans)\n self.viewer.add_geom(frontwheel)\n\n # Back wheel\n backwheel = rendering.make_circle(carheight / 2.5)\n 
backwheel.add_attr(rendering.Transform(translation=(-carwidth / 4, clearance)))\n backwheel.add_attr(self.cartrans)\n backwheel.set_color(.5, .5, .5)\n self.viewer.add_geom(backwheel)\n\n # Flagpole on mountain peak\n flagx = scale * (0.5 - self.min_position)\n flagy1 = scale * self._height(self.goal_position)\n flagy2 = flagy1 + 50\n flagpole = rendering.Line((flagx, flagy1),\n (flagx, flagy2))\n self.viewer.add_geom(flagpole)\n\n # Flag on flagpole\n flag = rendering.FilledPolygon([(flagx, flagy2),\n (flagx, flagy2 - 10),\n (flagx + 25, flagy2 - 5)])\n flag.set_color(.8, .8, 0)\n self.viewer.add_geom(flag)\n\n\n # Translate and rotate car\n self.cartrans.set_translation(scale * (self.state[0] - self.min_position),\n scale * self._height(self.state[0]))\n self.cartrans.set_rotation(np.cos(3 * self.state[0]))\n\n return self.viewer.render(return_rgb_array=mode=='rgb_array')\n\n\n def close(self):\n\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n" ]
[ [ "numpy.linspace", "numpy.clip", "numpy.cos", "numpy.sin", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eltociear/NiaPy
[ "7884aefec8f013d9f8db5c1af7080a61dd19a31d" ]
[ "examples/custom_problem.py" ]
[ "# encoding=utf8\n# This is temporary fix to import module from parent folder\n# It will be removed when package is published on PyPI\nimport sys\n\nsys.path.append('../')\n\nimport numpy as np\nfrom niapy.task import StoppingTask\nfrom niapy.problems import Problem\nfrom niapy.algorithms.basic import ParticleSwarmAlgorithm\n\n\nclass MyProblem(Problem):\n def __init__(self, dimension, lower=-10, upper=10, *args, **kwargs):\n super().__init__(dimension, lower, upper, *args, **kwargs)\n\n def _evaluate(self, x):\n return np.sum(x ** 2)\n\n\n# we will run Particle Swarm Algorithm on custom problem\ntask = StoppingTask(problem=MyProblem(dimension=10), max_iters=1000)\nalgo = ParticleSwarmAlgorithm(population_size=40, c1=2.0, c2=2.0, w=0.7, min_velocity=-4, max_velocity=4)\nbest = algo.run(task=task)\nprint('%s -> %s ' % (best[0], best[1]))\n" ]
[ [ "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mrzhuzhe/Kaggle_Lux_AI_2021
[ "08b795e71e78c768d28c648290a15d58ca718776" ]
[ "lux_ai/lux_gym/multi_subtask.py" ]
[ "from abc import ABC, abstractmethod\nimport numpy as np\nimport random\nfrom typing import Callable, Dict, Optional, Tuple, Sequence\n\nfrom .reward_spaces import Subtask\nfrom ..lux.game import Game\n\n\nclass SubtaskSampler(ABC):\n def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):\n self.subtask_constructors = subtask_constructors\n\n @abstractmethod\n def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:\n pass\n\n # noinspection PyMethodMayBeStatic\n def get_info(self) -> Dict[str, np.ndarray]:\n return {}\n\n\nclass RandomSampler(SubtaskSampler):\n def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:\n return self.subtask_constructors[random.randrange(len(self.subtask_constructors))]()\n\n\nclass DifficultySampler(SubtaskSampler):\n def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):\n super(DifficultySampler, self).__init__(subtask_constructors)\n self.active_subtask_idx = -1\n self.summed_rewards = np.zeros(len(self.subtask_constructors))\n self.n_trials = np.zeros(len(self.subtask_constructors))\n\n def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:\n if final_rewards is not None:\n self.n_trials[self.active_subtask_idx] += 1\n self.summed_rewards[self.active_subtask_idx] += np.mean(final_rewards)\n\n self.active_subtask_idx = np.random.choice(len(self.subtask_constructors), p=self.weights)\n return self.subtask_constructors[self.active_subtask_idx]()\n\n @property\n def weights(self) -> np.ndarray:\n weights = Subtask.get_reward_spec().reward_max - self.summed_rewards / np.maximum(self.n_trials, 1)\n return weights / weights.sum()\n\n def get_info(self) -> Dict[str, np.ndarray]:\n return {\n f\"LOGGING_{subtask.__name__}_subtask_difficulty\": self.weights[i]\n for i, subtask in enumerate(self.subtask_constructors)\n }\n\n\nclass MultiSubtask(Subtask):\n def __init__(\n self,\n subtask_constructors: Sequence[Callable[..., Subtask]] = (),\n subtask_sampler_constructor: Callable[..., SubtaskSampler] = RandomSampler,\n **kwargs\n ):\n super(MultiSubtask, self).__init__(**kwargs)\n self.subtask_constructors = subtask_constructors\n self.subtask_sampler = subtask_sampler_constructor(self.subtask_constructors)\n self.active_subtask = self.subtask_sampler.sample(None)\n self.info = {\n f\"LOGGING_{subtask.__name__}_subtask_reward\": np.array([float(\"nan\"), float(\"nan\")])\n for subtask in self.subtask_constructors\n }\n\n def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:\n reward, done = self.active_subtask.compute_rewards_and_done(game_state, done)\n for subtask in self.subtask_constructors:\n reward_key = f\"LOGGING_{subtask.__name__}_subtask_reward\"\n if isinstance(self.active_subtask, subtask):\n self.info[reward_key] = np.array(reward)\n else:\n self.info[reward_key] = np.array([float(\"nan\"), float(\"nan\")])\n if done:\n self.active_subtask = self.subtask_sampler.sample(reward)\n return reward, done\n\n def completed_task(self, game_state: Game) -> np.ndarray:\n raise NotImplementedError\n\n def get_info(self) -> Dict[str, np.ndarray]:\n return dict(**self.info, **self.subtask_sampler.get_info())\n\n def get_subtask_encoding(self, subtask_encoding_dict: dict) -> int:\n return self.active_subtask.get_subtask_encoding(subtask_encoding_dict)\n" ]
[ [ "numpy.maximum", "numpy.array", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Acetonen/Interkamen_career
[ "75cc0a5832b7c1e303967cc337bb001e3383eb9e" ]
[ "interkamen_career/modules/mechanics_economic.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"Visualise statistic by machine economic.\"\"\"\n\n\nfrom __future__ import annotations\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom typing import Dict\nfrom .mechanic_report import MechReports\nfrom .administration.logger_cfg import Logs\nfrom .support_modules.custom_exceptions import MainMenu\nfrom .support_modules.standart_functions import (\n BasicFunctionsS\n as BasF_S\n)\n\nLOGGER = Logs().give_logger(__name__)\n\n\nclass MechEconomic(MechReports):\n \"\"\"Visualise statistic by machine economic.\"\"\"\n\n __slots__ = (\n 'mech_econ_path',\n 'mech_econ_data',\n 'mech_econ_file',\n )\n\n def __init__(self, user):\n \"\"\"Load mech econom data.\"\"\"\n super().__init__(user)\n self.mech_econ_data = {}\n self.mech_econ_path = (\n super().get_root_path() / 'data' / 'mech_ecomomic'\n )\n if self.mech_econ_path.exists():\n self.mech_econ_file = super().load_data(\n data_path=self.mech_econ_path,\n user=user,\n )\n else:\n self.mech_econ_file = pd.DataFrame(self.mech_econ_data, index=[0])\n\n def _save_mech_econom(self):\n \"\"\"Save mech econom and create log file.\"\"\"\n self.mech_econ_file = self.mech_econ_file.append(\n self.mech_econ_data,\n ignore_index=True\n )\n self._dump_mech_econ_data()\n self._log_mech_econ_creation()\n\n def _dump_mech_econ_data(self):\n \"\"\"Dump salary data to file.\"\"\"\n super().dump_data(\n data_path=self.mech_econ_path,\n base_to_dump=self.mech_econ_file,\n user=self.user,\n )\n\n def _log_mech_econ_creation(self):\n \"\"\"Save log about salary creation.\"\"\"\n report_name = '{}-{}'.format(\n self.mech_econ_data['year'],\n self.mech_econ_data['month'],\n )\n LOGGER.warning(\n f\"User '{self.user.login}' create mechanic econom.: {report_name}\"\n )\n\n def _visualise_one_day_cost(self):\n \"\"\"Visualise cost of one day by each machine.\"\"\"\n year = self._chose_year()\n data_by_year = super().give_dataframe_by_year(year)\n data_for_plot = {\n 'mach': [],\n 'day_cost': [],\n }\n for mach in super().maint_dict['mach_name']:\n totall_cost = sum(self.mech_econ_file[mach])\n total_work = sum(data_by_year.work)\n number_of_wdays = total_work\n day_cost = round(totall_cost/number_of_wdays, 0)\n data_for_plot['mach'].append(mach)\n data_for_plot['day_cost'].append(day_cost)\n data_for_plot = pd.DataFrame(data_for_plot)\n self._create_one_day_cost_plot(data_for_plot)\n\n def _input_machines_econ(self, mech_econ_date):\n \"\"\"Input money, spent for machine in month.\"\"\"\n self.mech_econ_data['year'] = mech_econ_date['year']\n self.mech_econ_data['month'] = mech_econ_date['month']\n super().clear_screen()\n print(\"Введите сумму для каждой техники:\")\n for mach in super().maint_dict['mach_name']:\n self.mech_econ_data[mach] = float(input(f\"{mach}: \"))\n save = input(\n \"\\nДанные введены.\"\n \"\\n[s] - сохранить данные: \"\n )\n if save.lower() == 's':\n self._save_mech_econom()\n print(\"Данные сохранены.\")\n else:\n print(\"Вы отменили сохранение.\")\n input(\"\\n[ENTER] - выйти.\")\n\n def _visualise_statistic(self, year):\n \"\"\"Visualise statistic.\"\"\"\n mech_econ_year = self.mech_econ_file.year == year\n data_by_year = (\n self.mech_econ_file[mech_econ_year]\n .sort_values(by=['month'])\n )\n super().print_all_dataframe(data_by_year)\n input(\"\\n[ENTER] - выйти.\")\n\n def _chose_year(self):\n \"\"\"Show statistic about drill instrument.\"\"\"\n print(\"[ENTER] - выход\"\n \"\\nВыберете год:\")\n year = super().choise_from_list(\n sorted(set(self.mech_econ_file.year)),\n 
none_option=True\n )\n if year:\n return year\n else:\n raise MainMenu\n\n @BasF_S.set_plotter_parametrs\n def _create_one_day_cost_plot(self, dataframe):\n \"\"\"Create one day cost plot.\"\"\"\n figure = plt.figure()\n\n x_cost = list(range(len(super().maint_dict['mach_name'])))\n\n axle = figure.add_subplot(111)\n axle.bar(\n x_cost, dataframe.day_cost, 0.3, alpha=0.4, color='r',\n label='Коэффициент', tick_label=dataframe.mach\n )\n axle.tick_params(labelrotation=90)\n axle.set_title(\n \"Коэффициент целесообразности содержания техники руб/час. \",\n fontsize=\"x-large\")\n axle.set_ylabel('руб.')\n axle.legend()\n axle.grid(\n True, linestyle='--', which='major',\n color='grey', alpha=.25, axis='y'\n )\n figure.tight_layout()\n plt.show()\n\n def create_mech_econom(self):\n \"\"\"Create mechanic econom data report.\"\"\"\n mech_econ_date = self.input_date()\n check = super().check_date_in_dataframe(\n self.mech_econ_file,\n mech_econ_date\n )\n if check:\n print(\"Данные за этот месяц уже внесены.\")\n input(\"\\n[ENTER] - выйти.\")\n else:\n self._input_machines_econ(mech_econ_date)\n\n def show_econ_statistic(self, stat_variants: Dict):\n \"\"\"Show machine economic statistic.\"\"\"\n stat_variants = {\n 'Целесообразность затрат на содержание техники.':\n self._visualise_one_day_cost,\n }\n print(\"[ENTER] - выйти.\"\n \"\\nВыберете вид отчета:\")\n stat = super().choise_from_list(stat_variants, none_option=True)\n if stat:\n stat_variants[stat]()\n" ]
[ [ "matplotlib.pyplot.show", "pandas.DataFrame", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
robbycostales/multiagent-particle-envs
[ "22a00b18e13b629a206a8ffc8d8319d06dd5d7b0" ]
[ "multiagent/scenarios/simple_speaker_listener.py" ]
[ "import numpy as np\nfrom multiagent.core import World, Agent, Landmark\nfrom multiagent.scenario import BaseScenario\n\nclass Scenario(BaseScenario):\n def make_world(self, dim_c=3):\n world = World()\n # set any world properties first\n world.dim_c = dim_c\n num_landmarks = 3\n # add agents\n world.agents = [Agent() for i in range(2)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = False\n agent.size = 0.075\n # speaker\n world.agents[0].movable = False\n # listener\n world.agents[1].silent = True\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n landmark.size = 0.04\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # assign goals to agents\n for agent in world.agents:\n agent.goal_a = None\n agent.goal_b = None\n # want listener to go to the goal landmark\n world.agents[0].goal_a = world.agents[1]\n world.agents[0].goal_b = np.random.choice(world.landmarks)\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = np.array([0.25,0.25,0.25])\n # random properties for landmarks\n world.landmarks[0].color = np.array([0.65,0.15,0.15])\n world.landmarks[1].color = np.array([0.15,0.65,0.15])\n world.landmarks[2].color = np.array([0.15,0.15,0.65])\n # special colors for goals\n world.agents[0].goal_a.color = world.agents[0].goal_b.color + np.array([0.45, 0.45, 0.45])\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1,+1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np.random.uniform(-1,+1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def benchmark_data(self, agent, world):\n # returns data for benchmarking purposes\n return reward(agent, reward)\n\n def reward(self, agent, world):\n # squared distance from listener to landmark\n a = world.agents[0]\n dist2 = np.sum(np.square(a.goal_a.state.p_pos - a.goal_b.state.p_pos))\n return -dist2\n\n def observation(self, agent, world):\n # goal color\n goal_color = np.zeros(world.dim_color)\n if agent.goal_b is not None:\n goal_color = agent.goal_b.color\n\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n\n # communication of all other agents\n comm = []\n for other in world.agents:\n if other is agent or (other.state.c is None): continue\n comm.append(other.state.c)\n\n # speaker\n if not agent.movable:\n return np.concatenate([goal_color])\n # listener\n if agent.silent:\n return np.concatenate([agent.state.p_vel] + entity_pos + comm)\n\n" ]
[ [ "numpy.square", "numpy.random.choice", "numpy.concatenate", "numpy.random.uniform", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
t27/carla-scenic-data-collector
[ "3f38fa0e23a9f0ed85726292c5703c8505330870" ]
[ "carla_python_api_recorder.py" ]
[ "# Recorder that records agent states as dataframes and also stores a carla recording, in synchronous mode\n\n\n#!/usr/bin/env python\n\n# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de\n# Barcelona (UAB).\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\nimport glob\nimport os\nimport sys\nimport pandas as pd\nfrom tqdm import tqdm\nimport math\n\n\nCARLA_VERSION = \"0.9.11\"\ntry:\n # sys.path.append(\"./libs/carla-0.9.9-py3.7-linux-x86_64.egg\")\n if CARLA_VERSION == \"0.9.9\":\n sys.path.append(\"./libs/carla-0.9.9-py3.7-linux-x86_64.egg\")\n elif CARLA_VERSION == \"0.9.11\":\n sys.path.append(\"./libs/carla-0.9.11-py3.7-linux-x86_64.egg\")\nexcept IndexError:\n pass\n\nimport carla\n\nimport argparse\nimport random\nimport time\nimport logging\nimport click\nimport pathlib\n\nimport spawn\n\ncurrent_dir = pathlib.Path(__file__).parent.absolute()\nSEED = 27\nrandom.seed(SEED)\n\n\ndef get_metadata(actor, frame_id):\n type_id = actor.type_id\n\n def splitCarlaVec(vect):\n return vect.x, vect.y, vect.z\n\n id = actor.id\n # clsname = ClientSideBoundingBoxes.get_class_name(actor)\n tf = actor.get_transform()\n roll, pitch, yaw = tf.rotation.roll, tf.rotation.pitch, tf.rotation.yaw\n loc = actor.get_location()\n pos_x, pos_y, pos_z = splitCarlaVec(loc)\n try:\n bbox3d = actor.bounding_box\n bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = splitCarlaVec(\n bbox3d.location\n )\n bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = splitCarlaVec(bbox3d.extent)\n except:\n bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = None, None, None\n bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = None, None, None\n\n velocity_x, velocity_y, velocity_z = splitCarlaVec(actor.get_velocity())\n acc_x, acc_y, acc_z = splitCarlaVec(actor.get_acceleration())\n angular_vel_x, angular_vel_y, angular_vel_z = splitCarlaVec(\n actor.get_angular_velocity()\n )\n\n try:\n # need to do this because Carla's Actor object doesnt support getattr\n traffic_light_state = actor.state.name\n except:\n traffic_light_state = None\n\n return (\n frame_id,\n id,\n type_id,\n pos_x,\n pos_y,\n pos_z,\n roll,\n pitch,\n yaw,\n velocity_x,\n velocity_y,\n velocity_z,\n acc_x,\n acc_y,\n acc_z,\n angular_vel_x,\n angular_vel_y,\n angular_vel_z,\n bbox3d_offset_x,\n bbox3d_offset_y,\n bbox3d_offset_z,\n bbox3d_extent_x,\n bbox3d_extent_y,\n bbox3d_extent_z,\n traffic_light_state,\n )\n\n\nglobal_collision = False\n\n\ndef collision_detect_callback(event):\n actor_we_collide_against = event.other_actor\n impulse = event.normal_impulse\n intensity = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)\n if \"vehicle.\" in actor_we_collide_against.type_id:\n global global_collision\n global_collision = True\n\n\ndef attach_collision_sensor(actor, world):\n blueprint_library = world.get_blueprint_library()\n\n collision_sensor = world.spawn_actor(\n blueprint_library.find(\"sensor.other.collision\"),\n carla.Transform(),\n attach_to=actor,\n )\n\n collision_sensor.listen(lambda event: collision_detect_callback(event))\n\n return collision_sensor\n\n\ndef run(\n client,\n round_name,\n recording_dir,\n speed_violation_prob=60,\n tl_violation_prob=70,\n perc_speed_diff=-30,\n num_vehicles=25,\n SESSION_DURATION=60,\n):\n safe = True # avoid spawning vehicles whose geometry is not ideal for carla\n\n actor_list = []\n sensors = []\n\n logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO)\n\n 
try:\n FPS = 5\n DELTA_T = 1 / FPS\n\n world = client.get_world()\n blueprints = world.get_blueprint_library().filter(\"vehicle.*\")\n traffic_manager = client.get_trafficmanager()\n traffic_manager.set_global_distance_to_leading_vehicle(2.0)\n if CARLA_VERSION == \"0.9.11\":\n print(\"Using deterministic Traffic Manager\")\n traffic_manager.set_random_device_seed(SEED)\n settings = client.get_world().get_settings()\n if not settings.synchronous_mode:\n traffic_manager.set_synchronous_mode(True)\n synchronous_master = True\n settings.synchronous_mode = True\n settings.fixed_delta_seconds = DELTA_T\n client.get_world().apply_settings(settings)\n else:\n synchronous_master = False\n\n recording_dir_path = pathlib.Path(recording_dir)\n recording_dir_path.mkdir(exist_ok=True)\n session_recording = str(recording_dir_path / f\"{round_name}.csv\")\n carla_session_recording = str(\n recording_dir_path.absolute() / f\"{round_name}_carla_recording\"\n )\n print(\"Recording on file: %s\" % client.start_recorder(carla_session_recording))\n vehicles_list, walkers_list, all_actors = spawn.spawn(\n client, world, num_vehicles, 0, safe\n )\n world.tick()\n print(\"spawned %d vehicles, press Ctrl+C to exit.\" % len(actor_list))\n # fmt: off\n df_columns = [\n \"frame_id\", \"id\", \"type_id\", \"pos_x\", \"pos_y\", \"pos_z\", \"roll\", \"pitch\", \"yaw\", \n \"velocity_x\", \"velocity_y\", \"velocity_z\", \"acc_x\", \"acc_y\", \"acc_z\", \n \"angular_vel_x\", \"angular_vel_y\", \"angular_vel_z\", \n \"bbox3d_offset_x\", \"bbox3d_offset_y\", \"bbox3d_offset_z\", \n \"bbox3d_extent_x\", \"bbox3d_extent_y\", \"bbox3d_extent_z\", \"traffic_light_color\",\n ]\n # fmt: on\n # get all non vehicle agents\n global global_collision\n global_collision = False\n actors = world.get_actors()\n for actor in actors:\n if \"vehicle.\" in actor.type_id:\n sensors.append(attach_collision_sensor(actor, world))\n non_vehicles = [\n x\n for x in actors\n if (\"vehicle\" not in x.type_id and \"traffic_light\" not in x.type_id)\n ] # signs, traffic lights etc\n frame_id = 0\n df_arr = []\n non_vehicle_arr = [get_metadata(actor, frame_id) for actor in non_vehicles]\n df_arr += non_vehicle_arr\n pbar = tqdm(total=FPS * SESSION_DURATION)\n max_frames = FPS * SESSION_DURATION\n collision_detected_once = False\n while frame_id < max_frames:\n if global_collision and not collision_detected_once:\n # Todo, if detected, start a countdown of N frames and break only after N iterations\n print(\"detected collision, exiting!\")\n collision_detected_once = True\n max_frames = frame_id + 5\n # continue\n\n actors = world.get_actors()\n for actor in actors:\n if \"vehicle.\" in actor.type_id:\n # print(actor.type_id)\n tm_port = traffic_manager.get_port()\n actor.set_autopilot(True, tm_port)\n traffic_manager.ignore_lights_percentage(actor, tl_violation_prob)\n traffic_manager.distance_to_leading_vehicle(actor, 3)\n if random.random() * 100 < speed_violation_prob:\n traffic_manager.vehicle_percentage_speed_difference(\n actor, perc_speed_diff\n )\n\n vehicles_and_lights = [\n x\n for x in actors\n if \"vehicle\" in x.type_id or \"traffic_light\" in x.type_id\n ]\n metadata_arr = [\n get_metadata(actor, frame_id) for actor in vehicles_and_lights\n ]\n df_arr += metadata_arr\n frame_id += 1\n pbar.update(1)\n world.tick()\n df = pd.DataFrame(df_arr, columns=df_columns)\n pbar.close()\n print(f\"Saving CSV({len(df.frame_id.unique())} frames)\")\n # df.to_parquet(f\"session_data.parquet\")\n df.to_csv(session_recording, index=False)\n 
world.tick()\n # if args.recorder_time > 0:\n # time.sleep(args.recorder_time)\n # else:\n # while True:\n # world.wait_for_tick()\n # # time.sleep(0.1)\n\n finally:\n if synchronous_master:\n settings = world.get_settings()\n settings.synchronous_mode = False\n settings.fixed_delta_seconds = None\n world.apply_settings(settings)\n print(\"\\ndestroying %d actors\" % (len(sensors) + len(vehicles_list)))\n # all_agents = sensors + vehicles_list\n for s in sensors:\n s.destroy()\n client.apply_batch_sync([carla.command.DestroyActor(x) for x in vehicles_list])\n\n print(\"Stop recording\")\n client.stop_recorder()\n\n\[email protected]()\[email protected](\n \"-s\",\n \"--scenario_type\",\n type=click.Choice([\"tl_sl\", \"nominal\"], case_sensitive=False),\n required=True,\n)\[email protected](\"-n\", \"--num_rounds\", default=100)\[email protected](\"--test\", is_flag=True)\ndef main(scenario_type, num_rounds, test):\n # print(scenario_type, test, num_rounds)\n if test:\n random.seed(72)\n\n if scenario_type.lower() == \"tl_sl\":\n SPEED_VIOLATION_PROB = 60\n TL_VIOLATION_PROB = 70\n PERC_SPEED_DIFF = -30\n SCENARIO_NAME = \"tl_sl\"\n # NUM_ROUNDS = 100\n elif scenario_type.lower() == \"nominal\":\n SPEED_VIOLATION_PROB = 0\n TL_VIOLATION_PROB = 0\n PERC_SPEED_DIFF = 0\n SCENARIO_NAME = \"nominal\"\n # NUM_ROUNDS = 200\n NUM_ROUNDS = num_rounds\n print(f\"Recording {SCENARIO_NAME} data\")\n try:\n host = \"127.0.0.1\" # IP of the host server (default: 127.0.0.1)\n port = 2000 # TCP port to listen to (default: 2000)\",\n client = carla.Client(host, port)\n if test:\n scenario_dir = f\"test_{SCENARIO_NAME}_recordings\"\n else:\n scenario_dir = f\"{SCENARIO_NAME}_recordings\"\n\n round_names = []\n for i in range(NUM_ROUNDS):\n run(\n client,\n f\"{scenario_type}_round_{i}\",\n scenario_dir,\n SPEED_VIOLATION_PROB,\n TL_VIOLATION_PROB,\n PERC_SPEED_DIFF,\n )\n round_names.append(f\"{scenario_type}_round_{i}\")\n # client.reload_world()\n except KeyboardInterrupt:\n pass\n finally:\n print(\"\\ndone.\")\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
dcambie/spectrochempy
[ "e376082d66be7a4c528b7d83be076d77534e39bd", "4707c51dba0032c160afc40682fa16d4b9855ded", "4707c51dba0032c160afc40682fa16d4b9855ded", "4707c51dba0032c160afc40682fa16d4b9855ded", "4707c51dba0032c160afc40682fa16d4b9855ded", "4707c51dba0032c160afc40682fa16d4b9855ded", "4707c51dba0032c160afc40682fa16d4b9855ded" ]
[ "spectrochempy/core/dataset/nddataset.py", "spectrochempy/core/readers/readcsv.py", "spectrochempy/api.py", "spectrochempy/core/dataset/meta.py", "spectrochempy/core/readers/readjdx.py", "tests/test_analysis/test_svd.py", "tests/test_processors/test_autosub.py" ]
[ "# -*- coding: utf-8 -*-\n\n#\n# ======================================================================================================================\n# Copyright (©) 2015-2019 LCS\n# Laboratoire Catalyse et Spectrochimie, Caen, France.\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT\n# See full LICENSE agreement in the root directory\n# ======================================================================================================================\n\"\"\"\nThis module implements the |NDDataset| class.\n\"\"\"\n\n__all__ = ['NDDataset']\n\nimport textwrap\nimport warnings\nimport sys\n\nimport numpy as np\nfrom traitlets import HasTraits, Instance, Bool, Float, validate, default, Dict, Union\nfrom traittypes import Array\n\nfrom spectrochempy.core.project.baseproject import AbstractProject\nfrom spectrochempy.core.dataset.ndarray import NDArray, DEFAULT_DIM_NAME\nfrom spectrochempy.core.dataset.ndcomplex import NDComplexArray\nfrom spectrochempy.core.dataset.coord import Coord, LinearCoord\nfrom spectrochempy.core.dataset.coordset import CoordSet\nfrom spectrochempy.core.dataset.ndmath import NDMath, _set_ufuncs, _set_operators\nfrom spectrochempy.core.dataset.ndio import NDIO\nfrom spectrochempy.core.dataset.ndplot import NDPlot\nfrom spectrochempy.core import error_, warning_\nfrom spectrochempy.utils import (colored_output, SpectroChemPyException, SpectroChemPyWarning, MaskedConstant)\n\nHAS_XARRAY = False\ntry:\n import xarray as xr\n\n HAS_XARRAY = True # pragma: no cover\nexcept ImportError:\n xr = None # pragma: no cover\n\n\n# ======================================================================================================================\n# NDDataset class definition\n# ======================================================================================================================\n\nclass NDDataset(NDIO, NDPlot, NDMath, NDComplexArray):\n # coordinates\n _coordset = Instance(CoordSet, allow_none=True)\n\n # model data (e.g., for fit)\n _modeldata = Array(Float(), allow_none=True)\n\n # some setting for NDDataset\n _copy = Bool(False)\n _labels_allowed = Bool(False) # no labels for NDDataset\n\n # dataset can be members of a project.\n # we use the abstract class to avoid circular imports.\n _parent = Instance(AbstractProject, allow_none=True)\n\n # For the GUI interface\n\n # parameters state\n _state = Dict()\n\n # processed data (for GUI)\n _processeddata = Array(Float(), allow_none=True)\n\n # processed mask (for GUI)\n _processedmask = Union((Bool(), Array(Bool()), Instance(MaskedConstant)))\n\n # baseline data (for GUI)\n _baselinedata = Array(Float(), allow_none=True)\n\n # reference data (for GUI)\n _referencedata = Array(Float(), allow_none=True)\n\n # ------------------------------------------------------------------------------------------------------------------\n # initialisation\n # ------------------------------------------------------------------------------------------------------------------\n # ..................................................................................................................\n def __init__(self, data=None, coordset=None, coordunits=None, coordtitles=None, **kwargs):\n \"\"\"\n The main N-dimensional dataset class used by |scpy|.\n\n The NDDataset is the main object use by SpectroChemPy. Like numpy ndarrays, NDDataset have the capability to be\n sliced, sorted and subject to mathematical operations. 
But, in addition, NDDataset may have units,\n can be masked\n and each dimensions can have coordinates also with units. This make NDDataset aware of unit compatibility,\n e.g.,\n for binary operation such as additions or subtraction or during the application of mathematical operations.\n In addition or in replacement of numerical data for coordinates, NDDataset can also have labeled coordinates\n where labels can be different kind of objects (strings, datetime, numpy nd.ndarray or othe NDDatasets, etc…).\n\n Parameters\n ----------\n data : array of floats\n Data array contained in the object. The data can be a list, a tuple, a |ndarray|, a ndarray-like,\n a |NDArray| or any subclass of |NDArray|. Any size or shape of data is accepted. If not given, an empty\n |NDArray| will be inited.\n At the initialisation the provided data will be eventually casted to a numpy-ndarray.\n If a subclass of |NDArray| is passed which already contains some mask, labels, or units, these elements\n will\n be used to accordingly set those of the created object. If possible, the provided data will not be copied\n for `data` input, but will be passed by reference, so you should make a copy of the `data` before passing\n them if that's the desired behavior or set the `copy` argument to True.\n coordset : An instance of |CoordSet|, optional\n `coords` contains the coordinates for the different dimensions of the `data`. if `coords` is provided,\n it must specified the `coord` and `labels` for all dimensions of the `data`.\n Multiple `coord`'s can be specified in an |CoordSet| instance for each dimension.\n coordunits : list, optional\n A list of units corresponding to the dimensions in the order of the coordset.\n coordtitles : list, optional\n A list of titles corresponding of the dimensions in the order of the coordset.\n **kwargs : dict\n See other parameters.\n\n Other Parameters\n ----------------\n dtype : str or dtype, optional, default=np.float64\n If specified, the data will be casted to this dtype, else the data will be casted to float64 or complex128.\n dims : list of chars, optional\n If specified the list must have a length equal to the number od data dimensions (ndim) and the chars\n must be\n taken among among x,y,z,u,v,w or t. If not specified, the dimension names are automatically attributed in\n this order.\n name : str, optional\n A user friendly name for this object. If not given, the automatic `id` given at the object creation will be\n used as a name.\n labels : array of objects, optional\n Labels for the `data`. labels can be used only for 1D-datasets.\n The labels array may have an additional dimension, meaning several series of labels for the same data.\n The given array can be a list, a tuple, a |ndarray|, a ndarray-like, a |NDArray| or any subclass of\n |NDArray|.\n mask : array of bool or `NOMASK`, optional\n Mask for the data. The mask array must have the same shape as the data. The given array can be a list,\n a tuple, or a |ndarray|. Each values in the array must be `False` where the data are *valid* and True when\n they are not (like in numpy masked arrays). If `data` is already a :class:`~numpy.ma.MaskedArray`, or any\n array object (such as a |NDArray| or subclass of it), providing a `mask` here will causes the mask from the\n masked array to be ignored.\n units : |Unit| instance or str, optional\n Units of the data. If data is a |Quantity| then `units` is set to the unit of the `data`; if a unit is also\n explicitly provided an error is raised. 
Handling of units use the `pint <https://pint.readthedocs.org/>`_\n package.\n title : str, optional\n The title of the dimension. It will later be used for instance for labelling plots of the data.\n It is optional but recommended to give a title to each ndarray.\n dlabel : str, optional\n Alias of `title`.\n meta : dict-like object, optional\n Additional metadata for this object. Must be dict-like but no\n further restriction is placed on meta.\n author : str, optional\n Name(s) of the author(s) of this dataset. BNy default, name of the computer note where this dataset is\n created.\n description : str, optional\n A optional description of the nd-dataset. A shorter alias is `desc`.\n history : str, optional\n A string to add to the object history.\n copy : bool, optional\n Perform a copy of the passed object. Default is False.\n\n See Also\n --------\n Coord : Explicit coordinates object.\n LinearCoord : Implicit coordinates objet.\n CoordSet : Set of coordinates.\n\n Notes\n -----\n The underlying array in a |NDDataset| object can be accessed through the `data` attribute, which will return\n a conventional |ndarray|.\n\n Examples\n --------\n Usage by an end-user\n\n >>> from spectrochempy import *\n >>> x = NDDataset([1, 2, 3])\n >>> print(x.data) # doctest: +NORMALIZE_WHITESPACE\n [ 1 2 3]\n \"\"\"\n super().__init__(data, **kwargs)\n\n self._parent = None\n\n # eventually set the coordinates with optional units and title\n\n if isinstance(coordset, CoordSet):\n self.set_coordset(**coordset)\n\n else:\n if coordset is None:\n coordset = [None] * self.ndim\n\n if coordunits is None:\n coordunits = [None] * self.ndim\n\n if coordtitles is None:\n coordtitles = [None] * self.ndim\n\n _coordset = []\n for c, u, t in zip(coordset, coordunits, coordtitles):\n if not isinstance(c, CoordSet):\n if isinstance(c, LinearCoord):\n coord = LinearCoord(c)\n else:\n coord = Coord(c)\n if u is not None:\n coord.units = u\n if t is not None:\n coord.title = t\n else:\n if u: # pragma: no cover\n warning_('units have been set for a CoordSet, but this will be ignored '\n '(units are only defined at the coordinate level')\n if t: # pragma: no cover\n warning_('title will be ignored as they are only defined at the coordinates level')\n coord = c\n\n _coordset.append(coord)\n\n if _coordset and set(_coordset) != {Coord()}: # if they are no coordinates do nothing\n self.set_coordset(*_coordset)\n\n # ------------------------------------------------------------------------------------------------------------------\n # special methods\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n def __dir__(self):\n # WARNING: be carefull to keep the present order of the three first elements! 
Needed for save/load operations\n return ['dims', 'coordset', 'data', 'name', 'title', 'mask', 'units', 'meta', 'preferences',\n 'author', 'description', 'history', 'date', 'modified', 'origin', 'roi', 'offset', 'transposed',\n 'modeldata', 'processeddata', 'baselinedata', 'referencedata', 'state'] + NDIO().__dir__()\n\n # ..................................................................................................................\n def __getitem__(self, items):\n\n saveditems = items\n\n # coordinate selection to test first\n if isinstance(items, str):\n try:\n return self._coordset[items]\n except Exception:\n pass\n\n # slicing\n new, items = super().__getitem__(items, return_index=True)\n\n if new is None:\n return None\n\n if self._coordset is not None:\n names = self._coordset.names # all names of the current coordinates\n new_coords = [None] * len(names)\n for i, item in enumerate(items):\n # get the corresponding dimension name in the dims list\n name = self.dims[i]\n # get the corresponding index in the coordinate's names list\n idx = names.index(name)\n if self._coordset[idx].is_empty:\n new_coords[idx] = Coord(None, name=name)\n elif isinstance(item, slice):\n # add the slice on the corresponding coordinates on the dim to the new list of coordinates\n if not isinstance(self._coordset[idx], CoordSet):\n new_coords[idx] = self._coordset[idx][item]\n else:\n # we must slice all internal coordinates\n newc = []\n for c in self._coordset[idx]:\n newc.append(c[item])\n new_coords[idx] = CoordSet(*newc[::-1], name=name) # we reverse to be sure\n # the order will be # kept for internal coordinates\n new_coords[idx]._default = self._coordset[idx]._default # set the same default coord\n new_coords[idx]._is_same_dim = self._coordset[idx]._is_same_dim\n\n elif isinstance(item, (np.ndarray, list)):\n new_coords[idx] = self._coordset[idx][item]\n\n new.set_coordset(*new_coords, keepnames=True)\n\n new.history = f'Slice extracted: ({saveditems})'\n return new\n\n # ..................................................................................................................\n def __getattr__(self, item):\n # when the attribute was not found\n if item in [\"__numpy_ufunc__\", \"interface\", '_pytestfixturefunction', '__dataclass_fields__',\n '_ipython_canary_method_should_not_exist_', '_baseclass', '_fill_value', '_ax_lines', '_axcb',\n 'clevels', '__wrapped__', 'coords', '__await__',\n '__aiter__'] or '_validate' in item or '_changed' in item:\n # raise an error so that traits, ipython operation and more ... 
will be handled correctly\n raise AttributeError\n\n # syntax such as ds.x, ds.y, etc...\n\n if item[0] in self.dims or self._coordset:\n\n # look also properties\n attribute = None\n index = 0\n # print(item)\n if len(item) > 2 and item[1] == '_':\n attribute = item[1:]\n item = item[0]\n index = self.dims.index(item)\n\n if self._coordset:\n try:\n c = self._coordset[item]\n if isinstance(c, str) and c in self.dims:\n # probaly a reference to another coordinate name\n c = self._coordset[c]\n\n if c.name in self.dims or c._parent_dim in self.dims:\n if attribute is not None:\n # get the attribute\n return getattr(c, attribute)\n else:\n return c\n else:\n raise AttributeError\n\n except Exception as err:\n if item in self.dims:\n return None\n else:\n raise err\n elif attribute is not None:\n if attribute == 'size':\n # we want the size but there is no coords, get it from the data shape\n return self.shape[index]\n else:\n raise AttributeError(f'Can not find `{attribute}` when no coordinate is defined')\n\n return None\n\n raise AttributeError\n\n def __setattr__(self, key, value):\n\n if key in DEFAULT_DIM_NAME: # syntax such as ds.x, ds.y, etc...\n # Note the above test is important to avoid errors with traitlets\n # even if it looks redundant with the folllowing\n if key in self.dims:\n if self._coordset is None:\n # we need to create a coordset first\n self.set_coordset(dict((self.dims[i], None) for i in range(self.ndim)))\n idx = self._coordset.names.index(key)\n _coordset = self._coordset\n listcoord = False\n if isinstance(value, list):\n listcoord = all([isinstance(item, Coord) for item in value])\n if listcoord:\n _coordset[idx] = list(CoordSet(value).to_dict().values())[0]\n _coordset[idx].name = key\n _coordset[idx]._is_same_dim = True\n elif isinstance(value, CoordSet):\n if len(value) > 1:\n value = CoordSet(value)\n _coordset[idx] = list(value.to_dict().values())[0]\n _coordset[idx].name = key\n _coordset[idx]._is_same_dim = True\n elif isinstance(value, (Coord, LinearCoord)):\n value.name = key\n _coordset[idx] = value\n else:\n _coordset[idx] = Coord(value, name=key)\n _coordset = self._valid_coordset(_coordset)\n self._coordset.set(_coordset)\n else:\n raise AttributeError(f'Coordinate `{key}` is not used.')\n else:\n super().__setattr__(key, value)\n\n # ..................................................................................................................\n def __eq__(self, other, attrs=None):\n attrs = self.__dir__()\n for attr in (\n 'filename', 'preferences', 'name', 'description', 'history', 'date', 'modified', 'origin',\n 'show_datapoints', 'roi', 'offset', 'modeldata', 'processeddata', 'baselinedata', 'referencedata',\n 'state'):\n # these attibutes are not used for comparison (comparison based on data and units!)\n try:\n attrs.remove(attr)\n except ValueError:\n pass\n\n return super().__eq__(other, attrs)\n\n # ..................................................................................................................\n def __hash__(self):\n # all instance of this class has same hash, so they can be compared\n return super().__hash__ + hash(self._coordset)\n\n # ------------------------------------------------------------------------------------------------------------------\n # Default values\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n 
@default('_coordset')\n def _coordset_default(self):\n return None\n\n # ..................................................................................................................\n @default('_modeldata')\n def _modeldata_default(self):\n return None\n\n # ..................................................................................................................\n @default('_processeddata')\n def _processeddata_default(self):\n return None\n\n # ..................................................................................................................\n @default('_baselinedata')\n def _baselinedata_default(self):\n return None\n\n # ..................................................................................................................\n @default('_referencedata')\n def _referencedata_default(self):\n return None\n\n # ------------------------------------------------------------------------------------------------------------------\n # GUI options\n # ------------------------------------------------------------------------------------------------------------------\n # TODO: refactor the spectrochempy preference system to have a common basis\n\n @property\n def state(self):\n # state of the controller window for this dataset\n return self._state\n\n @state.setter\n def state(self, val):\n self._state = val\n\n @property\n def processeddata(self):\n return self._processeddata\n\n @processeddata.setter\n def processeddata(self, val):\n self._processeddata = val\n\n @property\n def processedmask(self):\n return self._processedmask\n\n @processedmask.setter\n def processedmask(self, val):\n self._processedmask = val\n\n @property\n def baselinedata(self):\n return self._baselinedata\n\n @baselinedata.setter\n def baselinedata(self, val):\n self._baselinedata = val\n\n @property\n def referencedata(self):\n return self._referencedata\n\n @referencedata.setter\n def referencedata(self, val):\n self._referencedata = val\n\n # ------------------------------------------------------------------------------------------------------------------\n # Validators\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n @validate('_coordset')\n def _coordset_validate(self, proposal):\n coords = proposal['value']\n return self._valid_coordset(coords)\n\n def _valid_coordset(self, coords):\n # uses in coords_validate and setattr\n if coords is None:\n return\n\n for k, coord in enumerate(coords):\n\n if coord is not None and not isinstance(coord, CoordSet) and coord.data is None:\n continue\n\n # For coord to be acceptable, we require at least a NDArray, a NDArray subclass or a CoordSet\n if not isinstance(coord, (LinearCoord, Coord, CoordSet)):\n if isinstance(coord, NDArray):\n coord = coords[k] = Coord(coord)\n else:\n raise TypeError('Coordinates must be an instance or a subclass of Coord class or NDArray, or of '\n f' CoordSet class, but an instance of {type(coord)} has been passed')\n\n if self.dims and coord.name in self.dims:\n # check the validity of the given coordinates in terms of size (if it correspond to one of the dims)\n size = coord.size\n\n if self.implements('NDDataset'):\n idx = self._get_dims_index(coord.name)[0] # idx in self.dims\n if size != self._data.shape[idx]:\n raise ValueError(f'the size of a coordinates array must be None or be equal'\n f' to that of the respective 
`{coord.name}`'\n f' data dimension but coordinate size={size} != data shape[{idx}]='\n f'{self._data.shape[idx]}')\n else:\n pass # bypass this checking for any other derived type (should be done in the subclass)\n\n coords._parent = self\n return coords\n\n # ..................................................................................................................\n @property\n def _dict_dims(self):\n _dict = {}\n for index, dim in enumerate(self.dims):\n if dim not in _dict:\n _dict[dim] = {'size': self.shape[index], 'coord': getattr(self, dim)}\n return _dict\n\n # ------------------------------------------------------------------------------------------------------------------\n # public methods\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n def add_coordset(self, *coords, dims=None, **kwargs):\n \"\"\"\n Add one or a set of coordinates from a dataset.\n\n Parameters\n ----------\n *coords : iterable\n Coordinates object(s).\n dims : list\n Name of the coordinates.\n **kwargs : dict\n Keywords passed to the coordset.\n \"\"\"\n if not coords and not kwargs:\n # reset coordinates\n self._coordset = None\n return\n\n if self._coordset is None:\n # make the whole coordset at once\n self._coordset = CoordSet(*coords, dims=dims, **kwargs)\n else:\n # add one coordinate\n self._coordset._append(*coords, **kwargs)\n\n if self._coordset:\n # set a notifier to the updated traits of the CoordSet instance\n HasTraits.observe(self._coordset, self._dims_update, '_updated')\n # force it one time after this initialization\n self._coordset._updated = True\n\n # ..................................................................................................................\n def coord(self, dim='x'):\n \"\"\"\n Return the coordinates along the given dimension.\n\n Parameters\n ----------\n dim : int or str\n A dimension index or name, default index = `x`.\n If an integer is provided, it is equivalent to the `axis` parameter for numpy array.\n\n Returns\n -------\n |Coord|\n Coordinates along the given axis.\n \"\"\"\n idx = self._get_dims_index(dim)[0] # should generate an error if the\n # dimension name is not recognized\n if idx is None:\n return None\n\n if self._coordset is None:\n return None\n\n # idx is not necessarily the position of the coordinates in the CoordSet\n # indeed, transposition may have taken place. So we need to retrieve the coordinates by its name\n name = self.dims[idx]\n if name in self._coordset.names:\n idx = self._coordset.names.index(name)\n return self._coordset[idx]\n else:\n error_(f'could not find this dimenson name: `{name}`')\n return None\n\n # ..................................................................................................................\n @property\n def coordset(self):\n \"\"\"\n |CoordSet| instance.\n\n Contains the coordinates of the various dimensions of the dataset.\n It's a readonly property. 
Use set_coords to change one or more coordinates at once.\n \"\"\"\n if self._coordset and all(c.is_empty for c in self._coordset):\n # all coordinates are empty, this is equivalent to None for the coordset\n return None\n return self._coordset\n\n # ..................................................................................................................\n @coordset.setter\n def coordset(self, coords):\n if isinstance(coords, CoordSet):\n self.set_coordset(**coords)\n else:\n self.set_coordset(coords)\n\n # ..................................................................................................................\n @property\n def coordnames(self):\n \"\"\"\n List of the |Coord| names.\n\n Read only property.\n \"\"\"\n if self._coordset is not None:\n return self._coordset.names\n\n # ..................................................................................................................\n @property\n def coordtitles(self):\n \"\"\"\n List of the |Coord| titles.\n\n Read only property. Use set_coordtitle to eventually set titles.\n \"\"\"\n if self._coordset is not None:\n return self._coordset.titles\n\n # ..................................................................................................................\n @property\n def coordunits(self):\n \"\"\"\n List of the |Coord| units.\n\n Read only property. Use set_coordunits to eventually set units.\n \"\"\"\n if self._coordset is not None:\n return self._coordset.units\n\n # ..................................................................................................................\n @property\n def data(self):\n \"\"\"\n The ``data`` array.\n\n If there is no data but labels, then the labels are returned instead of data.\n \"\"\"\n return super().data\n\n # ..................................................................................................................\n @data.setter\n def data(self, data):\n # as we can't write super().data = data, we call _set_data\n # see comment in the data.setter of NDArray\n super()._set_data(data)\n\n # ..................................................................................................................\n def delete_coordset(self):\n \"\"\"\n Delete all coordinate settings.\n \"\"\"\n self._coordset = None\n\n # ..................................................................................................................\n def implements(self, name=None):\n \"\"\"\n Check if the current object implements `NDDataset`.\n\n Rather than isinstance(obj, NDDataset) use object.implements('NDDataset').\n This is useful to check type without importing the module\n\n Parameters\n ----------\n name : str\n Name of the object class. 
If None, the function returns the class name.\n If name is given, it checks if it correspond to the current class name.\n\n Returns\n -------\n str or bool\n If name is given, a bool is returned\n If name is None, the classname is returned\n\n Examples\n --------\n >>> from spectrochempy import NDDataset, Coord\n >>> co = Coord([1., 2., 3.])\n >>> co.implements('NDDataset')\n False\n >>> co.implements('Coord')\n True\n >>> ds = NDDataset([1., 2., 3.])\n >>> ds.implements()\n 'NDDataset'\n \"\"\"\n\n if name is None:\n return 'NDDataset'\n else:\n return name == 'NDDataset'\n\n # ..................................................................................................................\n @property\n def labels(self):\n # not valid for NDDataset\n # There is no label for nd-dataset\n raise NotImplementedError # pragma: no cover\n\n # ..................................................................................................................\n @property\n def modeldata(self):\n \"\"\"\n |ndarray| - models data.\n\n Data eventually generated by modelling of the data.\n \"\"\"\n return self._modeldata\n\n # ..................................................................................................................\n @modeldata.setter\n def modeldata(self, data):\n self._modeldata = data\n\n # ..................................................................................................................\n @property\n def parent(self):\n \"\"\"\n |Project| instance\n\n The parent project of the dataset.\n \"\"\"\n return self._parent\n\n # ..................................................................................................................\n @parent.setter\n def parent(self, value):\n if self._parent is not None:\n # A parent project already exists for this dataset but the\n # entered values gives a different parent. This is not allowed,\n # as it can produce impredictable results. 
We will first remove it\n # from the current project.\n self._parent.remove_dataset(self.name)\n self._parent = value\n\n # ..................................................................................................................\n def set_coordset(self, *args, **kwargs):\n \"\"\"\n Set one or more coordinates at once.\n\n Warnings\n --------\n This method replace all existing coordinates.\n\n See Also\n --------\n add_coords, set_coordtitles, set_coordunits\n \"\"\"\n self._coordset = None\n self.add_coordset(*args, dims=self.dims, **kwargs)\n\n # ..................................................................................................................\n def set_coordtitles(self, *args, **kwargs):\n \"\"\"\n Set titles of the one or more coordinates.\n \"\"\"\n self._coordset.set_titles(*args, **kwargs)\n\n # ..................................................................................................................\n def set_coordunits(self, *args, **kwargs):\n \"\"\"\n Set units of the one or more coordinates.\n \"\"\"\n self._coordset.set_units(*args, **kwargs)\n\n # ..................................................................................................................\n def sort(self, **kwargs):\n \"\"\"\n Returns the dataset sorted along a given dimension.\n\n (by default, the last dimension [axis=-1]) using the numeric or label values.\n\n Parameters\n ----------\n dim : str or int, optional, default=-1\n dimension index or name along which to sort.\n pos : int , optional\n If labels are multidimensional - allow to sort on a define\n row of labels : labels[pos]. Experimental : Not yet checked.\n by : str among ['value', 'label'], optional, default=``value``\n Indicate if the sorting is following the order of labels or\n numeric coord values.\n descend : `bool`, optional, default=`False`\n If true the dataset is sorted in a descending direction. Default is False except if coordinates\n are reversed.\n inplace : bool, optional, default=`False`\n Flag to say that the method return a new object (default)\n or not (inplace=True).\n\n Returns\n -------\n sorted_dataset\n \"\"\"\n\n inplace = kwargs.get('inplace', False)\n if not inplace:\n new = self.copy()\n else:\n new = self\n\n # parameter for selecting the level of labels (default None or 0)\n pos = kwargs.pop('pos', None)\n\n # parameter to say if selection is done by values or by labels\n by = kwargs.pop('by', 'value')\n\n # determine which axis is sorted (dims or axis can be passed in kwargs)\n # it will return a tuple with axis and dim\n axis, dim = self.get_axis(**kwargs)\n if axis is None:\n axis, dim = self.get_axis(axis=0)\n\n # get the corresponding coordinates (remember the their order can be different form the order\n # of dimension in dims. 
S we cannot jsut take the coord from the indice.\n coord = getattr(self, dim) # get the coordinate using the syntax such as self.x\n\n descend = kwargs.pop('descend', None)\n if descend is None:\n # when non specified, default is False (except for reversed coordinates\n descend = coord.reversed\n\n # import warnings\n # warnings.simplefilter(\"error\")\n\n indexes = []\n for i in range(self.ndim):\n if i == axis:\n if not coord.has_data:\n # sometimes we have only label for Coord objects.\n # in this case, we sort labels if they exist!\n if coord.is_labeled:\n by = 'label'\n else:\n # nothing to do for sorting\n # return self itself\n return self\n\n args = coord._argsort(by=by, pos=pos, descend=descend)\n setattr(new, dim, coord[args])\n indexes.append(args)\n else:\n indexes.append(slice(None))\n\n new._data = new._data[tuple(indexes)]\n if new.is_masked:\n new._mask = new._mask[tuple(indexes)]\n\n return new\n\n # ..................................................................................................................\n def squeeze(self, *dims, inplace=False):\n \"\"\"\n Remove single-dimensional entries from the shape of a NDDataset.\n\n Parameters\n ----------\n dim : None or int or tuple of ints, optional\n Selects a subset of the single-dimensional entries in the\n shape. If a dimension (dim) is selected with shape entry greater than\n one, an error is raised.\n inplace : bool, optional, default=`False`\n Flag to say that the method return a new object (default)\n or not (inplace=True).\n\n Returns\n -------\n squeezed\n The input array, but with all or a subset of the\n dimensions of length 1 removed.\n\n Raises\n ------\n ValueError\n If `dim` is not `None`, and the dimension being squeezed is not\n of length 1.\n \"\"\"\n # make a copy of the original dims\n old = self.dims[:]\n\n # squeeze the data and determine which axis must be squeezed\n new, axis = super().squeeze(*dims, inplace=inplace, return_axis=True)\n\n if axis is not None and new._coordset is not None:\n # if there are coordinates they have to be squeezed as well (remove\n # coordinate for the squeezed axis)\n\n for i in axis:\n dim = old[i]\n del new._coordset[dim]\n\n return new\n\n def expand_dims(self, dim=None):\n \"\"\"\n Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded array shape.\n\n Parameters\n ----------\n dim : int or str\n Position in the expanded axes where the new axis (or axes) is placed.\n\n Returns\n -------\n result : ndarray\n View of `a` with the number of dimensions increased.\n\n See Also\n --------\n squeeze : The inverse operation, removing singleton dimensions\n \"\"\" # TODO\n\n # ..................................................................................................................\n def swapdims(self, dim1, dim2, inplace=False):\n \"\"\"\n Interchange two dimensions of a NDDataset.\n\n Parameters\n ----------\n dim1 : int\n First axis.\n dim2 : int\n Second axis.\n inplace : bool, optional, default=`False`\n Flag to say that the method return a new object (default)\n or not (inplace=True).\n\n Returns\n -------\n swaped_dataset\n\n See Also\n --------\n transpose\n \"\"\"\n\n new = super().swapdims(dim1, dim2, inplace=inplace)\n new.history = f'Data swapped between dims {dim1} and {dim2}'\n return new\n\n # ..................................................................................................................\n @property\n def T(self):\n \"\"\"\n Transposed |NDDataset|.\n\n The same object is 
returned if `ndim` is less than 2.\n \"\"\"\n return self.transpose()\n\n # ..................................................................................................................\n def take(self, indices, **kwargs):\n \"\"\"\n Take elements from an array\n\n Parameters\n ----------\n indices\n kwargs\n\n Returns\n -------\n \"\"\"\n\n # handle the various syntax to pass the axis\n dims = self._get_dims_from_args(**kwargs)\n axis = self._get_dims_index(dims)\n axis = axis[0] if axis else None\n\n # indices = indices.tolist()\n if axis is None:\n # just do a fancy indexing\n return self[indices]\n\n if axis < 0:\n axis = self.ndim + axis\n\n index = tuple([...] + [indices] + [slice(None) for i in range(self.ndim - 1 - axis)])\n new = self[index]\n return new\n\n def to_array(self):\n \"\"\"\n Return a numpy masked array (i.e., other NDDataset attributes are lost.\n\n Examples\n ========\n >>> import spectrochempy as scp\n >>> dataset = scp.read('wodger.spg')\n >>> a = scp.to_array(dataset)\n\n equivalent to:\n\n >>> a = np.ma.array(dataset)\n\n or\n\n >>> a= dataset.masked_data\n \"\"\"\n return np.ma.array(self)\n\n # ..................................................................................................................\n def to_xarray(self, **kwargs):\n \"\"\"\n Convert a NDDataset instance to an `~xarray.DataArray` object\n ( the xarray library must be available )\n\n Parameters\n\n Returns\n -------\n object : a xarray.DataArray object\n \"\"\"\n # Information about DataArray from the DataArray docstring\n #\n # Attributes\n # ----------\n # dims: tuple\n # Dimension names associated with this array.\n # values: np.ndarray\n # Access or modify DataArray values as a numpy array.\n # coords: dict-like\n # Dictionary of DataArray objects that label values along each dimension.\n # name: str or None\n # Name of this array.\n # attrs: OrderedDict\n # Dictionary for holding arbitrary metadata.\n # Init docstring\n #\n # Parameters\n # ----------\n # data: array_like\n # Values for this array. Must be an ``numpy.ndarray``, ndarray like,\n # or castable to an ``ndarray``.\n # coords: sequence or dict of array_like objects, optional\n # Coordinates (tick labels) to use for indexing along each dimension.\n # If dict-like, should be a mapping from dimension names to the\n # corresponding coordinates. If sequence-like, should be a sequence\n # of tuples where the first element is the dimension name and the\n # second element is the corresponding coordinate array_like object.\n # dims: str or sequence of str, optional\n # Name(s) of the data dimension(s). Must be either a string (only\n # for 1D data) or a sequence of strings with length equal to the\n # number of dimensions. If this argument is omitted, dimension names\n # are taken from ``coords`` (if possible) and otherwise default to\n # ``['dim_0', ... 'dim_n']``.\n # name: str or None, optional\n # Name of this array.\n # attrs: dict_like or None, optional\n # Attributes to assign to the new instance. By default, an empty\n # attribute dictionary is initialized.\n # encoding: dict_like or None, optional\n # Dictionary specifying how to encode this array's data into a\n # serialized format like netCDF4. Currently used keys (for netCDF)\n # include '_FillValue', 'scale_factor', 'add_offset', 'dtype',\n # 'units' and 'calendar' (the later two only for datetime arrays).\n # Unrecognized keys are ignored.\n\n if not HAS_XARRAY:\n warnings.warn('Xarray is not available! 
This function can not be used', SpectroChemPyWarning)\n return None\n\n x, y = self.x, self.y\n tx = x.title\n if y:\n ty = y.title\n da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(ty, y.data), (tx, x.data)], )\n\n da.attrs['units'] = self.units\n else:\n da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(tx, x.data)], )\n\n da.attrs['units'] = self.units\n\n da.attrs['title'] = self.title\n\n return da\n\n # ..................................................................................................................\n def transpose(self, *dims, inplace=False):\n \"\"\"\n Permute the dimensions of a NDDataset.\n\n Parameters\n ----------\n dims : sequence of dimension indexes or names, optional\n By default, reverse the dimensions, otherwise permute the dimensions\n according to the values given.\n inplace : bool, optional, default=`False`\n Flag to say that the method return a new object (default)\n or not (inplace=True).\n\n Returns\n -------\n transposed_array\n\n See Also\n --------\n swapdims : Interchange two dimensions of a NDDataset.\n \"\"\"\n new = super().transpose(*dims, inplace=inplace)\n new.history = f'Data transposed between dims: {dims}' if dims else ''\n\n return new\n\n # ------------------------------------------------------------------------------------------------------------------\n # private methods\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n def _cstr(self):\n # Display the metadata of the object and partially the data\n out = ''\n out += ' name: {}\\n'.format(self.name)\n out += ' author: {}\\n'.format(self.author)\n out += ' created: {}\\n'.format(self._date)\n # out += ' modified: {}\\n'.format(self._modified) if (self.modified - self.date).seconds > 1 else ''\n\n wrapper1 = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 15, replace_whitespace=True,\n width=self._text_width)\n\n pars = self.description.strip().splitlines()\n if pars:\n out += ' description: '\n desc = ''\n if pars:\n desc += '{}\\n'.format(wrapper1.fill(pars[0]))\n for par in pars[1:]:\n desc += '{}\\n'.format(textwrap.indent(par, ' ' * 15))\n # the three escaped null characters are here to facilitate\n # the generation of html outputs\n desc = '\\0\\0\\0{}\\0\\0\\0\\n'.format(desc.rstrip())\n out += desc\n\n if self._history:\n pars = self.history\n out += ' history: '\n hist = ''\n if pars:\n hist += '{}\\n'.format(wrapper1.fill(pars[0]))\n for par in pars[1:]:\n hist += '{}\\n'.format(textwrap.indent(par, ' ' * 15))\n # the three escaped null characters are here to facilitate\n # the generation of html outputs\n hist = '\\0\\0\\0{}\\0\\0\\0\\n'.format(hist.rstrip())\n out += hist\n\n out += '{}\\n'.format(self._str_value().rstrip())\n out += '{}\\n'.format(self._str_shape().rstrip()) if self._str_shape() else ''\n out += '{}\\n'.format(self._str_dims().rstrip())\n\n if not out.endswith('\\n'):\n out += '\\n'\n out += '\\n'\n\n if not self._html_output:\n return colored_output(out.rstrip())\n else:\n return out.rstrip()\n\n # ..................................................................................................................\n def _loc2index(self, loc, dim=-1):\n # Return the index of a location (label or coordinates) along the dim\n # This can work only if `coords` exists.\n\n if self._coordset is None:\n raise 
SpectroChemPyException('No coords have been defined. Slicing or selection'\n ' by location ({}) needs coords definition.'.format(loc))\n\n coord = self.coord(dim)\n\n return coord._loc2index(loc)\n\n # ..................................................................................................................\n def _str_dims(self):\n if self.is_empty:\n return ''\n if len(self.dims) < 1 or not hasattr(self, \"_coordset\"):\n return ''\n if not self._coordset or len(self._coordset) < 1:\n return ''\n\n self._coordset._html_output = self._html_output # transfert the html flag if necessary: false by default\n\n txt = self._coordset._cstr()\n txt = txt.rstrip() # remove the trailing '\\n'\n return txt\n\n _repr_dims = _str_dims\n\n # ------------------------------------------------------------------------------------------------------------------\n # events\n # ------------------------------------------------------------------------------------------------------------------\n\n def _dims_update(self, change=None):\n # when notified that a coords names have been updated\n _ = self.dims # fire an update\n\n # ..................................................................................................................\n\n\n# ======================================================================================================================\n# module function\n# ======================================================================================================================\n\n# make some NDDataset operation accessible from the spectrochempy API\nthismodule = sys.modules[__name__]\n\napi_funcs = ['sort', 'copy', 'squeeze', 'swapdims', 'transpose', 'to_array', 'to_xarray', 'take', 'set_complex',\n 'set_quaternion', 'set_hypercomplex', 'component', 'to', 'to_base_units', 'to_reduced_units', 'ito',\n 'ito_base_units', 'ito_reduced_units', 'is_units_compatible', 'remove_masks']\n\n# todo: check the fact that some function are defined also in ndmath\nfor funcname in api_funcs:\n setattr(thismodule, funcname, getattr(NDDataset, funcname))\n\n thismodule.__all__.append(funcname)\n\n# load one method from NDIO\nload = NDDataset.load\n__all__ += ['load']\n\n# ======================================================================================================================\n# Set the operators\n# ======================================================================================================================\n\n_set_operators(NDDataset, priority=100000)\n_set_ufuncs(NDDataset)\n", "# -*- coding: utf-8 -*-\n\n# ======================================================================================================================\n# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. 
=\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =\n# ======================================================================================================================\n\n__all__ = ['read_csv']\n__dataset_methods__ = __all__\n\n# ----------------------------------------------------------------------------------------------------------------------\n# standard and other imports\n# ----------------------------------------------------------------------------------------------------------------------\n\nimport warnings\nimport locale\nimport io\nfrom datetime import datetime, timezone\n\nimport numpy as np\n\nfrom spectrochempy.core.dataset.coord import Coord\nfrom spectrochempy.core import preferences as prefs\nfrom spectrochempy.core.readers.importer import Importer, importermethod\n\ntry:\n locale.setlocale(locale.LC_ALL, 'en_US') # to avoid problems with date format\nexcept Exception:\n try:\n locale.setlocale(locale.LC_ALL, 'en_US.utf8') # to avoid problems with date format\n except Exception:\n warnings.warn('Could not set locale: en_US or en_US.utf8')\n\n\n# ======================================================================================================================\n# Public functions\n# ======================================================================================================================\ndef read_csv(*paths, **kwargs):\n \"\"\"\n Open a *.csv file or a list of *.csv files.\n\n This is limited to 1D array - csv file must have two columns [index, data]\n without header.\n\n Parameters\n ----------\n *paths : str, pathlib.Path object, list of str, or list of pathlib.Path objects, optional\n The data source(s) can be specified by the name or a list of name for the file(s) to be loaded:\n\n *e.g.,( file1, file2, ..., **kwargs )*\n\n If the list of filenames are enclosed into brackets:\n\n *e.g.,* ( **[** *file1, file2, ...* **]**, **kwargs *)*\n\n The returned datasets are merged to form a single dataset,\n except if `merge` is set to False. If a source is not provided (i.e. no `filename`, nor `content`),\n a dialog box will be opened to select files.\n **kwargs : dict\n See other parameters.\n\n Returns\n --------\n read_csv\n |NDDataset| or list of |NDDataset|.\n\n Other Parameters\n ----------------\n protocol : {'scp', 'omnic', 'opus', 'topspin', 'matlab', 'jcamp', 'csv', 'excel'}, optional\n Protocol used for reading. If not provided, the correct protocol\n is inferred (whnever it is possible) from the file name extension.\n directory : str, optional\n From where to read the specified `filename`. If not specified, read in the default ``datadir`` specified in\n SpectroChemPy Preferences.\n merge : bool, optional\n Default value is False. 
If True, and several filenames have been provided as arguments,\n then a single dataset with merged (stacked along the first\n dimension) is returned (default=False).\n sortbydate : bool, optional\n Sort multiple spectra by acquisition date (default=True).\n description: str, optional\n A Custom description.\n origin : {'omnic', 'tga'}, optional\n in order to properly interpret CSV file it can be necessary to set the origin of the spectra.\n Up to now only 'omnic' and 'tga' have been implemented.\n csv_delimiter : str, optional\n Set the column delimiter in CSV file.\n By default it is the one set in SpectroChemPy ``Preferences``.\n content : bytes object, optional\n Instead of passing a filename for further reading, a bytes content can be directly provided as bytes objects.\n The most convenient way is to use a dictionary. This feature is particularly useful for a GUI Dash application\n to handle drag and drop of files into a Browser.\n For exemples on how to use this feature, one can look in the ``tests/tests_readers`` directory.\n listdir : bool, optional\n If True and filename is None, all files present in the provided `directory` are returned (and merged if `merge`\n is True. It is assumed that all the files correspond to current reading protocol (default=True).\n recursive : bool, optional\n Read also in subfolders. (default=False).\n\n See Also\n --------\n read_topspin : Read TopSpin Bruker NMR spectra.\n read_omnic : Read Omnic spectra.\n read_opus : Read OPUS spectra.\n read_spg : Read Omnic *.spg grouped spectra.\n read_spa : Read Omnic *.Spa single spectra.\n read_srs : Read Omnic series.\n read_zip : Read Zip files.\n read_matlab : Read Matlab files.\n read : Generic file reading.\n\n Examples\n ---------\n >>> import spectrochempy as scp\n >>> scp.read_csv('agirdata/P350/TGA/tg.csv')\n NDDataset: [float64] unitless (shape: (y:1, x:3247))\n\n Additional information can be stored in the dataset if the origin is given\n (known origin for now : tga or omnic)\n # TODO: define some template to allow adding new origins\n\n >>> scp.read_csv('agirdata/P350/TGA/tg.csv', origin='tga')\n NDDataset: [float64] wt.% (shape: (y:1, x:3247))\n\n Sometimes the delimiteur needs to be adjusted\n\n >>> prefs = scp.preferences\n >>> scp.read_csv('irdata/IR.CSV', directory=prefs.datadir, origin='omnic', csv_delimiter=',')\n NDDataset: [float64] a.u. 
(shape: (y:1, x:3736))\n \"\"\"\n kwargs['filetypes'] = ['CSV files (*.csv)']\n kwargs['protocol'] = ['csv']\n importer = Importer()\n return importer(*paths, **kwargs)\n\n\n# ======================================================================================================================\n# Private functions\n# ======================================================================================================================\n\n@importermethod\ndef _read_csv(*args, **kwargs):\n # read csv file\n dataset, filename = args\n content = kwargs.get('content', None)\n delimiter = kwargs.get(\"csv_delimiter\", prefs.csv_delimiter)\n\n def _open():\n if content is not None:\n f = io.StringIO(content.decode(\"utf-8\"))\n else:\n f = open(filename, 'r')\n return f\n\n try:\n fid = _open()\n d = np.loadtxt(fid, unpack=True, delimiter=delimiter)\n fid.close()\n except ValueError:\n # it might be that the delimiter is not correct (default is ','), but\n # french excel export with the french locale for instance, use \";\".\n _delimiter = ';'\n try:\n fid = _open()\n if fid:\n fid.close()\n fid = _open()\n d = np.loadtxt(fid, unpack=True, delimiter=_delimiter)\n fid.close()\n except Exception:\n # in french, very often the decimal '.' is replaced by a\n # comma: Let's try to correct this\n if fid:\n fid.close()\n if not isinstance(fid, io.StringIO):\n with open(fid, \"r\") as fid_:\n txt = fid_.read()\n else:\n txt = fid.read()\n txt = txt.replace(',', '.')\n fil = io.StringIO(txt)\n try:\n d = np.loadtxt(fil, unpack=True, delimiter=delimiter)\n except Exception:\n raise IOError(\n '{} is not a .csv file or its structure cannot be recognized')\n\n # First column is the x coordinates\n coordx = Coord(d[0])\n\n # create a second coordinate for dimension y of size 1\n coordy = Coord([0])\n\n # and data is the second column - we make it a vector\n data = d[1].reshape((1, coordx.size))\n\n # update the dataset\n dataset.data = data\n dataset.set_coordset(y=coordy, x=coordx)\n\n # set the additional attributes\n name = filename.stem\n dataset.filename = filename\n dataset.name = kwargs.get('name', name)\n dataset.title = kwargs.get('title', None)\n dataset.units = kwargs.get('units', None)\n dataset.description = kwargs.get('description',\n '\"name\" ' + 'read from .csv file')\n dataset.history = str(datetime.now(timezone.utc)) + ':read from .csv file \\n'\n dataset._date = datetime.now(timezone.utc)\n dataset._modified = dataset.date\n\n # here we can check some particular format\n origin = kwargs.get('origin', '')\n if 'omnic' in origin:\n # this will be treated as csv export from omnic (IR data)\n dataset = _add_omnic_info(dataset, **kwargs)\n elif 'tga' in origin:\n # this will be treated as csv export from tga analysis\n dataset = _add_tga_info(dataset, **kwargs)\n elif origin:\n origin = kwargs.get('origin', None)\n raise NotImplementedError(f\"Sorry, but reading a csv file with '{origin}' origin is not implemented. 
\"\n \"Please, remove or set the keyword 'origin'\\n \"\n '(Up to now implemented csv files are: `omnic`, `tga`)')\n return dataset\n\n\n# .............................................................................\ndef _add_omnic_info(dataset, **kwargs):\n # get the time and name\n name = desc = dataset.name\n\n # modify the dataset metadata\n dataset.units = 'absorbance'\n dataset.title = 'absorbance'\n dataset.name = name\n dataset.description = ('Dataset from .csv file: {}\\n'.format(desc))\n dataset.history = str(datetime.now(timezone.utc)) + ':read from omnic exported csv file \\n'\n dataset.origin = 'omnic'\n\n # Set the NDDataset date\n dataset._date = datetime.now(timezone.utc)\n dataset._modified = dataset.date\n\n # x axis\n dataset.x.units = 'cm^-1'\n\n # y axis ?\n if '_' in name:\n name, dat = name.split('_')\n # if needed convert weekday name to English\n dat = dat.replace('Lun', 'Mon')\n dat = dat[:3].replace('Mar', 'Tue') + dat[3:]\n dat = dat.replace('Mer', 'Wed')\n dat = dat.replace('Jeu', 'Thu')\n dat = dat.replace('Ven', 'Fri')\n dat = dat.replace('Sam', 'Sat')\n dat = dat.replace('Dim', 'Sun')\n # convert month name to English\n dat = dat.replace('Aout', 'Aug')\n\n # get the dates\n acqdate = datetime.strptime(dat, \"%a %b %d %H-%M-%S %Y\")\n\n # Transform back to timestamp for storage in the Coord object\n # use datetime.fromtimestamp(d, timezone.utc))\n # to transform back to datetime obkct\n timestamp = acqdate.timestamp()\n\n dataset.y = Coord(np.array([timestamp]), name='y')\n dataset.set_coordtitles(y='acquisition timestamp (GMT)', x='wavenumbers')\n dataset.y.labels = np.array([[acqdate], [name]])\n dataset.y.units = 's'\n\n return dataset\n\n\ndef _add_tga_info(dataset, **kwargs):\n # for TGA, some information are needed.\n # we add them here\n dataset.x.units = 'hour'\n dataset.units = 'weight_percent'\n dataset.x.title = 'time-on-stream'\n dataset.title = 'mass change'\n dataset.origin = 'tga'\n\n return dataset\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nif __name__ == '__main__':\n pass\n", "# -*- coding: utf-8 -*-\n\n# ======================================================================================================================\n# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. 
=\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =\n# ======================================================================================================================\n\n# \"\"\"\n# Main package\n#\n# During the initialization of this package, a `matplotlib` backend is set\n# and some `IPython` configurations are made.\n#\n#\n# \"\"\"\n\nimport sys\n\nimport matplotlib as mpl\n\nfrom IPython.core.interactiveshell import InteractiveShell\nfrom IPython import get_ipython\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Check the environment for plotting\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Do we run in IPython ?\nIN_IPYTHON = False\nkernel = None\nip = None\nif InteractiveShell.initialized():\n IN_IPYTHON = True\n ip = get_ipython()\n kernel = getattr(ip, \"kernel\", None)\n\nNO_DISPLAY = False\nNO_DIALOG = False\n\n# Are we buidings the docs ?\nif 'make.py' in sys.argv[0]:\n # if we are building the documentation, in principle it should be done\n # using the make.py located at the root of the spectrchempy package.\n NO_DISPLAY = True\n NO_DIALOG = True\n mpl.use('agg', force=True)\n\n# is there a --nodisplay flag\nif '--nodisplay' in sys.argv:\n NO_DISPLAY = True\n NO_DIALOG = True\n mpl.use('agg', force=True)\n\n# Are we running pytest?\nif 'pytest' in sys.argv[0] or 'py.test' in sys.argv[0]:\n # if we are testing we also like a silent work with no figure popup!\n NO_DISPLAY = True\n NO_DIALOG = True\n\n # OK, but if we are doing individual function testing in PyCharm\n # it is interesting to see the plots and the file dialogs (except if we set explicitely --nodisplay argument!\n # if len(sys.argv) > 1 and not any([arg.endswith(\".py\") for arg in sys.argv[1:]]) and '--nodisplay' not in sys.argv:\n if len(sys.argv) > 1 and any(\n [arg.split('::')[0].endswith(\".py\") for arg in sys.argv[1:]]) and '--nodisplay' not in sys.argv:\n # individual module testing\n NO_DISPLAY = False\n NO_DIALOG = False\n\n if NO_DISPLAY:\n mpl.use('agg', force=True)\n\n# Are we running in PyCharm scientific mode?\nif mpl.get_backend() == 'module://backend_interagg':\n IN_PYCHARM_SCIMODE = True\nelse:\n IN_PYCHARM_SCIMODE = False\n\nif not (IN_IPYTHON and kernel) and not IN_PYCHARM_SCIMODE and not NO_DISPLAY:\n backend = mpl.rcParams['backend'] # 'Qt5Agg'\n mpl.use(backend, force=True)\n\nALL = ['NO_DISPLAY', 'NO_DIALOG']\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Now we can start loading the API\n# ----------------------------------------------------------------------------------------------------------------------\n# import the core api\nfrom spectrochempy import core\nfrom spectrochempy.core import * # noqa: F403, F401, E402\n\nALL += core.__all__\n\n\nif not IN_IPYTHON:\n # needed in windows terminal - but must not be inited in Jupyter notebook\n from colorama import init as initcolor\n\n initcolor()\n\n# def set_backend():\n\n# workaround this problem https://github.com/jupyter/notebook/issues/3385\n# ip.magic('matplotlib notebook')\n\nif IN_IPYTHON and kernel and not NO_DISPLAY:\n try:\n if 'ipykernel_launcher' in sys.argv[0] and \\\n \"--InlineBackend.rc={'figure.dpi': 96}\" in sys.argv:\n # We are running from NBSphinx - the plot must be inline to show up.\n ip.magic('matplotlib inline')\n else:\n # Do 
not set the widget backend.... do not work most of the time after upbgrade of the various\n # library and\n # jupyter!!! ...\n ip.magic('matplotlib inline') # widget\n except Exception:\n ip.magic('matplotlib qt')\n\n\n# set_backend()\n\n# a usefull utilities for dealing with path\nfrom spectrochempy.utils import pathclean\n\nDATADIR = pathclean(preferences.datadir)\n\n__all__ = ['pathclean', 'DATADIR'] + ALL\n\nimport warnings\n\nwarnings.filterwarnings(action='ignore', module='matplotlib') # , category=UserWarning)\n# warnings.filterwarnings(action=\"error\", category=DeprecationWarning)\n\n# ==============================================================================\nif __name__ == '__main__':\n pass\n", "# -*- coding: utf-8 -*-\n\n# ======================================================================================================================\n# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie,\n# Caen, France. =\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in\n# the root directory =\n# ======================================================================================================================\n\"\"\"\nThis module mainly contains the definition of a Meta class object\n\nSuch object is particularly used in `SpectrochemPy` by the |NDDataset| object\nto store metadata. Like a regular dictionary, the\nelements can be accessed by key, but also by attributes, *e.g.*\n``a = meta['key']`` give the same results as ``a = meta.key``.\n\"\"\"\n\n# from traitlets import HasTraits, Dict, Bool, default\n\n# import sys\nimport copy\nimport json\n\nimport numpy as np\n\n# constants\n# ----------------------------------------------------------------------------------------------------------------------\n\n__all__ = ['Meta']\n\n\n# ======================================================================================================================\n# Class Meta\n# ======================================================================================================================\n\nclass Meta(object): # HasTraits):\n \"\"\"A dictionary to store metadata.\n\n The metadata are accessible by item or by attributes, and\n the dictionary can be made read-only if necessary.\n\n Examples\n --------\n\n First we initialise a metadata object\n\n >>> m = Meta()\n\n then, metadata can be set by attribute (or by key like in a regular\n dictionary), and further accessed by attribute (or key):\n\n >>> m.chaine = \"a string\"\n >>> m[\"entier\"] = 123456\n >>> print(m.entier)\n 123456\n >>> print(m.chaine)\n a string\n\n One can make the dictionary read-only\n\n >>> m.readonly = True\n >>> m.chaine = \"a modified string\"\n Traceback (most recent call last):\n ...\n ValueError : 'the metadata `chaine` is read only'\n >>> print(m.chaine)\n a string\n\n .. 
rubric:: Methods\n \"\"\"\n\n # ------------------------------------------------------------------------------------------------------------------\n # private attributes\n # ------------------------------------------------------------------------------------------------------------------\n\n _data = {}\n\n # ------------------------------------------------------------------------------------------------------------------\n # public attributes\n # ------------------------------------------------------------------------------------------------------------------\n\n readonly = False # Bool(False)\n parent = None\n name = None\n\n # ------------------------------------------------------------------------------------------------------------------\n # special methods\n # ------------------------------------------------------------------------------------------------------------------\n def __init__(self, **data):\n \"\"\"\n Parameters\n ----------\n **data : keywords\n The dictionary can be already inited with some keywords.\n \"\"\"\n self.parent = data.pop('parent', None)\n self.name = data.pop('name', None)\n self._data = data\n\n def __dir__(self):\n return ['data', 'readonly', 'parent', 'name']\n\n def __setattr__(self, key, value):\n if key not in ['readonly', 'parent', 'name', '_data', '_trait_values', '_trait_notifiers', '_trait_validators',\n '_cross_validation_lock', '__wrapped__']:\n self[key] = value\n else:\n self.__dict__[key] = value # to avoid a recursive call # we can not use # self._readonly = value!\n\n def __getattr__(self, key):\n if key.startswith('_ipython') or key.startswith('_repr'):\n raise AttributeError\n if key in ['__wrapped__']:\n return False\n return self[key]\n\n def __setitem__(self, key, value):\n if key in self.__dir__() or key.startswith('_'):\n raise KeyError('`{}` can not be used as a metadata key'.format(key))\n elif not self.readonly:\n self._data.update({key: value})\n else:\n raise ValueError('the metadata `{}` is read only'.format(key))\n\n def __getitem__(self, key):\n return self._data.get(key, None)\n\n def __len__(self):\n return len(self._data)\n\n def __copy__(self):\n ret = self.__class__()\n ret.update(copy.deepcopy(self._data))\n ret.readonly = self.readonly\n ret.parent = self.parent\n ret.name = self.name\n return ret\n\n def __deepcopy__(self, memo=None):\n return self.__copy__()\n\n def __eq__(self, other):\n m1 = self._data\n if hasattr(other, \"_data\"):\n m2 = other._data\n elif isinstance(other, dict):\n m2 = other\n else:\n return False\n eq = True\n for k, v in m1.items():\n if isinstance(v, list):\n for i, ve in enumerate(v):\n eq &= np.all(ve == m2[k][i])\n else:\n eq &= np.all(v == m2.get(k, None))\n return eq\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __iter__(self):\n for item in sorted(self._data.keys()):\n yield item\n\n def __str__(self):\n return str(self._data)\n\n def _repr_html_(self):\n s = json.dumps(self._data, sort_keys=True, indent=4)\n return s.replace('\\n', '<br/>').replace(' ', '&nbsp;')\n\n # ------------------------------------------------------------------------------------------------------------------\n # public methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def implements(self, name=None):\n if name is None:\n return 'Meta'\n else:\n return name == 'Meta'\n\n def to_dict(self):\n \"\"\"Transform a metadata dictionary to a regular one.\n\n Returns\n -------\n dict\n A regular dictionary\n \"\"\"\n\n 
return self._data\n\n def get(self, key, default=None):\n \"\"\"\n\n Parameters\n ----------\n :param key:\n :return:\n \"\"\"\n return self._data.get(key, default)\n\n def update(self, d):\n \"\"\"Feed a metadata dictionary with the content of an another\n dictionary\n\n Parameters\n ----------\n d : dict-like object\n Any dict-like object can be used, such as `dict`, traits `Dict` or\n another `Meta` object.\n \"\"\"\n\n if isinstance(d, Meta) or hasattr(d, '_data'):\n d = d.to_dict()\n if d:\n self._data.update(d)\n\n def copy(self):\n \"\"\" Return a disconnected copy of self.\n\n Returns\n -------\n meta\n A disconnected meta object identical to the original object\n \"\"\"\n return self.__copy__()\n\n def keys(self):\n \"\"\"A list of metadata contained in the object.\n\n Returns\n -------\n list\n A sorted key's list\n\n Examples\n --------\n >>> m = Meta()\n >>> m.td = 10\n >>> m.si = 20\n >>> print(m.keys())\n ['si', 'td']\n\n Notes\n -----\n Alternatively, it is possible to iter directly on the Meta object\n\n >>> m = Meta()\n >>> m.td = 10\n >>> m.si = 20\n >>> for key in m :\n ... print(key)\n si\n td\n \"\"\"\n return [key for key in self]\n\n def items(self):\n \"\"\"A list of metadata items contained in the object.\n\n Returns\n -------\n list\n An item list sorted by key\n\n Examples\n --------\n >>> m = Meta()\n >>> m.td = 10\n >>> m.si = 20\n >>> print(m.items())\n [('si', 20), ('td', 10)]\n \"\"\"\n return [(key, self[key]) for key in self]\n\n def swap(self, dim1, dim2, inplace=True):\n \"\"\"\n Permute meta corresponding to distinct axis to reflect swapping on the\n corresponding data array\n\n Parameters\n ----------\n dim1\n dim2\n inplace\n\n Returns\n -------\n \"\"\"\n\n newmeta = self.copy()\n\n newmeta.readonly = False\n newmeta.parent = None\n newmeta.name = None\n\n for key in self:\n if isinstance(self[key], list) and len(self[key]) > 1:\n # print (newmeta[key], len(self[key]))\n X = newmeta[key]\n X[dim1], X[dim2] = X[dim2], X[dim1]\n else:\n newmeta[key] = self[key]\n\n newmeta.readonly = self.readonly\n newmeta.parent = self.parent\n newmeta.name = self.name\n\n if not inplace:\n return newmeta\n else:\n self._data = newmeta._data\n\n def permute(self, *dims, inplace=True):\n \"\"\"\n\n Parameters\n ----------\n dims\n inplace\n\n Returns\n -------\n \"\"\"\n\n newmeta = self.copy()\n\n newmeta.readonly = False\n newmeta.parent = None\n newmeta.name = None\n\n for key in self:\n if isinstance(self[key], list) and len(self[key]) > 1:\n newmeta[key] = type(self[key])()\n for dim in dims:\n newmeta[key].append(self[key][dim])\n else:\n newmeta[key] = self[key]\n\n newmeta.readonly = self.readonly\n newmeta.parent = self.parent\n newmeta.name = self.name\n\n if not inplace:\n return newmeta\n else:\n self._data = newmeta._data\n\n @property\n def data(self):\n return self._data\n", "# -*- coding: utf-8 -*-\n\n# ======================================================================================================================\n# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. 
=\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =\n# ======================================================================================================================\n\"\"\"\nThis module to extend NDDataset with the import methods.\n\"\"\"\n\n__all__ = ['read_jcamp', 'read_jdx', 'read_dx']\n__dataset_methods__ = __all__\n\nimport io\nimport re\nfrom datetime import datetime, timezone\nimport numpy as np\n\nfrom spectrochempy.core.dataset.coord import Coord\nfrom spectrochempy.core.readers.importer import Importer, importermethod\nfrom spectrochempy.utils.exceptions import deprecated\n\n\n# ======================================================================================================================\n# Public functions\n# ======================================================================================================================\ndef read_jcamp(*paths, **kwargs):\n \"\"\"\n Open Infrared JCAMP-DX files with extension ``.jdx`` or ``.dx``.\n\n Limited to AFFN encoding (see R. S. McDonald and Paul A. Wilks,\n JCAMP-DX: A Standard Form for Exchange of Infrared Spectra in Computer Readable Form,\n Appl. Spec., 1988, 1, 151–162. doi:10.1366/0003702884428734.)\n\n Parameters\n ----------\n *paths : str, pathlib.Path object, list of str, or list of pathlib.Path objects, optional\n The data source(s) can be specified by the name or a list of name for the file(s) to be loaded:\n\n *e.g.,( file1, file2, ..., **kwargs )*\n\n If the list of filenames are enclosed into brackets:\n\n *e.g.,* ( **[** *file1, file2, ...* **]**, **kwargs *)*\n\n The returned datasets are merged to form a single dataset,\n except if `merge` is set to False. If a source is not provided (i.e. no `filename`, nor `content`),\n a dialog box will be opened to select files.\n **kwargs : dict\n See other parameters.\n\n Returns\n --------\n read_jcamp\n |NDDataset| or list of |NDDataset|.\n\n Other Parameters\n ----------------\n protocol : {'scp', 'omnic', 'opus', 'topspin', 'matlab', 'jcamp', 'csv', 'excel'}, optional\n Protocol used for reading. If not provided, the correct protocol\n is inferred (whnever it is possible) from the file name extension.\n directory : str, optional\n From where to read the specified `filename`. If not specified, read in the default ``datadir`` specified in\n SpectroChemPy Preferences.\n merge : bool, optional\n Default value is False. If True, and several filenames have been provided as arguments,\n then a single dataset with merged (stacked along the first\n dimension) is returned (default=False).\n sortbydate : bool, optional\n Sort multiple spectra by acquisition date (default=True).\n description: str, optional\n A Custom description.\n content : bytes object, optional\n Instead of passing a filename for further reading, a bytes content can be directly provided as bytes objects.\n The most convenient way is to use a dictionary. This feature is particularly useful for a GUI Dash application\n to handle drag and drop of files into a Browser.\n For exemples on how to use this feature, one can look in the ``tests/tests_readers`` directory.\n listdir : bool, optional\n If True and filename is None, all files present in the provided `directory` are returned (and merged if `merge`\n is True. It is assumed that all the files correspond to current reading protocol (default=True).\n recursive : bool, optional\n Read also in subfolders. 
(default=False).\n\n See Also\n ---------\n read : Generic read method.\n read_topspin : Read TopSpin Bruker NMR spectra.\n read_omnic : Read Omnic spectra.\n read_opus : Read OPUS spectra.\n read_spg : Read Omnic *.spg grouped spectra.\n read_spa : Read Omnic *.Spa single spectra.\n read_srs : Read Omnic series.\n read_csv : Read CSV files.\n read_zip : Read Zip files.\n read_matlab : Read Matlab files.\n \"\"\"\n kwargs['filetypes'] = ['JCAMP-DX files (*.jdx *.dx)']\n kwargs['protocol'] = ['jcamp']\n importer = Importer()\n return importer(*paths, **kwargs)\n\n\n@deprecated(\"read_jdx reading method is deprecated and may be removed in next versions \"\n \"- use read_jcamp instead\")\ndef read_jdx(*args, **kwargs):\n return read_jcamp(*args, **kwargs)\n\n\n@deprecated(\"read_dx reading method is deprecated and may be removed in next versions \"\n \"- use read_jcamp instead\")\ndef read_dx(*args, **kwargs):\n return read_jcamp(*args, **kwargs)\n\n\n# ======================================================================================================================\n# private functions\n# ======================================================================================================================\n\n@importermethod\ndef _read_jdx(*args, **kwargs):\n\n # read jdx file\n dataset, filename = args\n content = kwargs.get('content', None)\n sortbydate = kwargs.pop(\"sortbydate\", True)\n\n if content is not None:\n fid = io.StringIO(content.decode(\"utf-8\"))\n else:\n fid = open(filename, 'r')\n\n # Read header of outer Block\n # ..................................................................................................................\n keyword = ''\n\n while keyword != '##TITLE':\n keyword, text = _readl(fid)\n if keyword != 'EOF':\n jdx_title = text\n else:\n raise ValueError('No ##TITLE LR in outer block header')\n\n while (keyword != '##DATA TYPE') and (keyword != '##DATATYPE'):\n keyword, text = _readl(fid)\n if keyword != 'EOF':\n jdx_data_type = text\n else:\n raise ValueError('No ##DATA TYPE LR in outer block header')\n\n if jdx_data_type == 'LINK':\n while keyword != '##BLOCKS':\n keyword, text = _readl(fid)\n nspec = int(text)\n elif jdx_data_type.replace(' ', '') == 'INFRAREDSPECTRUM':\n nspec = 1\n else:\n raise ValueError('DATA TYPE must be LINK or INFRARED SPECTRUM')\n\n # Create variables\n # ..................................................................................................................\n xaxis = np.array([])\n data = np.array([])\n alltitles, alltimestamps, alldates, xunits, yunits = [], [], [], [], []\n nx, firstx, lastx = np.zeros(nspec, 'int'), np.zeros(nspec, 'float'), np.zeros(nspec, 'float')\n\n # Read the spectra\n # ..................................................................................................................\n for i in range(nspec):\n\n # Reset variables\n keyword = ''\n\n # (year, month,...) 
must be reset at each spectrum because labels \"time\"\n # and \"longdate\" are not required in JDX file\n [year, month, day, hour, minute, second] = '', '', '', '', '', ''\n\n # Read JDX file for spectrum n° i\n while keyword != '##END':\n keyword, text = _readl(fid)\n if keyword in ['##ORIGIN', '##OWNER', '##JCAMP-DX']:\n continue\n elif keyword == '##TITLE':\n # Add the title of the spectrum in the list alltitles\n alltitles.append(text)\n elif keyword == '##LONGDATE':\n [year, month, day] = text.split('/')\n elif keyword == '##TIME':\n [hour, minute, second] = re.split(r'[:.]', text)\n elif keyword == '##XUNITS':\n xunits.append(text)\n elif keyword == '##YUNITS':\n yunits.append(text)\n elif keyword == '##FIRSTX':\n firstx[i] = float(text)\n elif keyword == '##LASTX':\n lastx[i] = float(text)\n elif keyword == '##XFACTOR':\n xfactor = float(text)\n elif keyword == '##YFACTOR':\n yfactor = float(text)\n elif keyword == '##NPOINTS':\n nx[i] = float(text)\n elif keyword == '##XYDATA':\n # Read the intensities\n allintensities = []\n while keyword != '##END':\n keyword, text = _readl(fid)\n # for each line, get all the values exept the first one (first value = wavenumber)\n intensities = list(filter(None, text.split(' ')[1:]))\n if len(intensities) > 0:\n allintensities += intensities\n spectra = np.array([allintensities]) # convert allintensities into an array\n spectra[spectra == '?'] = 'nan' # deals with missing or out of range intensity values\n spectra = spectra.astype(np.float32)\n spectra *= yfactor\n # add spectra in \"data\" matrix\n if not data.size:\n data = spectra\n else:\n data = np.concatenate((data, spectra), 0)\n\n # Check \"firstx\", \"lastx\" and \"nx\"\n if firstx[i] != 0 and lastx[i] != 0 and nx[i] != 0:\n if not xaxis.size:\n # Creation of xaxis if it doesn't exist yet\n xaxis = np.linspace(firstx[0], lastx[0], nx[0])\n xaxis = np.around((xaxis * xfactor), 3)\n else:\n # Check the consistency of xaxis\n if nx[i] - nx[i - 1] != 0:\n raise ValueError('Inconsistent data set: number of wavenumber per spectrum should be identical')\n elif firstx[i] - firstx[i - 1] != 0:\n raise ValueError('Inconsistent data set: the x axis should start at same value')\n elif lastx[i] - lastx[i - 1] != 0:\n raise ValueError('Inconsistent data set: the x axis should end at same value')\n else:\n raise ValueError('##FIRST, ##LASTX or ##NPOINTS are unusuable in the spectrum n°', i + 1)\n\n # Creation of the acquisition date\n if (year != '' and month != '' and day != '' and hour != '' and minute != '' and second != ''):\n date = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second), tzinfo=timezone.utc)\n timestamp = date.timestamp()\n # Transform back to timestamp for storage in the Coord object\n # use datetime.fromtimestamp(d, timezone.utc))\n # to transform back to datetime object\n else:\n timestamp = date = None\n # Todo: cases where incomplete date and/or time info\n alltimestamps.append(timestamp)\n alldates.append(date)\n\n # Check the consistency of xunits and yunits\n if i > 0:\n if yunits[i] != yunits[i - 1]:\n raise ValueError(f'##YUNITS should be the same for all spectra (check spectrum n°{i + 1}')\n elif xunits[i] != xunits[i - 1]:\n raise ValueError(f'##XUNITS should be the same for all spectra (check spectrum n°{i + 1}')\n\n # Determine xaxis name ****************************************************\n if xunits[0].strip() == '1/CM':\n axisname = 'wavenumbers'\n axisunit = 'cm^-1'\n elif xunits[0].strip() == 'MICROMETERS':\n axisname = 'wavelength'\n 
axisunit = 'um'\n elif xunits[0].strip() == 'NANOMETERS':\n axisname = 'wavelength'\n axisunit = 'nm'\n elif xunits[0].strip() == 'SECONDS':\n axisname = 'time'\n axisunit = 's'\n elif xunits[0].strip() == 'ARBITRARY UNITS':\n axisname = 'arbitrary unit'\n axisunit = None\n else:\n axisname = ''\n axisunit = ''\n fid.close()\n\n dataset.data = data\n dataset.name = jdx_title\n if yunits[0].strip() == 'ABSORBANCE':\n dataset.units = 'absorbance'\n dataset.title = 'absorbance'\n elif yunits[0].strip() == 'TRANSMITTANCE':\n # TODO: This units not in pint. Add this\n dataset.title = 'transmittance'\n\n # now add coordinates\n _x = Coord(xaxis, title=axisname, units=axisunit)\n if jdx_data_type == 'LINK':\n _y = Coord(alltimestamps, title='acquisition timestamp (GMT)', units='s', labels=(alldates, alltitles))\n dataset.set_coordset(y=_y, x=_x)\n else:\n _y = Coord()\n dataset.set_coordset(y=_y, x=_x)\n\n # Set origin, description and history\n dataset.origin = \"omnic\"\n dataset.description = \"Dataset from jdx: '{0}'\".format(jdx_title)\n\n dataset.history = str(datetime.now(timezone.utc)) + ':imported from jdx file \\n'\n\n if sortbydate:\n dataset.sort(dim='x', inplace=True)\n dataset.history = str(datetime.now(timezone.utc)) + ':sorted by date\\n'\n # Todo: make sure that the lowest index correspond to the largest wavenumber\n # for compatibility with dataset created by read_omnic:\n\n # Set the NDDataset date\n dataset._date = datetime.now(timezone.utc)\n dataset._modified = dataset.date\n\n return dataset\n\n\n# ......................................................................................................................\n@importermethod\ndef _read_dx(*args, **kwargs):\n return _read_jdx(*args, **kwargs)\n\n\n# ......................................................................................................................\ndef _readl(fid):\n line = fid.readline()\n if not line:\n return 'EOF', ''\n line = line.strip(' \\n') # remove newline character\n if line[0:2] == '##': # if line starts with \"##\"\n if line[0:5] == '##END': # END KEYWORD, no text\n keyword = '##END'\n text = ''\n else: # keyword + text\n keyword, text = line.split('=')\n else:\n keyword = ''\n text = line.strip()\n return keyword, text\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nif __name__ == '__main__':\n pass\n", "# -*- coding: utf-8 -*-\n\n# ======================================================================================================================\n# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. 
=\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =\n# ======================================================================================================================\n\n\"\"\" Tests for the SVD class\n\n\"\"\"\nfrom numpy.testing import assert_allclose\n\nfrom spectrochempy.core.analysis.svd import SVD\nfrom spectrochempy.utils import MASKED\n\n\n# test svd\n# -----------\n\ndef test_svd(IR_dataset_2D):\n dataset = IR_dataset_2D\n\n svd = SVD(dataset)\n\n assert_allclose(svd.ev_ratio[0].data, 94.539, rtol=1e-5, atol=0.0001)\n\n # with masks\n dataset[:, 1240.0:920.0] = MASKED # do not forget to use float in slicing\n dataset[10:12] = MASKED\n\n dataset.plot_stack()\n\n svd = SVD(dataset)\n\n assert_allclose(svd.ev_ratio.data[0], 93.8, rtol=1e-4, atol=0.01)\n\n # with masks\n dataset[:, 1240.0:920.0] = MASKED # do not forget to use float in slicing\n dataset[10:12] = MASKED\n\n dataset.plot_stack()\n\n svd = SVD(dataset, full_matrices=True)\n\n assert_allclose(svd.ev_ratio.data[0], 93.8, rtol=1e-4, atol=0.01)\n", "# -*- coding: utf-8 -*-\n\n# ======================================================================================================================\n# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =\n# ======================================================================================================================\n\"\"\" Tests for the ndplugin module\n\n\"\"\"\n\nimport numpy as np\n\nfrom spectrochempy import show\nfrom spectrochempy.core.processors.autosub import autosub\n\n\n# autosub\n# ------\n\ndef test_autosub(IR_dataset_2D):\n dataset = IR_dataset_2D\n\n ranges = [5000., 5999.], [1940., 1820.]\n\n s1 = dataset.copy()\n ref = s1[-1].squeeze()\n\n dataset.plot_stack()\n ref.plot(clear=False, linewidth=2., color='r')\n\n s2 = dataset.copy()\n\n s3 = s2.autosub(ref, *ranges, dim=-1, method='vardiff', inplace=False)\n s3.plot()\n\n # inplace = False\n assert np.round(s2.data[-1, 0], 4) != 0.0000\n assert np.round(s3.data[-1, 0], 4) == 0.0000\n s3.name = \"vardiff\"\n\n s3.plot_stack()\n\n s4 = dataset.copy()\n s4.autosub(ref, *ranges, method='ssdiff', inplace=True)\n s4.name = \"ssdiff, inplace\"\n assert np.round(s4.data[-1, 0], 4) == 0.0000\n\n s4.plot_stack() # true avoid blocking due to graphs\n\n s4 = dataset.copy()\n s = autosub(s4, ref, *ranges, method='ssdiff')\n assert np.round(s4.data[-1, 0], 4) != 0.0000\n assert np.round(s.data[-1, 0], 4) == 0.0000\n s.name = 'ssdiff direct call'\n\n s.plot_stack()\n\n # s5 = dataset.copy()\n # ref2 = s5[:, 0].squeeze()\n # ranges2 = [0, 5], [45, 54]\n\n # TODO: not yet implemented\n # s6 = s5.autosub(ref2, *ranges2, dim='y', method='varfit', inplace=False)\n # s6.plot()\n\n show()\n" ]
[ [ "numpy.ma.array", "numpy.array" ], [ "numpy.array", "numpy.loadtxt" ], [ "matplotlib.get_backend", "matplotlib.use" ], [ "numpy.all" ], [ "numpy.linspace", "numpy.around", "numpy.concatenate", "numpy.array", "numpy.zeros" ], [ "numpy.testing.assert_allclose" ], [ "numpy.round" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bicepjai/Deep-Survey-on-Text-Classification
[ "d935f0d4fc09213644d0291a0d64873912b2e331" ]
[ "lib/global_utils.py" ]
[ "import sys\nimport os\n\nimport re\nimport collections\nimport itertools\nimport bcolz\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nimport gc\nimport random\nimport smart_open\nimport h5py\nimport csv\nimport tensorflow as tf\nimport gensim\n\nimport datetime as dt\nfrom tqdm import tqdm_notebook as tqdm\n\n# import multiprocessing as mp\n# from itertools import repeat, product\n# from functools import partial\n\n# to be able to pickle class methods for multi processing\n# https://stackoverflow.com/questions/27318290/why-can-i-pass-an-instance-method-to-multiprocessing-process-but-not-a-multipro\n\ndef _instance_method_alias(obj, arg):\n \"\"\"\n Alias for instance method that allows the method to be called in a\n multiprocessing pool\n \"\"\"\n return obj.convertSent2WordIds(arg)\n\n\n\ndef get_embeddings_from_ft(fasttext_vec_file, dim, vocab_words):\n \"\"\"\n convert fast text .vec file to numpy array\n created embedding will be in order of words in vocab_words\n \"\"\"\n\n # gathering words from fasttext vec file--------------------\n ft_lines = None\n\n with open(fasttext_vec_file, \"r\") as f:\n ft_lines = f.readlines()\n\n ft_shape = tuple([int(i.strip()) for i in ft_lines[0].split()])\n ft_vocab_size = ft_shape[0]\n\n ft_wvs_dict = {}\n\n for i, line in enumerate(ft_lines[1:]):\n str_list = line.split()\n word = str(str_list[0].strip())\n vec = np.array([np.float(f) for f in str_list[1:]])\n assert dim == len(vec), \"fast text some vectors doesn't match dimensions \"+str(dim)+\" != \"+str(len(vec))\n ft_wvs_dict[word] = vec\n\n assert ft_vocab_size == len(ft_wvs_dict), \"fast text vectors file read issue \"+str(ft_vocab_size)+\" != \"+str(len(ft_wvs_dict))\n\n # creating embedding matrix from the file --------------------\n wvs_embedding = np.random.randn(len(vocab_words), dim)\n for i,word in enumerate(vocab_words):\n if word in ft_wvs_dict:\n wvs_embedding[i] = ft_wvs_dict[word]\n\n return wvs_embedding\n\n\n#=============================================================\n# DOCUMENT PREPROCESSING\n#=============================================================\n\nCHAR_ALPHABETS = \"abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{}\\n \"\nchar_start_tag_idx = len(CHAR_ALPHABETS) + 0\nchar_end_tag_idx = len(CHAR_ALPHABETS) + 1\nchar_unknown_tag_idx = len(CHAR_ALPHABETS) + 2\n\n# when sentences are converted to characters\n# these are appended to signal end of sentences\nchar_sent_start_tag_idx = len(CHAR_ALPHABETS) + 3\nchar_sent_end_tag_idx = len(CHAR_ALPHABETS) + 4\n\nCHAR_ALPHABETS_LEN = len(CHAR_ALPHABETS) + 4\n\nclass GenerateDataset(object):\n \"\"\"\n This class takes in preprocessed data frame and\n generated datasets as necessary\n \"\"\"\n\n def __init__(self, data_frame, vocab_idx):\n self.data_frame = data_frame\n self.vocab_idx = vocab_idx\n self.vocab_size = len(vocab_idx)\n\n # constants ================================================================================\n self.sentence_start_tag_idx = self.vocab_idx[\"<SOSent>\"]\n self.sentence_end_tag_idx = self.vocab_idx[\"<EOSent>\"]\n self.word_unknown_tag_idx = self.vocab_idx[\"<UNK>\"]\n\n self.default_unit_dict = {\n \"gene_unit\" : \"words\",\n \"variation_unit\" : \"words\",\n \"doc_unit\" : \"words\",\n \"doc_form\" : \"text\",\n \"doc_cntx_dir\" : \"forward\",\n \"divide_document\": \"single_unit\"\n }\n\n\n def convertSent2WordIds(self, sentence, add_start_end_tag=False):\n \"\"\"\n sentence is a list of word.\n It is converted to list of ids based on vocab_idx\n 
\"\"\"\n\n sent2id = []\n if add_start_end_tag:\n sent2id = [self.sentence_start_tag_idx]\n\n try:\n sent2id = sent2id + [self.vocab_idx[word] if self.vocab_idx[word]<self.vocab_size else self.word_unknown_tag_idx for word in sentence]\n except KeyError as e:\n print(e)\n print (sentence)\n raise ValueError('Fix this issue dude')\n\n if add_start_end_tag:\n sent2id = sent2id + [self.sentence_end_tag_idx]\n\n return sent2id\n\n\n\n def convertDoc2Sent2WordIds(self, document, add_start_end_tag=False):\n \"\"\"\n document is a list of sentence.\n sentence is a list of word.\n so given sent_list will be converted to list of list of ids based on vocab_idx\n \"\"\"\n\n return [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]\n\n\n\n def convertWord2Char2Ids(self, word, add_start_end_tag=False):\n \"\"\"\n word is a char sequence or list of characters,\n return list of ids in word or char sequence\n \"\"\"\n char2id = []\n if add_start_end_tag:\n char2id = [char_start_tag_idx]\n\n char2id = char2id + [CHAR_ALPHABETS.find(char) for char in word]\n\n if add_start_end_tag:\n char2id = char2id + [char_end_tag_idx]\n\n return char2id\n\n\n\n def convertSent2Word2Char2Ids(self, sentence, add_start_end_tag=False, unit=\"chars\"):\n \"\"\"\n sentence is list of words\n word is list of characters\n returns list of list of ids\n \"\"\"\n\n sent2words2char2id = []\n if unit == \"chars\":\n \"\"\"\n all the words are grouped as list of chars with pre-post added tags\n \"\"\"\n if add_start_end_tag:\n sent2words2char2id = [[char_sent_start_tag_idx]]\n\n sent2words2char2id = sent2words2char2id + [self.convertWord2Char2Ids(word, add_start_end_tag) if self.vocab_idx[word] < self.vocab_size else [char_unknown_tag_idx] for word in sentence]\n\n if add_start_end_tag:\n sent2words2char2id = sent2words2char2id + [[char_sent_end_tag_idx]]\n elif unit == \"raw_chars\":\n \"\"\"\n just a stream of characters\n \"\"\"\n if add_start_end_tag:\n sent2words2char2id = [char_sent_start_tag_idx]\n\n for word in sentence:\n if self.vocab_idx[word] < self.vocab_size:\n sent2words2char2id += [charid for charid in self.convertWord2Char2Ids(word, add_start_end_tag)]\n else:\n sent2words2char2id += [char_unknown_tag_idx]\n\n if add_start_end_tag:\n sent2words2char2id = sent2words2char2id + [char_sent_end_tag_idx]\n else:\n assert False, \"give valid doc_unit argument\"\n\n return sent2words2char2id\n\n\n\n def convertDoc2Sent2Word2Char2Ids(self, document, doc_form=\"sentences\", add_start_end_tag=False, unit=\"chars\"):\n \"\"\"\n document is a list of sentence.\n sentence is a list of word.\n so given sent_list will be converted to list of list of ids based on vocab_idx\n\n returns list of list if doc_form == \"text\"\n returns list of list of list if doc_form == \"sentences\"\n \"\"\"\n doc2word2char2ids = []\n\n if doc_form == \"sentences\":\n doc2word2char2ids = [self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit) for sentence in document]\n\n elif doc_form == \"text\":\n doc2word2char2ids = [list_or_charid for list_or_charid in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit)]\n else:\n assert False, \"give valid doc_form argument\"\n\n return doc2word2char2ids\n\n\n\n def generate_data(self, unit_dict=None, has_class=False, add_start_end_tag=False):\n \"\"\"\n dataframe expects to have Sentences, Variations, Genes, Class(has_class)\n\n Sentences Text attribute converted to list of sentences which in turn converted to list of words\n Variations just one sentence 
which is a list of words\n Genes just one sentence which is a list of words\n\n returns information based on request\n\n unit_dict contains these 5 keys that can have\n gene_unit can be [\"words\", \"chars\", \"raw_chars\"]\n variation_unit can be [\"words\", \"chars\", \"raw_chars\"]\n doc_unit can be [\"words\", \"word_list\", chars\", \"raw_chars\"]\n doc_form can be [\"sentences\", \"text\"]\n doc_cntx_dir can be [\"forward\", \"backward\"]\n divide_document can be [\"single_unit\", \"multiple_units\"]\n\n \"\"\"\n if not unit_dict:\n unit_dict = self.default_unit_dict\n\n try:\n unit_dict[\"doc_cntx_dir\"]\n except KeyError as e:\n unit_dict[\"doc_cntx_dir\"] = \"forward\"\n\n ids_document = []\n ids_labels = []\n ids_genes = []\n ids_variations = []\n\n # since sometimes the data will be shuffled in the frame\n # during train test split\n for index in self.data_frame.index:\n document = self.data_frame.Sentences[index]\n if unit_dict[\"divide_document\"] == \"single_unit\": #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`\n\n # doc units --------------------------------------------------------------\n if unit_dict[\"doc_unit\"] == \"words\" or unit_dict[\"doc_unit\"] == \"word_list\":\n\n if unit_dict[\"doc_form\"] == \"sentences\":\n ids_document.append(self.convertDoc2Sent2WordIds(document, add_start_end_tag))\n\n else: # unit_dict[\"doc_form\"] == \"text\"\n\n # using multiprocess to process each sentence in document and concatenate them to a single sentence\n # get_wordid_list = lambda d, setag : [wid for s in d for wid in self.convertSent2WordIds(s, setag)]\n # text_word_list = []\n # with mp.Pool(processes = 5) as pool:\n # # text_word_list = pool.starmap(get_wordid_list, product(document, [add_start_end_tag]*len(document)))\n # # text_word_list = pool.starmap(get_wordid_list, zip(document, repeat(add_start_end_tag)))\n # text_word_list = pool.map(partial(get_wordid_list, setag=add_start_end_tag), document)\n\n\n # without multiprocessing\n if unit_dict[\"doc_unit\"] == \"words\":\n text_word_list = [word_id for sentence in document for word_id in self.convertSent2WordIds(sentence, add_start_end_tag)]\n\n if unit_dict[\"doc_cntx_dir\"] == \"backward\":\n text_word_list = text_word_list[::-1]\n\n else: # unit_dict[\"doc_unit\"] == \"word_list\": sentence form a list\n text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]\n\n if unit_dict[\"doc_cntx_dir\"] == \"backward\":\n text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag)[::-1] for sentence in document]\n\n ids_document.append(text_word_list)\n\n elif unit_dict[\"doc_unit\"] == \"chars\" or unit_dict[\"doc_unit\"] == \"raw_chars\":\n\n if unit_dict[\"doc_form\"] == \"sentences\":\n\n for sentence in document:\n ids_document.append(self.convertDoc2Sent2Word2Char2Ids(document,\n doc_form=unit_dict[\"doc_form\"], unit=unit_dict[\"doc_unit\"], add_start_end_tag=add_start_end_tag))\n\n else: # unit_dict[\"doc_form\"] == \"text\"\n text_char_list = [word_as_char_list_id for sentence in document for word_as_char_list_id in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit=unit_dict[\"doc_unit\"])]\n\n ids_document.append(text_char_list)\n\n else:\n assert False, \"give valid doc_unit key-value\"\n\n # others --------------------------------------------------------------\n if has_class:\n ids_labels.append(self.data_frame.Class[index])\n\n if unit_dict[\"gene_unit\"] == \"words\":\n ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], 
add_start_end_tag))\n else:\n ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],\n add_start_end_tag, unit=unit_dict[\"doc_unit\"]))\n\n if unit_dict[\"variation_unit\"] == \"words\":\n ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))\n else:\n ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],\n add_start_end_tag, unit=unit_dict[\"doc_unit\"]))\n\n else: # unit_dict[\"divide_document\"] == \"multiple_unit\" #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`\n for sentence in document:\n\n # doc units --------------------------------------------------------------\n if unit_dict[\"doc_unit\"] == \"words\":\n\n # doesnt matter if\n # unit_dict[\"doc_form\"] == \"sentences\"\n # unit_dict[\"doc_form\"] == \"text\"\n\n try:\n sentence_list = self.convertSent2WordIds(sentence, add_start_end_tag)\n if unit_dict[\"doc_cntx_dir\"] == \"backward\":\n text_word_list = self.convertSent2WordIds(sentence, add_start_end_tag)[::-1]\n\n ids_document.append(sentence_list)\n\n except ValueError as e:\n print(e)\n print (index)\n raise ValueError('Fix this issue dude !')\n\n elif unit_dict[\"doc_unit\"] == \"chars\" or unit_dict[\"doc_unit\"] == \"raw_chars\":\n\n # doesnt matter if\n # unit_dict[\"doc_form\"] == \"sentences\"\n # unit_dict[\"doc_form\"] == \"text\"\n\n ids_document.append(self.convertSent2Word2Char2Ids(sentence, add_start_end_tag,\n unit=unit_dict[\"doc_unit\"]))\n\n\n # others --------------------------------------------------------------\n if has_class:\n ids_labels.append(self.data_frame.Class[index])\n\n if unit_dict[\"gene_unit\"] == \"words\":\n ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], add_start_end_tag))\n else:\n ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],\n add_start_end_tag, unit=unit_dict[\"gene_unit\"]))\n\n if unit_dict[\"variation_unit\"] == \"words\":\n ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))\n else:\n ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],\n add_start_end_tag, unit=unit_dict[\"variation_unit\"]))\n\n\n return ids_document, ids_genes, ids_variations, ids_labels\n\n\n\n def placeholder_function(self, unit_dict=None, limit_dict=None,\n has_class=False, add_start_end_tag=False):\n \"\"\"\n dataframe expects to have Sentences, Variations, Genes, Class(has_class)\n\n Sentences Text attribute converted to list of sentences which in turn converted to list of words\n Variations just one sentence which is a list of words\n Genes just one sentence which is a list of words\n\n returns information based on request\n\n unit_dict contains these 5 keys that can have\n gene_unit can be [\"words\", \"chars\"]\n variation_unit can be [\"words\", \"chars\"]\n doc_unit can be [\"words\", \"chars\"]\n doc_form can be [\"sentences\", \"text\"]\n divide_document can be [\"single_unit\", \"multiple_units\"]\n\n limit_dict contains max sequence len to form valid matrices\n Text attribute options\n max_text_seq_len => maximum number of words in a text\n max_text_document_len => maximum number of sentences in a document\n max_text_sentence_len => maximum number of words in a sentence\n max_text_word_len => maximum number of chars in a word\n\n Gene Attribute options\n max_gene_sentence_len => maximum number of words in a sentence\n max_gene_word_len => maximum number of chars in a word\n\n Variation Attribute options\n 
max_variation_sentence_len => maximum number of words in a sentence\n max_variation_word_len => maximum number of chars in a word\n\n \"\"\"\n\n ids_document, ids_genes, ids_variations, ids_labels = self.generate_dataset(unit_dict, has_class, add_start_end_tag)\n\n\n# testing ======================================================================================\n\ndef test_class():\n document = [\n ['beautiful', 'is', 'better', 'than', 'ugly.'],\n ['explicit', 'is', 'better', 'than', 'implicit.'],\n ['simple', 'is', 'better', 'than', 'complex.'],\n ['complex', 'is', 'better', 'than', 'complicated.'],\n ['flat', 'is', 'better', 'than', 'nested.'],\n # ['sparse', 'is', 'better', 'than', 'dense.'],\n # ['readability', 'counts.'],\n # ['special', 'cases', \"aren't\", 'special', 'enough', 'to', 'break', 'the', 'rules.'],\n # ['although', 'practicality', 'beats', 'purity.'],\n # ['errors', 'should', 'never', 'pass', 'silently.'],\n # ['unless', 'explicitly', 'silenced.'],\n # ['in', 'the', 'face', 'of', 'ambiguity,', 'refuse', 'the', 'temptation', 'to', 'guess.'],\n # ['there', 'should', 'be', 'one--', 'and', 'preferably', 'only', 'one', '--obvious', 'way', 'to', 'do', 'it.'],\n # ['although', 'that', 'way', 'may', 'not', 'be', 'obvious', 'at', 'first', 'unless', \"you're\", 'Dutch.'],\n # ['now', 'is', 'better', 'than', 'never.'], ['Although', 'never', 'is', 'often', 'better', 'than', '*right*', 'now.'],\n # ['if', 'the', 'implementation', 'is', 'hard', 'to', 'explain,', \"it's\", 'a', 'bad', 'idea.'],\n # ['if', 'the', 'implementation', 'is', 'easy', 'to', 'explain,', 'it', 'may', 'be', 'a', 'good', 'idea.'],\n # ['namespaces', 'are', 'one', 'honking', 'great', 'idea', '--', \"let's\", 'do', 'more', 'of', 'those!'],\n ]\n\n data_dict = {\n \"ID\" : 0,\n \"Gene\" : [[\"beautiful\"]],\n \"Variation\" : [[\"complex\", \"simple\"]],\n \"Class\" : 0,\n \"Sentences\" : [document[:]]\n }\n\n custom_unit_dict = {\n \"gene_unit\" : \"raw_chars\",\n \"variation_unit\" : \"raw_chars\",\n # text transformed to sentences attribute\n \"doc_unit\" : \"raw_chars\",\n \"doc_form\" : \"sentences\",\n # \"doc_cntx_dir\" : \"forward\",\n \"divide_document\" : \"single_unit\"\n }\n\n df = pd.DataFrame(data=data_dict)\n corpus = sorted(list(set([word for sentence in document for word in sentence])))\n corpus_wordidx = {word:i for i,word in enumerate(corpus)}\n corpus_wordidx[\"<SOSent>\"] = len(corpus)\n corpus_wordidx[\"<EOSent>\"] = len(corpus) + 1\n corpus_wordidx[\"<UNK>\"] = len(corpus) + 2\n\n gen_data = GenerateDataset(df, corpus_wordidx)\n x_T, x_G, x_V, x_C = gen_data.generate_data(custom_unit_dict, has_class=True, add_start_end_tag=True)\n\n print(\"data\", df.Sentences[0], \"\\n\")\n print(corpus_wordidx)\n index = 0\n print(\"text\",np.array(x_T).shape, x_T[index])\n print(\"gene\",np.array(x_G).shape, x_G[index])\n print(\"variation\",np.array(x_V).shape, x_V[index])\n print(\"classes\",np.array(x_C).shape, x_C[index])\n\n\nif __name__ == \"__main__\":\n test_class()\n\n" ]
[ [ "numpy.array", "numpy.float", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Sensors-in-Paradise/OpportunityML
[ "a123b4842de45f735d517be6bcd96ca35171db91", "a123b4842de45f735d517be6bcd96ca35171db91" ]
[ "src/loader/load_opportunity_dataset.py", "archive/model_archive/ConvModel.py" ]
[ "import itertools\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nfrom utils.Recording import Recording\nimport utils.settings as settings\n\n\ndef load_opportunity_dataset(opportunity_dataset_path: str) -> \"list[Recording]\":\n \"\"\"\n Returns a list of Recordings from the opportunity dataset\n \"\"\"\n print(\"Will read the opportunity dataset\")\n opportunity_dataset_path += \"/dataset\"\n subject_ids = range(1, 5)\n recording_ids = range(1, 6)\n\n # see loader/opportunity_col_names to make your selection\n selected_feature_names = [\n \"IMU-BACK-accX\",\n \"IMU-BACK-accY\",\n \"IMU-BACK-accZ\",\n \"IMU-BACK-Quaternion1\",\n \"IMU-BACK-Quaternion2\",\n \"IMU-BACK-Quaternion3\",\n \"IMU-BACK-Quaternion4\",\n \"IMU-RLA-accX\",\n \"IMU-RLA-accY\",\n \"IMU-RLA-accZ\",\n \"IMU-RLA-Quaternion1\",\n \"IMU-RLA-Quaternion2\",\n \"IMU-RLA-Quaternion3\",\n \"IMU-RLA-Quaternion4\",\n \"IMU-LLA-accX\",\n \"IMU-LLA-accY\",\n \"IMU-LLA-accZ\",\n \"IMU-LLA-Quaternion1\",\n \"IMU-LLA-Quaternion2\",\n \"IMU-LLA-Quaternion3\",\n \"IMU-LLA-Quaternion4\",\n \"IMU-L-SHOE-EuX\",\n \"IMU-L-SHOE-EuY\",\n \"IMU-L-SHOE-EuZ\",\n \"IMU-L-SHOE-Nav_Ax\",\n \"IMU-L-SHOE-Nav_Ay\",\n \"IMU-L-SHOE-Nav_Az\",\n \"IMU-L-SHOE-Body_Ax\",\n \"IMU-L-SHOE-Body_Ay\",\n \"IMU-L-SHOE-Body_Az\",\n \"IMU-L-SHOE-AngVelBodyFrameX\",\n \"IMU-L-SHOE-AngVelBodyFrameY\",\n \"IMU-L-SHOE-AngVelBodyFrameZ\",\n \"IMU-L-SHOE-AngVelNavFrameX\",\n \"IMU-L-SHOE-AngVelNavFrameY\",\n \"IMU-L-SHOE-AngVelNavFrameZ\",\n \"IMU-R-SHOE-EuX\",\n \"IMU-R-SHOE-EuY\",\n \"IMU-R-SHOE-EuZ\",\n \"IMU-R-SHOE-Nav_Ax\",\n \"IMU-R-SHOE-Nav_Ay\",\n \"IMU-R-SHOE-Nav_Az\",\n \"IMU-R-SHOE-Body_Ax\",\n \"IMU-R-SHOE-Body_Ay\",\n \"IMU-R-SHOE-Body_Az\",\n \"IMU-R-SHOE-AngVelBodyFrameX\",\n \"IMU-R-SHOE-AngVelBodyFrameY\",\n \"IMU-R-SHOE-AngVelBodyFrameZ\",\n \"IMU-R-SHOE-AngVelNavFrameX\",\n \"IMU-R-SHOE-AngVelNavFrameY\",\n \"IMU-R-SHOE-AngVelNavFrameZ\",\n ]\n print(f\"Selected features (n_features: {len(selected_feature_names)}):\\n\", \"\\n\".join([\"\\t\" + str(feature_name) for feature_name in selected_feature_names]))\n\n # Get column names\n col_names = []\n with open(\"src/loader/opportunity_col_names\", \"r\") as file:\n lines = file.read().splitlines()\n for line in lines:\n col_names.append(line)\n\n recordings = []\n for sub, rec in itertools.product(subject_ids, recording_ids):\n file_name = f\"S{sub}-ADL{rec}.dat\"\n file_path = os.path.join(opportunity_dataset_path, file_name)\n print(f\"Reading {file_path} ...\")\n file_df = pd.read_csv(file_path, delimiter=\" \", header=None)\n file_df.columns = col_names # give them the real column names\n\n recordings.append(Recording(\n sensor_frame = file_df.loc[:, selected_feature_names], \n time_frame = file_df.loc[:, 'MILLISEC'],\n activities = file_df.loc[:, 'HL_Activity'].map(\n lambda label: settings.DATA_CONFIG.raw_label_to_activity_idx(label)\n ), # Use `[0]` to get only one activity | maps 0, 101, 102, 103, 104, 105 to 0, 1, 2, 3, 4, 5\n subject=int(sub),\n recording_index=int(rec)\n ))\n\n print(f\"\\n => Total {len(recordings)} recordings read\")\n\n return recordings\n\n", "from random import shuffle\nfrom models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut\nfrom tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout # type: ignore\nfrom tensorflow.keras.models import Sequential # type: ignore\nimport numpy as np\n\nfrom utils.Recording import Recording\nfrom utils.array_operations import split_list_by_percentage\nfrom utils.typing import 
assert_type\n\n\nclass ConvModel(RainbowModelLeaveRecsOut):\n def __init__(self, **kwargs):\n \"\"\"\n Convolutional model\n :param kwargs:\n window_size: int\n stride_size: int\n test_percentage: float\n n_features: int\n n_outputs: int\n \"\"\"\n\n # hyper params to instance vars\n self.window_size = kwargs[\"window_size\"]\n self.stride_size = kwargs[\"stride_size\"]\n self.test_percentage = kwargs[\"test_percentage\"]\n\n self.verbose = 0\n self.epochs = 10\n self.batch_size = 32\n\n # create model\n self.model = self.__create_model(kwargs[\"n_features\"], kwargs[\"n_outputs\"])\n\n def __create_model(self, n_features, n_outputs):\n # window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1]\n\n print(\n f\"Building model for {self.window_size} timesteps (window_size) and {n_features} features\"\n )\n model = Sequential()\n model.add(\n Conv1D(\n filters=64,\n kernel_size=3,\n activation=\"relu\",\n input_shape=(self.window_size, n_features),\n )\n )\n model.add(Conv1D(filters=64, kernel_size=3, activation=\"relu\"))\n model.add(Dropout(0.5))\n model.add(MaxPooling1D(pool_size=2))\n model.add(Flatten())\n model.add(Dense(100, activation=\"relu\"))\n model.add(Dense(n_outputs, activation=\"softmax\"))\n model.compile(\n loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"]\n )\n return model\n" ]
[ [ "pandas.read_csv" ], [ "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.layers.MaxPooling1D", "tensorflow.keras.layers.Dropout", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
evertdeman/HD-BET
[ "817a50d2fe9b8663646cc74652cb50e26f343a3b", "41ebe0d3360b86ae6949412c9ea2a5f653612540" ]
[ "HD_BET/utils.py", "HD_BET/network_architecture.py" ]
[ "from urllib.request import urlopen\nimport torch\nfrom torch import nn\nimport numpy as np\nfrom skimage.morphology import label\nimport os\nfrom HD_BET.paths import folder_with_parameter_files\n\n\ndef get_params_fname(fold):\n return os.path.join(folder_with_parameter_files, \"%d.model\" % fold)\n\n\ndef maybe_download_parameters(fold=0, force_overwrite=False):\n \"\"\"\n Downloads the parameters for some fold if it is not present yet.\n :param fold:\n :param force_overwrite: if True the old parameter file will be deleted (if present) prior to download\n :return:\n \"\"\"\n\n assert 0 <= fold <= 4, \"fold must be between 0 and 4\"\n\n if not os.path.isdir(folder_with_parameter_files):\n maybe_mkdir_p(folder_with_parameter_files)\n\n out_filename = get_params_fname(fold)\n\n if force_overwrite and os.path.isfile(out_filename):\n os.remove(out_filename)\n\n if not os.path.isfile(out_filename):\n url = \"https://zenodo.org/record/2540695/files/%d.model?download=1\" % fold\n print(\"Downloading\", url, \"...\")\n data = urlopen(url).read()\n with open(out_filename, 'wb') as f:\n f.write(data)\n\n\ndef init_weights(module):\n if isinstance(module, nn.Conv3d):\n module.weight = nn.init.kaiming_normal(module.weight, a=1e-2)\n if module.bias is not None:\n module.bias = nn.init.constant(module.bias, 0)\n\n\ndef softmax_helper(x):\n rpt = [1 for _ in range(len(x.size()))]\n rpt[1] = x.size(1)\n x_max = x.max(1, keepdim=True)[0].repeat(*rpt)\n e_x = torch.exp(x - x_max)\n return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)\n\n\nclass SetNetworkToVal(object):\n def __init__(self, use_dropout_sampling=False, norm_use_average=True):\n self.norm_use_average = norm_use_average\n self.use_dropout_sampling = use_dropout_sampling\n\n def __call__(self, module):\n if isinstance(module, nn.Dropout3d) or isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout):\n module.train(self.use_dropout_sampling)\n elif isinstance(module, nn.InstanceNorm3d) or isinstance(module, nn.InstanceNorm2d) or \\\n isinstance(module, nn.InstanceNorm1d) \\\n or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or \\\n isinstance(module, nn.BatchNorm1d):\n module.train(not self.norm_use_average)\n\n\ndef postprocess_prediction(seg):\n # basically look for connected components and choose the largest one, delete everything else\n print(\"running postprocessing... 
\")\n mask = seg != 0\n lbls = label(mask, connectivity=mask.ndim)\n lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]\n largest_region = np.argmax(lbls_sizes[1:]) + 1\n seg[lbls != largest_region] = 0\n return seg\n\n\ndef subdirs(folder, join=True, prefix=None, suffix=None, sort=True):\n if join:\n l = os.path.join\n else:\n l = lambda x, y: y\n res = [l(folder, i) for i in os.listdir(folder) if os.path.isdir(os.path.join(folder, i))\n and (prefix is None or i.startswith(prefix))\n and (suffix is None or i.endswith(suffix))]\n if sort:\n res.sort()\n return res\n\n\ndef subfiles(folder, join=True, prefix=None, suffix=None, sort=True):\n if join:\n l = os.path.join\n else:\n l = lambda x, y: y\n res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))\n and (prefix is None or i.startswith(prefix))\n and (suffix is None or i.endswith(suffix))]\n if sort:\n res.sort()\n return res\n\n\nsubfolders = subdirs # I am tired of confusing those\n\n\ndef maybe_mkdir_p(directory):\n splits = directory.split(\"/\")[1:]\n for i in range(0, len(splits)):\n if not os.path.isdir(os.path.join(\"/\", *splits[:i+1])):\n os.mkdir(os.path.join(\"/\", *splits[:i+1]))\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom HD_BET.utils import softmax_helper\n\n\nclass EncodingModule(nn.Module):\n def __init__(self, in_channels, out_channels, filter_size=3, dropout_p=0.3, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True):\n nn.Module.__init__(self)\n self.dropout_p = dropout_p\n self.lrelu_inplace = lrelu_inplace\n self.inst_norm_affine = inst_norm_affine\n self.conv_bias = conv_bias\n self.leakiness = leakiness\n self.bn_1 = nn.InstanceNorm3d(in_channels, affine=self.inst_norm_affine, track_running_stats=True)\n self.conv1 = nn.Conv3d(in_channels, out_channels, filter_size, 1, (filter_size - 1) // 2, bias=self.conv_bias)\n self.dropout = nn.Dropout3d(dropout_p)\n self.bn_2 = nn.InstanceNorm3d(in_channels, affine=self.inst_norm_affine, track_running_stats=True)\n self.conv2 = nn.Conv3d(out_channels, out_channels, filter_size, 1, (filter_size - 1) // 2, bias=self.conv_bias)\n\n def forward(self, x):\n skip = x\n x = F.leaky_relu(self.bn_1(x), negative_slope=self.leakiness, inplace=self.lrelu_inplace)\n x = self.conv1(x)\n if self.dropout_p is not None and self.dropout_p > 0:\n x = self.dropout(x)\n x = F.leaky_relu(self.bn_2(x), negative_slope=self.leakiness, inplace=self.lrelu_inplace)\n x = self.conv2(x)\n x = x + skip\n return x\n\n\nclass Upsample(nn.Module):\n def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=True):\n super(Upsample, self).__init__()\n self.align_corners = align_corners\n self.mode = mode\n self.scale_factor = scale_factor\n self.size = size\n\n def forward(self, x):\n return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode,\n align_corners=self.align_corners)\n\n\nclass LocalizationModule(nn.Module):\n def __init__(self, in_channels, out_channels, leakiness=1e-2, conv_bias=True, inst_norm_affine=True,\n lrelu_inplace=True):\n nn.Module.__init__(self)\n self.lrelu_inplace = lrelu_inplace\n self.inst_norm_affine = inst_norm_affine\n self.conv_bias = conv_bias\n self.leakiness = leakiness\n self.conv1 = nn.Conv3d(in_channels, in_channels, 3, 1, 1, bias=self.conv_bias)\n self.bn_1 = nn.InstanceNorm3d(in_channels, affine=self.inst_norm_affine, track_running_stats=True)\n self.conv2 = nn.Conv3d(in_channels, out_channels, 1, 1, 0, 
bias=self.conv_bias)\n self.bn_2 = nn.InstanceNorm3d(out_channels, affine=self.inst_norm_affine, track_running_stats=True)\n\n def forward(self, x):\n x = F.leaky_relu(self.bn_1(self.conv1(x)), negative_slope=self.leakiness, inplace=self.lrelu_inplace)\n x = F.leaky_relu(self.bn_2(self.conv2(x)), negative_slope=self.leakiness, inplace=self.lrelu_inplace)\n return x\n\n\nclass UpsamplingModule(nn.Module):\n def __init__(self, in_channels, out_channels, leakiness=1e-2, conv_bias=True, inst_norm_affine=True,\n lrelu_inplace=True):\n nn.Module.__init__(self)\n self.lrelu_inplace = lrelu_inplace\n self.inst_norm_affine = inst_norm_affine\n self.conv_bias = conv_bias\n self.leakiness = leakiness\n self.upsample = Upsample(scale_factor=2, mode=\"trilinear\", align_corners=True)\n self.upsample_conv = nn.Conv3d(in_channels, out_channels, 3, 1, 1, bias=self.conv_bias)\n self.bn = nn.InstanceNorm3d(out_channels, affine=self.inst_norm_affine, track_running_stats=True)\n\n def forward(self, x):\n x = F.leaky_relu(self.bn(self.upsample_conv(self.upsample(x))), negative_slope=self.leakiness,\n inplace=self.lrelu_inplace)\n return x\n\n\nclass DownsamplingModule(nn.Module):\n def __init__(self, in_channels, out_channels, leakiness=1e-2, conv_bias=True, inst_norm_affine=True,\n lrelu_inplace=True):\n nn.Module.__init__(self)\n self.lrelu_inplace = lrelu_inplace\n self.inst_norm_affine = inst_norm_affine\n self.conv_bias = conv_bias\n self.leakiness = leakiness\n self.bn = nn.InstanceNorm3d(in_channels, affine=self.inst_norm_affine, track_running_stats=True)\n self.downsample = nn.Conv3d(in_channels, out_channels, 3, 2, 1, bias=self.conv_bias)\n\n def forward(self, x):\n x = F.leaky_relu(self.bn(x), negative_slope=self.leakiness, inplace=self.lrelu_inplace)\n b = self.downsample(x)\n return x, b\n\n\nclass Network(nn.Module):\n def __init__(self, num_classes=4, num_input_channels=4, base_filters=16, dropout_p=0.3,\n final_nonlin=softmax_helper, leakiness=1e-2, conv_bias=True, inst_norm_affine=True,\n lrelu_inplace=True, do_ds=True):\n super(Network, self).__init__()\n\n self.do_ds = do_ds\n self.lrelu_inplace = lrelu_inplace\n self.inst_norm_affine = inst_norm_affine\n self.conv_bias = conv_bias\n self.leakiness = leakiness\n self.final_nonlin = final_nonlin\n self.init_conv = nn.Conv3d(num_input_channels, base_filters, 3, 1, 1, bias=self.conv_bias)\n\n self.context1 = EncodingModule(base_filters, base_filters, 3, dropout_p, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n self.down1 = DownsamplingModule(base_filters, base_filters * 2, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n\n self.context2 = EncodingModule(2 * base_filters, 2 * base_filters, 3, dropout_p, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n self.down2 = DownsamplingModule(2 * base_filters, base_filters * 4, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n\n self.context3 = EncodingModule(4 * base_filters, 4 * base_filters, 3, dropout_p, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n self.down3 = DownsamplingModule(4 * base_filters, base_filters * 8, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n\n self.context4 = EncodingModule(8 * base_filters, 8 * base_filters, 3, dropout_p, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n self.down4 = DownsamplingModule(8 * base_filters, base_filters * 16, leakiness=1e-2, 
conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n\n self.context5 = EncodingModule(16 * base_filters, 16 * base_filters, 3, dropout_p, leakiness=1e-2,\n conv_bias=True, inst_norm_affine=True, lrelu_inplace=True)\n\n self.bn_after_context5 = nn.InstanceNorm3d(16 * base_filters, affine=self.inst_norm_affine, track_running_stats=True)\n self.up1 = UpsamplingModule(16 * base_filters, 8 * base_filters, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n\n self.loc1 = LocalizationModule(16 * base_filters, 8 * base_filters, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n self.up2 = UpsamplingModule(8 * base_filters, 4 * base_filters, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n\n self.loc2 = LocalizationModule(8 * base_filters, 4 * base_filters, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n self.loc2_seg = nn.Conv3d(4 * base_filters, num_classes, 1, 1, 0, bias=False)\n self.up3 = UpsamplingModule(4 * base_filters, 2 * base_filters, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n\n self.loc3 = LocalizationModule(4 * base_filters, 2 * base_filters, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n self.loc3_seg = nn.Conv3d(2 * base_filters, num_classes, 1, 1, 0, bias=False)\n self.up4 = UpsamplingModule(2 * base_filters, 1 * base_filters, leakiness=1e-2, conv_bias=True,\n inst_norm_affine=True, lrelu_inplace=True)\n\n self.end_conv_1 = nn.Conv3d(2 * base_filters, 2 * base_filters, 3, 1, 1, bias=self.conv_bias)\n self.end_conv_1_bn = nn.InstanceNorm3d(2 * base_filters, affine=self.inst_norm_affine, track_running_stats=True)\n self.end_conv_2 = nn.Conv3d(2 * base_filters, 2 * base_filters, 3, 1, 1, bias=self.conv_bias)\n self.end_conv_2_bn = nn.InstanceNorm3d(2 * base_filters, affine=self.inst_norm_affine, track_running_stats=True)\n self.seg_layer = nn.Conv3d(2 * base_filters, num_classes, 1, 1, 0, bias=False)\n\n def forward(self, x):\n seg_outputs = []\n\n x = self.init_conv(x)\n x = self.context1(x)\n\n skip1, x = self.down1(x)\n x = self.context2(x)\n\n skip2, x = self.down2(x)\n x = self.context3(x)\n\n skip3, x = self.down3(x)\n x = self.context4(x)\n\n skip4, x = self.down4(x)\n x = self.context5(x)\n\n x = F.leaky_relu(self.bn_after_context5(x), negative_slope=self.leakiness, inplace=self.lrelu_inplace)\n x = self.up1(x)\n\n x = torch.cat((skip4, x), dim=1)\n x = self.loc1(x)\n x = self.up2(x)\n\n x = torch.cat((skip3, x), dim=1)\n x = self.loc2(x)\n loc2_seg = self.final_nonlin(self.loc2_seg(x))\n seg_outputs.append(loc2_seg)\n x = self.up3(x)\n\n x = torch.cat((skip2, x), dim=1)\n x = self.loc3(x)\n loc3_seg = self.final_nonlin(self.loc3_seg(x))\n seg_outputs.append(loc3_seg)\n x = self.up4(x)\n\n x = torch.cat((skip1, x), dim=1)\n x = F.leaky_relu(self.end_conv_1_bn(self.end_conv_1(x)), negative_slope=self.leakiness,\n inplace=self.lrelu_inplace)\n x = F.leaky_relu(self.end_conv_2_bn(self.end_conv_2(x)), negative_slope=self.leakiness,\n inplace=self.lrelu_inplace)\n x = self.final_nonlin(self.seg_layer(x))\n seg_outputs.append(x)\n\n if self.do_ds:\n return seg_outputs[::-1]\n else:\n return seg_outputs[-1]\n" ]
[ [ "torch.nn.init.kaiming_normal", "numpy.unique", "torch.exp", "numpy.argmax", "torch.nn.init.constant", "numpy.sum" ], [ "torch.nn.Module.__init__", "torch.cat", "torch.nn.Dropout3d", "torch.nn.Conv3d", "torch.nn.functional.interpolate", "torch.nn.InstanceNorm3d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mdhasan8/Machine-Learning-in-Python
[ "d66607d3003e8279e35cf176851f506aa833a9fe" ]
[ "Neural_Network_Tensorflow.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 16 22:30:11 2020\r\n\r\n@author: Easin\r\n\"\"\"\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import Model, layers\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# MNIST dataset parameters.\r\nnum_classes = 10 # total classes (0-9 digits).\r\nnum_features = 784 # data features (img shape: 28*28).\r\n\r\n# Training parameters.\r\nlearning_rate = 0.1\r\ntraining_steps = 2000\r\nbatch_size = 256\r\ndisplay_step = 100\r\n\r\n# Network parameters.\r\nn_hidden_1 = 128 # 1st layer number of neurons.\r\nn_hidden_2 = 256 # 2nd layer number of neurons.\r\n\r\n# Prepare MNIST data.\r\nfrom tensorflow.keras.datasets import mnist\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\n# Convert to float32.\r\nx_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)\r\n# Flatten images to 1-D vector of 784 features (28*28).\r\nx_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features])\r\n# Normalize images value from [0, 255] to [0, 1].\r\nx_train, x_test = x_train / 255., x_test / 255.\r\n\r\n# Use tf.data API to shuffle and batch data.\r\ntrain_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))\r\ntrain_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)\r\n\r\n# Create TF Model.\r\nclass NeuralNet(Model):\r\n # Set layers.\r\n def __init__(self):\r\n super(NeuralNet, self).__init__()\r\n # First fully-connected hidden layer.\r\n self.fc1 = layers.Dense(n_hidden_1, activation=tf.nn.relu)\r\n # First fully-connected hidden layer.\r\n self.fc2 = layers.Dense(n_hidden_2, activation=tf.nn.relu)\r\n # Second fully-connecter hidden layer.\r\n self.out = layers.Dense(num_classes)\r\n\r\n # Set forward pass.\r\n def call(self, x, is_training=False):\r\n x = self.fc1(x)\r\n x = self.fc2(x)\r\n x = self.out(x)\r\n if not is_training:\r\n # tf cross entropy expect logits without softmax, so only\r\n # apply softmax when not training.\r\n x = tf.nn.softmax(x)\r\n return x\r\n\r\n# Build neural network model.\r\nneural_net = NeuralNet()\r\n\r\n\r\n# Cross-Entropy Loss.\r\n# Note that this will apply 'softmax' to the logits.\r\ndef cross_entropy_loss(x, y):\r\n # Convert labels to int 64 for tf cross-entropy function.\r\n y = tf.cast(y, tf.int64)\r\n # Apply softmax to logits and compute cross-entropy.\r\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=x)\r\n # Average loss across the batch.\r\n return tf.reduce_mean(loss)\r\n\r\n# Accuracy metric.\r\ndef accuracy(y_pred, y_true):\r\n # Predicted class is the index of highest score in prediction vector (i.e. argmax).\r\n correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\r\n return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)\r\n\r\n# Stochastic gradient descent optimizer.\r\noptimizer = tf.optimizers.SGD(learning_rate)\r\n\r\n\r\n# Optimization process. \r\ndef run_optimization(x, y):\r\n # Wrap computation inside a GradientTape for automatic differentiation.\r\n with tf.GradientTape() as g:\r\n # Forward pass.\r\n pred = neural_net(x, is_training=True)\r\n # Compute loss.\r\n loss = cross_entropy_loss(pred, y)\r\n \r\n # Variables to update, i.e. 
trainable variables.\r\n trainable_variables = neural_net.trainable_variables\r\n\r\n # Compute gradients.\r\n gradients = g.gradient(loss, trainable_variables)\r\n \r\n # Update W and b following gradients.\r\n optimizer.apply_gradients(zip(gradients, trainable_variables))\r\n \r\n\r\n# Run training for the given number of steps.\r\nfor step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):\r\n # Run the optimization to update W and b values.\r\n run_optimization(batch_x, batch_y)\r\n \r\n if step % display_step == 0:\r\n pred = neural_net(batch_x, is_training=True)\r\n loss = cross_entropy_loss(pred, batch_y)\r\n acc = accuracy(pred, batch_y)\r\n print(\"step: %i, loss: %f, accuracy: %f\" % (step, loss, acc))\r\n \r\n# Test model on validation set.\r\npred = neural_net(x_test, is_training=False)\r\nprint(\"Test Accuracy: %f\" % accuracy(pred, y_test))\r\n\r\n\r\n# Predict 5 images from validation set.\r\nn_images = 5\r\ntest_images = x_test[:n_images]\r\npredictions = neural_net(test_images)\r\n\r\n# Display image and model prediction.\r\nfor i in range(n_images):\r\n plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')\r\n plt.show()\r\n print(\"Model prediction: %i\" % np.argmax(predictions.numpy()[i]))\r\n \r\n\r\n" ]
[ [ "tensorflow.nn.softmax", "tensorflow.reduce_mean", "numpy.reshape", "tensorflow.keras.layers.Dense", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.cast", "tensorflow.keras.datasets.mnist.load_data", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.optimizers.SGD", "tensorflow.argmax", "numpy.array", "matplotlib.pyplot.show", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.10", "1.12" ] } ]
983632847/covid19_pocus_ultrasound
[ "3625e95bbf189926dbd12966ef59ee71ed10e453" ]
[ "pocovidnet/scripts/eval_vid_classifier.py" ]
[ "import argparse\nimport json\nimport os\nimport pickle\nimport numpy as np\nfrom pocovidnet.evaluate_genesis import GenesisEvaluator\nfrom pocovidnet.evaluate_video import VideoEvaluator\nfrom tensorflow.keras import backend as K\nfrom pocovidnet.videoto3d import Videoto3D\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Evaluate genesis and cam')\n parser.add_argument('--json', type=str, default=\"../data/cross_val.json\")\n parser.add_argument(\n '--genesis_weights', type=str, default='video_genesis_lr1e4'\n )\n parser.add_argument(\n '--cam_weights', type=str, default='trained_models_cam'\n )\n parser.add_argument(\n '--videos', type=str, default='../data/pocus_videos/convex'\n )\n args = parser.parse_args()\n\n with open(args.json, \"r\") as infile:\n cross_val_split = json.load(infile)\n\n VIDEO_DIR = args.videos\n all_genesis_preds = []\n all_frame_preds = []\n for i in range(5):\n gen_eval = GenesisEvaluator(\n weights_dir=args.genesis_weights, ensemble=False, split=i\n )\n K.set_image_data_format(\"channels_last\")\n normal_eval = VideoEvaluator(\n weights_dir=args.cam_weights,\n ensemble=False,\n split=i,\n model_id=\"vgg_cam\",\n num_classes=4\n )\n files = cross_val_split[str(i)][\"test\"][0]\n # print(files)\n for f in files:\n print(\"evaluate\", f)\n # TEST if the video is working\n vid3d = Videoto3D(\"\", 64, 64, 5, 5)\n vid3d.max_vid = {\"cov\": 20, \"pne\": 20, \"reg\": 20}\n X_test, _, fn = vid3d.video3d(\n [os.path.join(VIDEO_DIR, f)], [\"cov\"]\n )\n if len(np.unique(fn)) != 1:\n print(\"ERROR: WRONG FILE!\")\n print(fn)\n print(X_test.shape)\n continue\n # run genesis model\n K.set_image_data_format(\"channels_first\")\n preds = gen_eval(os.path.join(VIDEO_DIR, f))\n vid_pred_genesis = np.argmax(np.mean(preds, axis=(0, 1)))\n all_genesis_preds.append(preds)\n # run cam model\n K.set_image_data_format(\"channels_last\")\n preds_framebased = normal_eval(os.path.join(VIDEO_DIR, f))\n frame_pred = np.argmax(np.mean(preds_framebased, axis=0))\n all_frame_preds.append(preds_framebased)\n print(preds.shape, preds_framebased.shape)\n print(\n \"genesis pred\", vid_pred_genesis, \"frame based pred\",\n frame_pred\n )\n print(\"-------------\")\n with open(\"evaluation_outputs.dat\", \"wb\") as outfile:\n pickle.dump((all_genesis_preds, all_frame_preds), outfile)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.mean", "numpy.unique", "tensorflow.keras.backend.set_image_data_format" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
pworinger/kornia
[ "a8bddbc5412694d778b1a7338e0d001910bb8024", "a8bddbc5412694d778b1a7338e0d001910bb8024", "a8bddbc5412694d778b1a7338e0d001910bb8024", "a8bddbc5412694d778b1a7338e0d001910bb8024" ]
[ "test/geometry/transform/crop/test_crop2d.py", "test/geometry/test_linalg.py", "test/geometry/transform/test_flip.py", "test/morphology/test_gradient.py" ]
[ "from typing import Tuple\n\nimport pytest\n\nimport kornia as kornia\nimport kornia.testing as utils # test utils\n\nimport torch\nfrom torch.testing import assert_allclose\nfrom torch.autograd import gradcheck\n\n\nclass TestBoundingBoxInferring:\n def test_bounding_boxes_dim_inferring(self, device, dtype):\n boxes = torch.tensor([[\n [1., 1.],\n [3., 1.],\n [3., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype)\n\n h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)\n assert (h, w) == (2, 3)\n\n def test_bounding_boxes_dim_inferring_batch(self, device, dtype):\n boxes = torch.tensor([[\n [1., 1.],\n [3., 1.],\n [3., 2.],\n [1., 2.],\n ], [\n [2., 2.],\n [4., 2.],\n [4., 3.],\n [2., 3.],\n ]], device=device, dtype=dtype)\n h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)\n assert (h.unique().item(), w.unique().item()) == (2, 3)\n\n def test_gradcheck(self, device, dtype):\n boxes = torch.tensor([[\n [1., 1.],\n [3., 1.],\n [3., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype)\n boxes = utils.tensor_to_gradcheck_var(boxes)\n assert gradcheck(kornia.kornia.geometry.transform.crop.infer_box_shape,\n (boxes,), raise_exception=True)\n\n def test_jit(self, device, dtype):\n # Define script\n op = kornia.geometry.transform.crop.infer_box_shape\n op_script = torch.jit.script(op)\n # Define input\n boxes = torch.tensor([[\n [1., 1.],\n [3., 1.],\n [3., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype)\n\n actual = op_script(boxes)\n expected = op(boxes)\n assert_allclose(actual, expected)\n\n\nclass TestCropAndResize:\n def test_align_corners_true(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n height, width = 2, 3\n\n expected = torch.tensor(\n [[[[6.0000, 6.5000, 7.0000],\n [10.0000, 10.5000, 11.0000]]]], device=device, dtype=dtype)\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n # default should use align_coners True\n patches = kornia.crop_and_resize(inp, boxes, (height, width))\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_align_corners_false(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n height, width = 2, 3\n expected = torch.tensor(\n [[[[6.7222, 7.1667, 7.6111],\n [9.3889, 9.8333, 10.2778]]]], device=device, dtype=dtype)\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n patches = kornia.crop_and_resize(inp, boxes, (height, width), align_corners=False)\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_crop_batch(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]], [[\n [1., 5., 9., 13.],\n [2., 6., 10., 14.],\n [3., 7., 11., 15.],\n [4., 8., 12., 16.],\n ]]], device=device, dtype=dtype)\n\n expected = torch.tensor([[[\n [6., 7.],\n [10., 11.],\n ]], [[\n [7., 15.],\n [8., 16.],\n ]]], device=device, dtype=dtype)\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ], [\n [1., 2.],\n [3., 2.],\n [3., 3.],\n [1., 3.],\n ]], device=device, dtype=dtype) # 2x4x2\n\n patches = kornia.crop_and_resize(inp, boxes, (2, 2))\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_crop_batch_broadcast(self, 
device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]], [[\n [1., 5., 9., 13.],\n [2., 6., 10., 14.],\n [3., 7., 11., 15.],\n [4., 8., 12., 16.],\n ]]], device=device, dtype=dtype)\n\n expected = torch.tensor([[[\n [6., 7.],\n [10., 11.],\n ]], [[\n [6., 10.],\n [7., 11.],\n ]]], device=device, dtype=dtype)\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n patches = kornia.crop_and_resize(inp, boxes, (2, 2))\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)\n img = utils.tensor_to_gradcheck_var(img) # to var\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n boxes = utils.tensor_to_gradcheck_var(boxes, requires_grad=False) # to var\n\n assert gradcheck(kornia.crop_and_resize,\n (img, boxes, (4, 2),),\n raise_exception=True)\n\n def test_jit(self, device, dtype):\n # Define script\n op = kornia.crop_and_resize\n op_script = torch.jit.script(op)\n # Define input\n img = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n crop_height, crop_width = 4, 2\n actual = op_script(img, boxes, (crop_height, crop_width))\n expected = op(img, boxes, (crop_height, crop_width))\n assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)\n\n\nclass TestCenterCrop:\n def test_center_crop_h2_w4(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n expected = torch.tensor([[[\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n ]]], device=device, dtype=dtype)\n\n out_crop = kornia.center_crop(inp, (2, 4))\n assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)\n\n def test_center_crop_h4_w2(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n height, width = 4, 2\n expected = torch.tensor([[[\n [2., 3.],\n [6., 7.],\n [10., 11.],\n [14., 15.],\n ]]], device=device, dtype=dtype)\n\n out_crop = kornia.center_crop(inp, (height, width))\n assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)\n\n def test_center_crop_h4_w2_batch(self, device, dtype):\n inp = torch.tensor([\n [[[1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.]]],\n [[[1., 5., 9., 13.],\n [2., 6., 10., 14.],\n [3., 7., 11., 15.],\n [4., 8., 12., 16.]]]\n ], device=device, dtype=dtype)\n\n expected = torch.tensor([[[\n [2., 3.],\n [6., 7.],\n [10., 11.],\n [14., 15.],\n ]], [[\n [5., 9.],\n [6., 10.],\n [7., 11.],\n [8., 12.],\n ]]], device=device, dtype=dtype)\n\n out_crop = kornia.center_crop(inp, (4, 2))\n assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)\n img = utils.tensor_to_gradcheck_var(img) # to var\n\n assert gradcheck(kornia.center_crop, (img, (4, 2),), raise_exception=True)\n\n def test_jit(self, device, dtype):\n # Define script\n op = kornia.center_crop\n op_script = torch.jit.script(op)\n # Define input\n img = torch.ones(1, 2, 
5, 4, device=device, dtype=dtype)\n\n actual = op_script(img, (4, 2))\n expected = op(img, (4, 2))\n assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)\n\n def test_jit_trace(self, device, dtype):\n # Define script\n op = kornia.center_crop\n op_script = torch.jit.script(op)\n # Define input\n img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)\n op_trace = torch.jit.trace(op_script, (img, (torch.tensor(2), torch.tensor(3))))\n img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)\n # Run\n actual = op_trace(img, (torch.tensor(2), torch.tensor(3)))\n expected = op(img, (2, 3))\n assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)\n\n\nclass TestCropByBoxes:\n def test_crop_by_boxes_no_resizing(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n src = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n dst = torch.tensor([[\n [0., 0.],\n [1., 0.],\n [1., 1.],\n [0., 1.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n expected = torch.tensor([[[\n [6., 7.],\n [10., 11.],\n ]]], device=device, dtype=dtype)\n\n patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)\n assert_allclose(patches, expected)\n\n def test_crop_by_boxes_resizing(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n src = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n dst = torch.tensor([[\n [0., 0.],\n [2., 0.],\n [2., 1.],\n [0., 1.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n expected = torch.tensor([[[\n [6., 6.5, 7.],\n [10., 10.5, 11.],\n ]]], device=device, dtype=dtype)\n\n patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)\n src = torch.tensor([[\n [1., 0.],\n [2., 0.],\n [2., 1.],\n [1., 1.]]], device=device, dtype=dtype)\n dst = torch.tensor([[\n [0., 0.],\n [1., 0.],\n [1., 1.],\n [0., 1.]]], device=device, dtype=dtype)\n\n inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True) # to var\n\n assert gradcheck(kornia.geometry.transform.crop.crop_by_boxes,\n (inp, src, dst,),\n raise_exception=True)\n\n\nclass TestCropByTransform:\n def test_crop_by_transform_no_resizing(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n transform = torch.tensor([[\n [1., 0., -1.],\n [0., 1., -1.],\n [0., 0., 1.],\n ]], device=device, dtype=dtype) # 1x3x3\n\n expected = torch.tensor([[[\n [6., 7.],\n [10., 11.],\n ]]], device=device, dtype=dtype)\n\n patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 2))\n assert_allclose(patches, expected)\n\n def test_crop_by_boxes_resizing(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n transform = torch.tensor([[\n [2., 0., -2.],\n [0., 1., -1.],\n [0., 0., 1.],\n ]], device=device, dtype=dtype) # 1x3x3\n\n expected = torch.tensor([[[\n [6., 6.5, 7.],\n [10., 10.5, 11.],\n ]]], device=device, dtype=dtype)\n\n patches = 
kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 3))\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)\n transform = torch.tensor([[\n [2., 0., -2.],\n [0., 1., -1.],\n [0., 0., 1.],\n ]], device=device, dtype=dtype) # 1x3x3\n\n inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True) # to var\n\n assert gradcheck(kornia.geometry.transform.crop.crop_by_transform_mat,\n (inp, transform, (2, 2),),\n raise_exception=True)\n", "import pytest\n\nimport kornia\nimport kornia.testing as utils # test utils\n\nimport torch\nfrom torch.autograd import gradcheck\nfrom torch.testing import assert_allclose\n\n\ndef identity_matrix(batch_size, device, dtype):\n r\"\"\"Creates a batched homogeneous identity matrix\"\"\"\n return torch.eye(4, device=device, dtype=dtype).repeat(batch_size, 1, 1) # Nx4x4\n\n\ndef euler_angles_to_rotation_matrix(x, y, z):\n r\"\"\"Create a rotation matrix from x, y, z angles\"\"\"\n assert x.dim() == 1, x.shape\n assert x.shape == y.shape == z.shape\n ones, zeros = torch.ones_like(x), torch.zeros_like(x)\n # the rotation matrix for the x-axis\n rx_tmp = [\n ones, zeros, zeros, zeros,\n zeros, torch.cos(x), -torch.sin(x), zeros,\n zeros, torch.sin(x), torch.cos(x), zeros,\n zeros, zeros, zeros, ones]\n rx = torch.stack(rx_tmp, dim=-1).view(-1, 4, 4)\n # the rotation matrix for the y-axis\n ry_tmp = [\n torch.cos(y), zeros, torch.sin(y), zeros,\n zeros, ones, zeros, zeros,\n -torch.sin(y), zeros, torch.cos(y), zeros,\n zeros, zeros, zeros, ones]\n ry = torch.stack(ry_tmp, dim=-1).view(-1, 4, 4)\n # the rotation matrix for the z-axis\n rz_tmp = [\n torch.cos(z), -torch.sin(z), zeros, zeros,\n torch.sin(z), torch.cos(z), zeros, zeros,\n zeros, zeros, ones, zeros,\n zeros, zeros, zeros, ones]\n rz = torch.stack(rz_tmp, dim=-1).view(-1, 4, 4)\n return torch.matmul(rz, torch.matmul(ry, rx)) # Bx4x4\n\n\nclass TestTransformPoints:\n\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n @pytest.mark.parametrize(\"num_points\", [2, 3, 5])\n @pytest.mark.parametrize(\"num_dims\", [2, 3])\n def test_transform_points(\n self, batch_size, num_points, num_dims, device, dtype):\n # generate input data\n eye_size = num_dims + 1\n points_src = torch.rand(batch_size, num_points, num_dims, device=device, dtype=dtype)\n\n dst_homo_src = utils.create_random_homography(batch_size, eye_size).to(device=device, dtype=dtype)\n dst_homo_src = dst_homo_src.to(device)\n\n # transform the points from dst to ref\n points_dst = kornia.transform_points(dst_homo_src, points_src)\n\n # transform the points from ref to dst\n src_homo_dst = torch.inverse(dst_homo_src)\n points_dst_to_src = kornia.transform_points(src_homo_dst, points_dst)\n\n # projected should be equal as initial\n assert_allclose(points_src, points_dst_to_src, atol=1e-4, rtol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n # generate input data\n batch_size, num_points, num_dims = 2, 3, 2\n eye_size = num_dims + 1\n points_src = torch.rand(batch_size, num_points, num_dims, device=device, dtype=dtype)\n dst_homo_src = utils.create_random_homography(batch_size, eye_size).to(device=device, dtype=dtype)\n # evaluate function gradient\n points_src = utils.tensor_to_gradcheck_var(points_src) # to var\n dst_homo_src = utils.tensor_to_gradcheck_var(dst_homo_src) # to var\n assert gradcheck(kornia.geometry.transform_points, (dst_homo_src, points_src,),\n raise_exception=True)\n\n def test_jit(self, 
device, dtype):\n points = torch.ones(1, 2, 2, device=device, dtype=dtype)\n transform = kornia.eye_like(3, points)\n op = kornia.geometry.transform_points\n op_script = torch.jit.script(op)\n actual = op_script(transform, points)\n expected = op(transform, points)\n assert_allclose(actual, expected, atol=1e-4, rtol=1e-4)\n\n\nclass TestTransformBoxes:\n\n def test_transform_boxes(self, device, dtype):\n\n boxes = torch.tensor([[139.2640, 103.0150, 397.3120, 410.5225]], device=device, dtype=dtype)\n\n expected = torch.tensor([[372.7360, 103.0150, 114.6880, 410.5225]], device=device, dtype=dtype)\n\n trans_mat = torch.tensor([[[-1., 0., 512.],\n [0., 1., 0.],\n [0., 0., 1.]]], device=device, dtype=dtype)\n\n out = kornia.transform_boxes(trans_mat, boxes)\n assert_allclose(out, expected, atol=1e-4, rtol=1e-4)\n\n def test_transform_multiple_boxes(self, device, dtype):\n\n boxes = torch.tensor([[139.2640, 103.0150, 397.3120, 410.5225],\n [1.0240, 80.5547, 512.0000, 512.0000],\n [165.2053, 262.1440, 510.6347, 508.9280],\n [119.8080, 144.2067, 257.0240, 410.1292]], device=device, dtype=dtype)\n\n boxes = boxes.repeat(2, 1, 1) # 2 x 4 x 4 two images 4 boxes each\n\n expected = torch.tensor([[[372.7360, 103.0150, 114.6880, 410.5225],\n [510.9760, 80.5547, 0.0000, 512.0000],\n [346.7947, 262.1440, 1.3653, 508.9280],\n [392.1920, 144.2067, 254.9760, 410.1292]],\n\n [[139.2640, 103.0150, 397.3120, 410.5225],\n [1.0240, 80.5547, 512.0000, 512.0000],\n [165.2053, 262.1440, 510.6347, 508.9280],\n [119.8080, 144.2067, 257.0240, 410.1292]]], device=device, dtype=dtype)\n\n trans_mat = torch.tensor([[[-1., 0., 512.],\n [0., 1., 0.],\n [0., 0., 1.]],\n\n [[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]]], device=device, dtype=dtype)\n\n out = kornia.transform_boxes(trans_mat, boxes)\n assert_allclose(out, expected, atol=1e-4, rtol=1e-4)\n\n def test_transform_boxes_wh(self, device, dtype):\n\n boxes = torch.tensor([[139.2640, 103.0150, 258.0480, 307.5075],\n [1.0240, 80.5547, 510.9760, 431.4453],\n [165.2053, 262.1440, 345.4293, 246.7840],\n [119.8080, 144.2067, 137.2160, 265.9225]], device=device, dtype=dtype)\n\n expected = torch.tensor([[372.7360, 103.0150, -258.0480, 307.5075],\n [510.9760, 80.5547, -510.9760, 431.4453],\n [346.7947, 262.1440, -345.4293, 246.7840],\n [392.1920, 144.2067, -137.2160, 265.9225]], device=device, dtype=dtype)\n\n trans_mat = torch.tensor([[[-1., 0., 512.],\n [0., 1., 0.],\n [0., 0., 1.]]], device=device, dtype=dtype)\n\n out = kornia.transform_boxes(trans_mat, boxes, mode='xywh')\n assert_allclose(out, expected, atol=1e-4, rtol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n\n boxes = torch.tensor([[139.2640, 103.0150, 258.0480, 307.5075],\n [1.0240, 80.5547, 510.9760, 431.4453],\n [165.2053, 262.1440, 345.4293, 246.7840],\n [119.8080, 144.2067, 137.2160, 265.9225]], device=device, dtype=dtype)\n\n trans_mat = torch.tensor([[[-1., 0., 512.],\n [0., 1., 0.],\n [0., 0., 1.]]], device=device, dtype=dtype)\n\n trans_mat = utils.tensor_to_gradcheck_var(trans_mat)\n boxes = utils.tensor_to_gradcheck_var(boxes)\n\n assert gradcheck(kornia.transform_boxes, (trans_mat, boxes), raise_exception=True)\n\n def test_jit(self, device, dtype):\n boxes = torch.tensor([[139.2640, 103.0150, 258.0480, 307.5075]], device=device, dtype=dtype)\n trans_mat = torch.tensor([[[-1., 0., 512.],\n [0., 1., 0.],\n [0., 0., 1.]]], device=device, dtype=dtype)\n args = (boxes, trans_mat)\n op = kornia.geometry.transform_points\n op_jit = torch.jit.script(op)\n assert_allclose(op(*args), 
op_jit(*args))\n\n\nclass TestComposeTransforms:\n\n def test_translation_4x4(self, device, dtype):\n offset = 10\n trans_01 = identity_matrix(batch_size=1, device=device, dtype=dtype)[0]\n trans_12 = identity_matrix(batch_size=1, device=device, dtype=dtype)[0]\n trans_12[..., :3, -1] += offset # add offset to translation vector\n\n trans_02 = kornia.compose_transformations(trans_01, trans_12)\n assert_allclose(trans_02, trans_12, atol=1e-4, rtol=1e-4)\n\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n def test_translation_Bx4x4(self, batch_size, device, dtype):\n offset = 10\n trans_01 = identity_matrix(batch_size, device=device, dtype=dtype)\n trans_12 = identity_matrix(batch_size, device=device, dtype=dtype)\n trans_12[..., :3, -1] += offset # add offset to translation vector\n\n trans_02 = kornia.compose_transformations(trans_01, trans_12)\n assert_allclose(trans_02, trans_12, atol=1e-4, rtol=1e-4)\n\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n def test_gradcheck(self, batch_size, device, dtype):\n trans_01 = identity_matrix(batch_size, device=device, dtype=dtype)\n trans_12 = identity_matrix(batch_size, device=device, dtype=dtype)\n\n trans_01 = utils.tensor_to_gradcheck_var(trans_01) # to var\n trans_12 = utils.tensor_to_gradcheck_var(trans_12) # to var\n assert gradcheck(kornia.compose_transformations, (trans_01, trans_12,),\n raise_exception=True)\n\n\nclass TestInverseTransformation:\n\n def test_translation_4x4(self, device, dtype):\n offset = 10\n trans_01 = identity_matrix(batch_size=1, device=device, dtype=dtype)[0]\n trans_01[..., :3, -1] += offset # add offset to translation vector\n\n trans_10 = kornia.inverse_transformation(trans_01)\n trans_01_hat = kornia.inverse_transformation(trans_10)\n assert_allclose(trans_01, trans_01_hat, atol=1e-4, rtol=1e-4)\n\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n def test_translation_Bx4x4(self, batch_size, device, dtype):\n offset = 10\n trans_01 = identity_matrix(batch_size, device=device, dtype=dtype)\n trans_01[..., :3, -1] += offset # add offset to translation vector\n\n trans_10 = kornia.inverse_transformation(trans_01)\n trans_01_hat = kornia.inverse_transformation(trans_10)\n assert_allclose(trans_01, trans_01_hat, atol=1e-4, rtol=1e-4)\n\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n def test_rotation_translation_Bx4x4(self, batch_size, device, dtype):\n offset = 10\n x, y, z = 0, 0, kornia.pi\n ones = torch.ones(batch_size, device=device, dtype=dtype)\n rmat_01 = euler_angles_to_rotation_matrix(x * ones, y * ones, z * ones)\n\n trans_01 = identity_matrix(batch_size, device=device, dtype=dtype)\n trans_01[..., :3, -1] += offset # add offset to translation vector\n trans_01[..., :3, :3] = rmat_01[..., :3, :3]\n\n trans_10 = kornia.inverse_transformation(trans_01)\n trans_01_hat = kornia.inverse_transformation(trans_10)\n assert_allclose(trans_01, trans_01_hat, atol=1e-4, rtol=1e-4)\n\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n def test_gradcheck(self, batch_size, device, dtype):\n trans_01 = identity_matrix(batch_size, device=device, dtype=dtype)\n trans_01 = utils.tensor_to_gradcheck_var(trans_01) # to var\n assert gradcheck(kornia.inverse_transformation, (trans_01,),\n raise_exception=True)\n\n\nclass TestRelativeTransformation:\n\n def test_translation_4x4(self, device, dtype):\n offset = 10.\n trans_01 = identity_matrix(batch_size=1, device=device, dtype=dtype)[0]\n trans_02 = identity_matrix(batch_size=1, device=device, dtype=dtype)[0]\n trans_02[..., :3, -1] += offset # add 
offset to translation vector\n\n trans_12 = kornia.relative_transformation(trans_01, trans_02)\n trans_02_hat = kornia.compose_transformations(trans_01, trans_12)\n assert_allclose(trans_02_hat, trans_02, atol=1e-4, rtol=1e-4)\n\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n def test_rotation_translation_Bx4x4(self, batch_size, device, dtype):\n offset = 10.\n x, y, z = 0., 0., kornia.pi\n ones = torch.ones(batch_size, device=device, dtype=dtype)\n rmat_02 = euler_angles_to_rotation_matrix(x * ones, y * ones, z * ones)\n\n trans_01 = identity_matrix(batch_size, device=device, dtype=dtype)\n trans_02 = identity_matrix(batch_size, device=device, dtype=dtype)\n trans_02[..., :3, -1] += offset # add offset to translation vector\n trans_02[..., :3, :3] = rmat_02[..., :3, :3]\n\n trans_12 = kornia.relative_transformation(trans_01, trans_02)\n trans_02_hat = kornia.compose_transformations(trans_01, trans_12)\n assert_allclose(trans_02_hat, trans_02, atol=1e-4, rtol=1e-4)\n\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n def test_gradcheck(self, batch_size, device, dtype):\n trans_01 = identity_matrix(batch_size, device=device, dtype=dtype)\n trans_02 = identity_matrix(batch_size, device=device, dtype=dtype)\n\n trans_01 = utils.tensor_to_gradcheck_var(trans_01) # to var\n trans_02 = utils.tensor_to_gradcheck_var(trans_02) # to var\n assert gradcheck(kornia.relative_transformation, (trans_01, trans_02,),\n raise_exception=True)\n\n\nclass TestTransformLAFs:\n\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n @pytest.mark.parametrize(\"num_points\", [2, 3, 5])\n def test_transform_points(\n self, batch_size, num_points, device, dtype):\n # generate input data\n eye_size = 3\n lafs_src = torch.rand(batch_size, num_points, 2, 3, device=device, dtype=dtype)\n\n dst_homo_src = utils.create_random_homography(batch_size, eye_size).to(device=device, dtype=dtype)\n\n # transform the points from dst to ref\n lafs_dst = kornia.perspective_transform_lafs(dst_homo_src, lafs_src)\n\n # transform the points from ref to dst\n src_homo_dst = torch.inverse(dst_homo_src)\n lafs_dst_to_src = kornia.perspective_transform_lafs(src_homo_dst, lafs_dst)\n\n # projected should be equal as initial\n assert_allclose(lafs_src, lafs_dst_to_src)\n\n def test_gradcheck(self, device, dtype):\n # generate input data\n batch_size, num_points, num_dims = 2, 3, 2\n eye_size = 3\n points_src = torch.rand(batch_size, num_points, 2, 3, device=device, dtype=dtype)\n dst_homo_src = utils.create_random_homography(batch_size, eye_size).to(device=device, dtype=dtype)\n # evaluate function gradient\n points_src = utils.tensor_to_gradcheck_var(points_src) # to var\n dst_homo_src = utils.tensor_to_gradcheck_var(dst_homo_src) # to var\n assert gradcheck(kornia.perspective_transform_lafs, (dst_homo_src, points_src,),\n raise_exception=True)\n", "import kornia\nimport torch\nimport pytest\n\nimport kornia.testing as utils # test utils\n\nfrom torch.testing import assert_allclose\nfrom torch.autograd import gradcheck\n\n\nclass TestVflip:\n def smoke_test(self, device, dtype):\n f = kornia.Vflip()\n repr = \"Vflip()\"\n assert str(f) == repr\n\n def test_vflip(self, device, dtype):\n\n f = kornia.Vflip()\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n expected = torch.tensor([[0., 1., 1.],\n [0., 0., 0.],\n [0., 0., 0.]], device=device, dtype=dtype) # 3 x 3\n\n assert (f(input) == expected).all()\n\n def test_batch_vflip(self, device, dtype):\n\n input = 
torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n input = input.repeat(2, 1, 1) # 2 x 3 x 3\n\n f = kornia.Vflip()\n expected = torch.tensor([[[0., 1., 1.],\n [0., 0., 0.],\n [0., 0., 0.]]], device=device, dtype=dtype) # 1 x 3 x 3\n\n expected = expected.repeat(2, 1, 1) # 2 x 3 x 3\n\n assert (f(input) == expected).all()\n\n @pytest.mark.skip(reason=\"turn off all jit for a while\")\n def test_jit(self, device, dtype):\n @torch.jit.script\n def op_script(data: torch.Tensor) -> torch.Tensor:\n\n return kornia.vflip(data)\n\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n # Build jit trace\n op_trace = torch.jit.trace(op_script, (input, ))\n\n # Create new inputs\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [5., 5., 0.]], device=device, dtype=dtype) # 3 x 3\n\n input = input.repeat(2, 1, 1) # 2 x 3 x 3\n\n expected = torch.tensor([[[5., 5., 0.],\n [0., 0., 0.],\n [0., 0., 0.]]], device=device, dtype=dtype) # 3 x 3\n\n expected = expected.repeat(2, 1, 1)\n\n actual = op_trace(input)\n\n assert_allclose(actual, expected)\n\n def test_gradcheck(self, device, dtype):\n\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n input = utils.tensor_to_gradcheck_var(input) # to var\n\n assert gradcheck(kornia.Vflip(), (input,), raise_exception=True)\n\n\nclass TestHflip:\n\n def smoke_test(self, device, dtype):\n f = kornia.Hflip()\n repr = \"Hflip()\"\n assert str(f) == repr\n\n def test_hflip(self, device, dtype):\n\n f = kornia.Hflip()\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n expected = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [1., 1., 0.]], device=device, dtype=dtype) # 3 x 3\n\n assert (f(input) == expected).all()\n\n def test_batch_hflip(self, device, dtype):\n\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 1 x 3 x 3\n\n input = input.repeat(2, 1, 1) # 2 x 3 x 3\n\n f = kornia.Hflip()\n expected = torch.tensor([[[0., 0., 0.],\n [0., 0., 0.],\n [1., 1., 0.]]], device=device, dtype=dtype) # 3 x 3\n\n expected = expected.repeat(2, 1, 1) # 2 x 3 x 3\n\n assert (f(input) == expected).all()\n\n @pytest.mark.skip(reason=\"turn off all jit for a while\")\n def test_jit(self, device, dtype):\n @torch.jit.script\n def op_script(data: torch.Tensor) -> torch.Tensor:\n\n return kornia.hflip(data)\n\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n # Build jit trace\n op_trace = torch.jit.trace(op_script, (input, ))\n\n # Create new inputs\n input = torch.tensor([[0., 0., 0.],\n [5., 5., 0.],\n [0., 0., 0.]], device=device, dtype=dtype) # 3 x 3\n\n input = input.repeat(2, 1, 1) # 2 x 3 x 3\n\n expected = torch.tensor([[[0., 0., 0.],\n [0., 5., 5.],\n [0., 0., 0.]]], device=device, dtype=dtype) # 3 x 3\n\n expected = expected.repeat(2, 1, 1)\n\n actual = op_trace(input)\n\n assert_allclose(actual, expected)\n\n def test_gradcheck(self, device, dtype):\n\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n input = utils.tensor_to_gradcheck_var(input) # to var\n\n assert gradcheck(kornia.Hflip(), (input,), raise_exception=True)\n\n\nclass TestRot180:\n\n def smoke_test(self, device, dtype):\n f = kornia.Rot180()\n repr = \"Rot180()\"\n assert str(f) == repr\n\n def test_rot180(self, device, 
dtype):\n\n f = kornia.Rot180()\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n expected = torch.tensor([[1., 1., 0.],\n [0., 0., 0.],\n [0., 0., 0.]], device=device, dtype=dtype) # 3 x 3\n\n assert (f(input) == expected).all()\n\n def test_batch_rot180(self, device, dtype):\n\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n input = input.repeat(2, 1, 1) # 2 x 3 x 3\n\n f = kornia.Rot180()\n expected = torch.tensor([[1., 1., 0.],\n [0., 0., 0.],\n [0., 0., 0.]], device=device, dtype=dtype) # 1 x 3 x 3\n\n expected = expected.repeat(2, 1, 1) # 2 x 3 x 3\n\n assert (f(input) == expected).all()\n\n @pytest.mark.skip(reason=\"turn off all jit for a while\")\n def test_jit(self, device, dtype):\n @torch.jit.script\n def op_script(data: torch.Tensor) -> torch.Tensor:\n\n return kornia.rot180(data)\n\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n # Build jit trace\n op_trace = torch.jit.trace(op_script, (input, ))\n\n # Create new inputs\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [5., 5., 0.]], device=device, dtype=dtype) # 3 x 3\n\n input = input.repeat(2, 1, 1) # 2 x 3 x 3\n\n expected = torch.tensor([[[0., 5., 5.],\n [0., 0., 0.],\n [0., 0., 0.]]], device=device, dtype=dtype) # 3 x 3\n\n expected = expected.repeat(2, 1, 1)\n\n actual = op_trace(input)\n\n assert_allclose(actual, expected)\n\n def test_gradcheck(self, device, dtype):\n\n input = torch.tensor([[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]], device=device, dtype=dtype) # 3 x 3\n\n input = utils.tensor_to_gradcheck_var(input) # to var\n\n assert gradcheck(kornia.Rot180(), (input,), raise_exception=True)\n", "import pytest\nimport torch\nfrom kornia.morphology.basic_operators import _se_to_mask\nfrom kornia.morphology.morphology import gradient\nimport kornia.testing as utils # test utils\nfrom torch.autograd import gradcheck\nfrom torch.testing import assert_allclose\n\n\nclass TestGradient():\n\n def test_smoke(self, device, dtype):\n kernel = torch.rand(3, 3, device=device, dtype=dtype)\n assert _se_to_mask(kernel) is not None\n\n @pytest.mark.parametrize(\n \"shape\", [(1, 3, 4, 4), (2, 3, 2, 4), (3, 3, 4, 1), (3, 2, 5, 5)])\n @pytest.mark.parametrize(\n \"kernel\", [(3, 3), (5, 5)])\n def test_cardinality(self, device, dtype, shape, kernel):\n img = torch.ones(shape, device=device, dtype=dtype)\n krnl = torch.ones(kernel, device=device, dtype=dtype)\n assert gradient(img, krnl).shape == shape\n\n def test_value(self, device, dtype):\n input = torch.tensor([[0.5, 1., 0.3], [0.7, 0.3, 0.8], [0.4, 0.9, 0.2]],\n device=device, dtype=dtype)[None, None, :, :]\n kernel = torch.tensor([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]], device=device, dtype=dtype)\n expected = torch.tensor([[0.5, 0.7, 0.7], [0.4, 0.7, 0.6], [0.5, 0.7, 0.7]],\n device=device, dtype=dtype)[None, None, :, :]\n assert_allclose(gradient(input, kernel), expected)\n\n def test_exception(self, device, dtype):\n input = torch.ones(1, 1, 3, 4, device=device, dtype=dtype)\n kernel = torch.ones(3, 3, device=device, dtype=dtype)\n\n with pytest.raises(TypeError):\n assert gradient([0.], kernel)\n\n with pytest.raises(TypeError):\n assert gradient(input, [0.])\n\n with pytest.raises(ValueError):\n test = torch.ones(2, 3, 4, device=device, dtype=dtype)\n assert gradient(test, kernel)\n\n with pytest.raises(ValueError):\n test = torch.ones(2, 3, 4, device=device, dtype=dtype)\n 
assert gradient(input, test)\n\n @pytest.mark.grad\n def test_gradcheck(self, device, dtype):\n input = torch.rand(2, 3, 4, 4, requires_grad=True, device=device, dtype=torch.float64)\n kernel = torch.rand(3, 3, requires_grad=True, device=device, dtype=torch.float64)\n assert gradcheck(gradient, (input, kernel), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n op = gradient\n op_script = torch.jit.script(op)\n\n input = torch.rand(1, 2, 7, 7, device=device, dtype=dtype)\n kernel = torch.ones(3, 3, device=device, dtype=dtype)\n\n actual = op_script(input, kernel)\n expected = op(input, kernel)\n\n assert_allclose(actual, expected)\n" ]
[ [ "torch.jit.script", "torch.ones", "torch.testing.assert_allclose", "torch.randn", "torch.tensor", "torch.rand", "torch.autograd.gradcheck" ], [ "torch.jit.script", "torch.ones", "torch.testing.assert_allclose", "torch.sin", "torch.zeros_like", "torch.eye", "torch.inverse", "torch.tensor", "torch.matmul", "torch.rand", "torch.stack", "torch.autograd.gradcheck", "torch.ones_like", "torch.cos" ], [ "torch.jit.trace", "torch.testing.assert_allclose", "torch.tensor" ], [ "torch.jit.script", "torch.ones", "torch.testing.assert_allclose", "torch.tensor", "torch.rand", "torch.autograd.gradcheck" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GonzalezDiazJ/pyviz_geoviews_clone
[ "cac9afd1bc0d25313c84ea617300bbe40207d044" ]
[ "geoviews/operation/projection.py" ]
[ "import param\nimport numpy as np\n\nfrom cartopy import crs as ccrs\nfrom cartopy.img_transform import warp_array, _determine_bounds\nfrom holoviews.core.util import cartesian_product, get_param_values\nfrom holoviews.operation import Operation\nfrom shapely.geometry import Polygon, LineString, MultiPolygon, MultiLineString\n\nfrom ..element import (Image, Shape, Polygons, Path, Points, Contours,\n RGB, Graph, Nodes, EdgePaths, QuadMesh, VectorField,\n HexTiles, Labels)\nfrom ..util import (\n project_extents, geom_to_array, wrap_path_data, is_multi_geometry,\n polygon_to_geom, path_to_geom\n)\n\n\nclass _project_operation(Operation):\n \"\"\"\n Baseclass for projection operations, projecting elements from their\n source coordinate reference system to the supplied projection.\n \"\"\"\n\n projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,\n class_=ccrs.Projection,\n instantiate=False, doc=\"\"\"\n Projection the shape type is projected to.\"\"\")\n\n # Defines the types of elements supported by the operation\n supported_types = []\n\n def _process(self, element, key=None):\n return element.map(self._process_element, self.supported_types)\n\n\nclass project_path(_project_operation):\n \"\"\"\n Projects Polygons and Path Elements from their source coordinate\n reference system to the supplied projection.\n \"\"\"\n\n supported_types = [Polygons, Path, Contours, EdgePaths]\n\n def _project_path(self, element, path, data, boundary, geom_type, multi_type):\n \"\"\"\n Handle case of continuously varying path\n \"\"\"\n xdim, ydim = path.kdims[:2]\n xs, ys = (path.dimension_values(i) for i in range(2))\n if not len(xs):\n return []\n\n proj_arr = self.p.projection.quick_vertices_transform(\n np.column_stack([xs, ys]), element.crs)\n\n if proj_arr is None:\n vertices = np.column_stack([xs, ys])\n if hasattr(element.crs, '_bbox_and_offset'):\n vertices = wrap_path_data(vertices, element.crs, element.crs)\n path = geom_type(vertices)\n if boundary:\n path = path.intersection(boundary)\n if not path:\n return []\n proj = self.p.projection.project_geometry(path, element.crs)\n proj_arr = geom_to_array(proj)\n data[xdim.name] = proj_arr[:, 0]\n data[ydim.name] = proj_arr[:, 1]\n return [data]\n\n def _project_contour(self, element, contour, data, boundary, geom_type, multi_type):\n \"\"\"\n Handle case of iso-contour\n \"\"\"\n xdim, ydim = contour.kdims[:2]\n data = {k: vals[0] for k, vals in data.items()}\n\n # Wrap longitudes\n vertices = contour.array([0, 1])\n if hasattr(element.crs, '_bbox_and_offset'):\n vertices = wrap_path_data(vertices, element.crs, element.crs)\n element = type(element)([vertices])\n to_geom = polygon_to_geom if isinstance(element, Polygon) else path_to_geom\n\n # Clip path to projection boundaries\n geoms = []\n for g in to_geom(element, multi=False, skip_invalid=False):\n if np.isinf(np.array(g.array_interface_base['data'])).sum():\n # Skip if infinity in path\n continue\n try:\n # Compute boundary intersections\n if boundary:\n g = g.intersection(boundary)\n except:\n continue\n if is_multi_geometry(g):\n for p in g:\n try:\n geoms.append(geom_type(p))\n except:\n continue\n else:\n geoms.append(g)\n\n # Project geometry\n projected = []\n for g in geoms:\n proj = self.p.projection.project_geometry(g, contour.crs)\n proj = proj if is_multi_geometry(proj) else [proj]\n for geom in proj:\n vertices = np.array(geom.array_interface_base['data']).reshape(-1, 2)\n xs, ys = vertices.T\n if len(xs):\n projected.append(dict(data, **{xdim.name: xs, ydim.name: ys}))\n 
return projected\n\n def _project_geodataframe(self, element):\n geoms = element.split(datatype='geom')\n projected = [self.p.projection.project_geometry(geom, element.crs)\n for geom in geoms]\n new_data = element.data.copy()\n new_data['geometry'] = projected\n return element.clone(new_data, crs=self.p.projection)\n\n def _process_element(self, element):\n if not len(element):\n return element.clone(crs=self.p.projection)\n elif element.interface.datatype == 'geodataframe':\n return self._project_geodataframe(element)\n\n boundary = element.crs.project_geometry(Polygon(self.p.projection.boundary),\n self.p.projection)\n\n\n if isinstance(element, Polygons):\n multi_type, geom_type = MultiPolygon, Polygon\n else:\n multi_type, geom_type = MultiLineString, LineString\n\n projected = []\n paths = element.split()\n for path in paths:\n data = {vd.name: path.dimension_values(vd, expanded=False) for vd in path.vdims}\n if any(len(vals) > 1 for vals in data.values()):\n projected += self._project_path(element, path, data, boundary, geom_type, multi_type)\n else:\n projected += self._project_contour(element, path, data, boundary, geom_type, multi_type)\n\n if len(paths) and len(projected) == 0:\n self.warning('While projecting a %s element from a %s coordinate '\n 'reference system (crs) to a %s projection none of '\n 'the projected paths were contained within the bounds '\n 'specified by the projection. Ensure you have specified '\n 'the correct coordinate system for your data.' %\n (type(element).__name__, type(element.crs).__name__,\n type(self.p.projection).__name__))\n\n return element.clone(projected, crs=self.p.projection)\n\n\nclass project_shape(_project_operation):\n \"\"\"\n Projects Shape Element from the source coordinate reference system\n to the supplied projection.\n \"\"\"\n\n supported_types = [Shape]\n\n def _process_element(self, element):\n if not len(element):\n return element.clone(crs=self.p.projection)\n geom = element.geom()\n vertices = geom_to_array(geom)\n if isinstance(geom, (MultiPolygon, Polygon)):\n obj = Polygons([vertices])\n else:\n obj = Path([vertices])\n geom = project_path(obj, projection=self.p.projection).geom()\n return element.clone(geom, crs=self.p.projection)\n\n\nclass project_points(_project_operation):\n\n supported_types = [Points, Nodes, VectorField, HexTiles, Labels]\n\n def _process_element(self, element):\n if not len(element):\n return element.clone(crs=self.p.projection)\n xdim, ydim = element.dimensions()[:2]\n xs, ys = (element.dimension_values(i) for i in range(2))\n coordinates = self.p.projection.transform_points(element.crs, xs, ys)\n mask = np.isfinite(coordinates[:, 0])\n new_data = {k: v[mask] for k, v in element.columns().items()}\n new_data[xdim.name] = coordinates[mask, 0]\n new_data[ydim.name] = coordinates[mask, 1]\n datatype = [element.interface.datatype]+element.datatype\n\n if len(new_data[xdim.name]) == 0:\n self.warning('While projecting a %s element from a %s coordinate '\n 'reference system (crs) to a %s projection none of '\n 'the projected paths were contained within the bounds '\n 'specified by the projection. Ensure you have specified '\n 'the correct coordinate system for your data.' 
%\n (type(element).__name__, type(element.crs).__name__,\n type(self.p.projection).__name__))\n\n return element.clone(new_data, crs=self.p.projection,\n datatype=datatype)\n\n\nclass project_graph(_project_operation):\n\n supported_types = [Graph]\n\n def _process_element(self, element):\n nodes = project_points(element.nodes, projection=self.projection)\n data = (element.data, nodes)\n if element._edgepaths:\n data = data + (project_path(element.edgepaths, projection=self.projection),)\n return element.clone(data, crs=self.projection)\n\n\nclass project_quadmesh(_project_operation):\n\n supported_types = [QuadMesh]\n\n def _process_element(self, element):\n proj = self.p.projection\n irregular = any(element.interface.irregular(element, kd)\n for kd in element.kdims)\n zs = element.dimension_values(2, flat=False)\n if irregular:\n X, Y = [np.asarray(element.interface.coords(element, kd, expanded=True))\n for kd in element.kdims]\n else:\n X = element.dimension_values(0, expanded=True)\n Y = element.dimension_values(1, expanded=True)\n zs = zs.T\n\n coords = proj.transform_points(element.crs, X, Y)\n PX, PY = coords[..., 0], coords[..., 1]\n\n # Mask quads which are wrapping around the x-axis\n wrap_proj_types = (ccrs._RectangularProjection,\n ccrs._WarpedRectangularProjection,\n ccrs.InterruptedGoodeHomolosine,\n ccrs.Mercator)\n if isinstance(proj, wrap_proj_types):\n with np.errstate(invalid='ignore'):\n edge_lengths = np.hypot(\n np.diff(PX , axis=1),\n np.diff(PY, axis=1)\n )\n to_mask = (\n (edge_lengths >= abs(proj.x_limits[1] -\n proj.x_limits[0]) / 2) |\n np.isnan(edge_lengths)\n )\n if np.any(to_mask):\n mask = np.zeros(zs.shape, dtype=np.bool)\n mask[:, 1:][to_mask] = True\n mask[:, 2:][to_mask[:, :-1]] = True\n mask[:, :-1][to_mask] = True\n mask[:, :-2][to_mask[:, 1:]] = True\n mask[1:, 1:][to_mask[:-1]] = True\n mask[1:, :-1][to_mask[:-1]] = True\n mask[:-1, 1:][to_mask[1:]] = True\n mask[:-1, :-1][to_mask[1:]] = True\n zs[mask] = np.NaN\n\n params = get_param_values(element)\n if PX.ndim < 2:\n PX = PX.reshape(zs.shape)\n if PY.ndim < 2:\n PY = PY.reshape(zs.shape)\n return QuadMesh((PX, PY, zs), crs=self.projection, **params)\n\n\nclass project_image(_project_operation):\n \"\"\"\n Projects an geoviews Image to the specified projection,\n returning a regular HoloViews Image type. Works by\n regridding the data along projected bounds. 
Only supports\n rectangular projections.\n \"\"\"\n\n fast = param.Boolean(default=False, doc=\"\"\"\n Whether to enable fast reprojection with (much) better\n performance but poorer handling in polar regions.\"\"\")\n\n width = param.Integer(default=None, doc=\"\"\"\n Width of the reprojectd Image\"\"\")\n\n height = param.Integer(default=None, doc=\"\"\"\n Height of the reprojected Image\"\"\")\n\n link_inputs = param.Boolean(default=True, doc=\"\"\"\n By default, the link_inputs parameter is set to True so that\n when applying project_image, backends that support linked streams\n update RangeXY streams on the inputs of the operation.\"\"\")\n\n supported_types = [Image]\n\n def _process(self, img, key=None):\n if self.p.fast:\n return self._fast_process(img, key)\n proj = self.p.projection\n if proj == img.crs:\n return img\n x0, x1 = img.range(0)\n y0, y1 = img.range(1)\n xn, yn = img.interface.shape(img, gridded=True)[:2]\n px0, py0, px1, py1 = project_extents((x0, y0, x1, y1),\n img.crs, proj)\n src_ext, trgt_ext = (x0, x1, y0, y1), (px0, px1, py0, py1)\n arrays = []\n for vd in img.vdims:\n arr = img.dimension_values(vd, flat=False)\n if arr.size:\n projected, extents = warp_array(arr, proj, img.crs, (xn, yn),\n src_ext, trgt_ext)\n else:\n projected, extents = arr, trgt_ext\n arrays.append(projected)\n projected = np.dstack(arrays) if len(arrays) > 1 else arrays[0]\n data = np.flipud(projected)\n bounds = (extents[0], extents[2], extents[1], extents[3])\n return img.clone(data, bounds=bounds, kdims=img.kdims,\n vdims=img.vdims, crs=proj, xdensity=None,\n ydensity=None)\n\n def _fast_process(self, element, key=None):\n # Project coordinates\n proj = self.p.projection\n if proj == element.crs:\n return element\n\n h, w = element.interface.shape(element, gridded=True)[:2]\n xs = element.dimension_values(0)\n ys = element.dimension_values(1)\n if isinstance(element, RGB):\n rgb = element.rgb\n array = np.dstack([np.flipud(rgb.dimension_values(d, flat=False))\n for d in rgb.vdims])\n else:\n array = element.dimension_values(2, flat=False)\n\n (x0, y0, x1, y1) = element.bounds.lbrt()\n width = int(w) if self.p.width is None else self.p.width\n height = int(h) if self.p.height is None else self.p.height\n\n bounds = _determine_bounds(xs, ys, element.crs)\n yb = bounds['y']\n resampled = []\n xvalues = []\n for xb in bounds['x']:\n px0, py0, px1, py1 = project_extents((xb[0], yb[0], xb[1], yb[1]), element.crs, proj)\n if len(bounds['x']) > 1:\n xfraction = (xb[1]-xb[0])/(x1-x0)\n fraction_width = int(width*xfraction)\n else:\n fraction_width = width\n xs = np.linspace(px0, px1, fraction_width)\n ys = np.linspace(py0, py1, height)\n cxs, cys = cartesian_product([xs, ys])\n\n pxs, pys, _ = element.crs.transform_points(proj, np.asarray(cxs), np.asarray(cys)).T\n icxs = (((pxs-x0) / (x1-x0)) * w).astype(int)\n icys = (((pys-y0) / (y1-y0)) * h).astype(int)\n xvalues.append(xs)\n\n icxs[icxs<0] = 0\n icys[icys<0] = 0\n icxs[icxs>=w] = w-1\n icys[icys>=h] = h-1\n resampled_arr = array[icys, icxs]\n if isinstance(element, RGB):\n nvdims = len(element.vdims)\n resampled_arr = resampled_arr.reshape((fraction_width, height, nvdims)).transpose([1, 0, 2])\n else:\n resampled_arr = resampled_arr.reshape((fraction_width, height)).T\n resampled.append(resampled_arr)\n xs = np.concatenate(xvalues[::-1])\n resampled = np.hstack(resampled[::-1])\n datatypes = [element.interface.datatype, 'xarray', 'grid']\n data = (xs, ys)\n for i in range(len(element.vdims)):\n if resampled.ndim > 2:\n data = data + 
(resampled[::-1, :, i],)\n else:\n data = data + (resampled,)\n return element.clone(data, crs=proj, bounds=None, datatype=datatypes)\n\n\nclass project(Operation):\n \"\"\"\n Projects GeoViews Element types to the specified projection.\n \"\"\"\n\n projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,\n class_=ccrs.Projection,\n instantiate=False, doc=\"\"\"\n Projection the image type is projected to.\"\"\")\n\n _operations = [project_path, project_image, project_shape,\n project_graph, project_quadmesh, project_points]\n\n def _process(self, element, key=None):\n for op in self._operations:\n element = element.map(op.instance(projection=self.p.projection),\n op.supported_types)\n return element\n" ]
[ [ "numpy.hstack", "numpy.isfinite", "numpy.linspace", "numpy.isnan", "numpy.asarray", "numpy.flipud", "numpy.dstack", "numpy.concatenate", "numpy.diff", "numpy.any", "numpy.column_stack", "numpy.errstate", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
necromuralist/resampling
[ "0b48a51cb5f8e21a3f52508ecc74f12fa03d9b25" ]
[ "resampling/foundations_for_inference/gender_discrimination.py" ]
[ "\n# pandas standard library\nimport sys\n\n# third-party\nimport pandas\nimport matplotlib\nimport matplotlib.pyplot as plot\n\nmatplotlib.style.use('ggplot')\n\nGENDER_COUNT = 24\nMALES_PROMOTED = 21\nFEMALES_PROMOTED = 14\nGENDER_DIFFERENCE = MALES_PROMOTED - FEMALES_PROMOTED\nFEMALES_NOT_PROMOTED = GENDER_COUNT - FEMALES_PROMOTED\nMALES_NOT_PROMOTED = GENDER_COUNT - MALES_PROMOTED\n\nexperiment_data = pandas.DataFrame({\"Promoted\": [MALES_PROMOTED,\n FEMALES_PROMOTED],\n \"Not Promoted\": [MALES_NOT_PROMOTED,\n FEMALES_NOT_PROMOTED]},\n index='male female'.split(),\n columns=[\"Promoted\", \"Not Promoted\"])\n\nexperiment_frame = experiment_data.copy()\nexperiment_frame['Total'] = sum((experiment_frame[column] for column in\n experiment_frame.columns))\nlast_row = pandas.DataFrame(experiment_frame.sum()).transpose()\nlast_row.index = pandas.Index(['Total'])\nexperiment_frame = pandas.concat((experiment_frame, last_row))\n\nclass IndentOutput(object):\n \"\"\"Fake file output for csv-writing \"\"\"\n @classmethod\n def write(cls, line):\n \"\"\"Write line to stdout with three spaces prepended\"\"\"\n sys.stdout.write(\" {0}\".format(line))\n\nprint('.. csv-table:: Experiment Outcome')\nprint(' :header: ,{0}\\n'.format(','.join(experiment_frame.columns)))\n\nexperiment_frame.to_csv(IndentOutput, header=False)\n\nprint('.. csv-table:: Experiment proportions')\nprint(' :header: ,{0}\\n'.format(','.join(experiment_frame.columns)))\n\ntotals = pandas.Series([GENDER_COUNT, GENDER_COUNT, GENDER_COUNT * 2],\n index='male female Total'.split())\ntotal_frame = pandas.DataFrame({'Promoted': totals,\n \"Not Promoted\": totals,\n \"Total\": totals})\nproportions = experiment_frame/total_frame\nproportions.to_csv(IndentOutput, header=False,\n columns=['Promoted', 'Not Promoted', 'Total'],\n float_format=\"%.3f\")\n\npath = 'figures/gender_experiment_bar.svg'\nfigure = plot.figure()\naxe = figure.gca()\nexperiment_data.plot(kind='bar', ax=axe)\nfigure.savefig(path)\nprint('.. image:: {0}'.format(path))\n\nprint(\" \\\\frac{{{0}}}{{{2}}}- \\\\frac{{{1}}}{{{2}}}&=\\\\frac{{{3}}}{{{2}}}\\\\\\\\\".format(MALES_PROMOTED,\n FEMALES_PROMOTED,\n GENDER_COUNT,\n GENDER_DIFFERENCE))\nprint(\" &\\\\approx {:.3f}\\\\\\\\\".format(GENDER_DIFFERENCE/GENDER_COUNT))\n" ]
[ [ "pandas.concat", "matplotlib.style.use", "pandas.Index", "pandas.DataFrame", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
hugeinteger/InterFaceGAN
[ "59e75c0b4dcdbcea693b31ff11cf239c39e14ed1" ]
[ "utils/manipulator.py" ]
[ "# python3.7\n\"\"\"Utility functions for latent codes manipulation.\"\"\"\n\nimport numpy as np\nfrom sklearn import svm\n\nfrom .logger import setup_logger\n\n__all__ = ['train_boundary', 'project_boundary', 'linear_interpolate']\n\n\ndef train_boundary(latent_codes,\n scores,\n chosen_num_or_ratio=0.02,\n split_ratio=0.7,\n invalid_value=None,\n logger=None):\n \"\"\"Trains boundary in latent space with offline predicted attribute scores.\n\n Given a collection of latent codes and the attribute scores predicted from the\n corresponding images, this function will train a linear SVM by treating it as\n a bi-classification problem. Basically, the samples with highest attribute\n scores are treated as positive samples, while those with lowest scores as\n negative. For now, the latent code can ONLY be with 1 dimension.\n\n NOTE: The returned boundary is with shape (1, latent_space_dim), and also\n normalized with unit norm.\n\n Args:\n latent_codes: Input latent codes as training data.\n scores: Input attribute scores used to generate training labels.\n chosen_num_or_ratio: How many samples will be chosen as positive (negative)\n samples. If this field lies in range (0, 0.5], `chosen_num_or_ratio *\n latent_codes_num` will be used. Otherwise, `min(chosen_num_or_ratio,\n 0.5 * latent_codes_num)` will be used. (default: 0.02)\n split_ratio: Ratio to split training and validation sets. (default: 0.7)\n invalid_value: This field is used to filter out data. (default: None)\n logger: Logger for recording log messages. If set as `None`, a default\n logger, which prints messages from all levels to screen, will be created.\n (default: None)\n\n Returns:\n A decision boundary with type `numpy.ndarray`.\n\n Raises:\n ValueError: If the input `latent_codes` or `scores` are with invalid format.\n \"\"\"\n if not logger:\n logger = setup_logger(work_dir='', logger_name='train_boundary')\n\n if (not isinstance(latent_codes, np.ndarray) or\n not len(latent_codes.shape) == 2):\n raise ValueError(f'Input `latent_codes` should be with type'\n f'`numpy.ndarray`, and shape [num_samples, '\n f'latent_space_dim]!')\n num_samples = latent_codes.shape[0]\n latent_space_dim = latent_codes.shape[1]\n if (not isinstance(scores, np.ndarray) or not len(scores.shape) == 2 or\n not scores.shape[0] == num_samples or not scores.shape[1] == 1):\n raise ValueError(f'Input `scores` should be with type `numpy.ndarray`, and '\n f'shape [num_samples, 1], where `num_samples` should be '\n f'exactly same as that of input `latent_codes`!')\n if chosen_num_or_ratio <= 0:\n raise ValueError(f'Input `chosen_num_or_ratio` should be positive, '\n f'but {chosen_num_or_ratio} received!')\n\n logger.info(f'Filtering training data.')\n if invalid_value is not None:\n latent_codes = latent_codes[scores != invalid_value]\n scores = scores[scores != invalid_value]\n\n logger.info(f'Sorting scores to get positive and negative samples.')\n sorted_idx = np.argsort(scores, axis=0)[::-1, 0]\n latent_codes = latent_codes[sorted_idx]\n scores = scores[sorted_idx]\n num_samples = latent_codes.shape[0]\n if 0 < chosen_num_or_ratio <= 1:\n chosen_num = int(num_samples * chosen_num_or_ratio)\n else:\n chosen_num = chosen_num_or_ratio\n chosen_num = min(chosen_num, num_samples // 2)\n\n logger.info(f'Spliting training and validation sets:')\n train_num = int(chosen_num * split_ratio)\n val_num = chosen_num - train_num\n # Positive samples.\n positive_idx = np.arange(chosen_num)\n np.random.shuffle(positive_idx)\n positive_train = 
latent_codes[:chosen_num][positive_idx[:train_num]]\n positive_val = latent_codes[:chosen_num][positive_idx[train_num:]]\n # Negative samples.\n negative_idx = np.arange(chosen_num)\n np.random.shuffle(negative_idx)\n negative_train = latent_codes[-chosen_num:][negative_idx[:train_num]]\n negative_val = latent_codes[-chosen_num:][negative_idx[train_num:]]\n # Training set.\n train_data = np.concatenate([positive_train, negative_train], axis=0)\n train_label = np.concatenate([np.ones(train_num, dtype=np.int),\n np.zeros(train_num, dtype=np.int)], axis=0)\n logger.info(f' Training: {train_num} positive, {train_num} negative.')\n # Validation set.\n val_data = np.concatenate([positive_val, negative_val], axis=0)\n val_label = np.concatenate([np.ones(val_num, dtype=np.int),\n np.zeros(val_num, dtype=np.int)], axis=0)\n logger.info(f' Validation: {val_num} positive, {val_num} negative.')\n # Remaining set.\n remaining_num = num_samples - chosen_num * 2\n remaining_data = latent_codes[chosen_num:-chosen_num]\n remaining_scores = scores[chosen_num:-chosen_num]\n decision_value = (scores[0] + scores[-1]) / 2\n remaining_label = np.ones(remaining_num, dtype=np.int)\n remaining_label[remaining_scores.ravel() < decision_value] = 0\n remaining_positive_num = np.sum(remaining_label == 1)\n remaining_negative_num = np.sum(remaining_label == 0)\n logger.info(f' Remaining: {remaining_positive_num} positive, '\n f'{remaining_negative_num} negative.')\n\n logger.info(f'Training boundary.')\n clf = svm.SVC(kernel='linear')\n classifier = clf.fit(train_data, train_label)\n logger.info(f'Finish training.')\n\n if val_num:\n val_prediction = classifier.predict(val_data)\n correct_num = np.sum(val_label == val_prediction)\n logger.info(f'Accuracy for validation set: '\n f'{correct_num} / {val_num * 2} = '\n f'{correct_num / (val_num * 2):.6f}')\n\n if remaining_num:\n remaining_prediction = classifier.predict(remaining_data)\n correct_num = np.sum(remaining_label == remaining_prediction)\n logger.info(f'Accuracy for remaining set: '\n f'{correct_num} / {remaining_num} = '\n f'{correct_num / remaining_num:.6f}')\n\n a = classifier.coef_.reshape(1, latent_space_dim).astype(np.float32)\n return a / np.linalg.norm(a)\n\n\ndef project_boundary(primal, *args):\n \"\"\"Projects the primal boundary onto condition boundaries.\n\n The function is used for conditional manipulation, where the projected vector\n will be subscribed from the normal direction of the original boundary. 
Here,\n all input boundaries are supposed to have already been normalized to unit\n norm, and with same shape [1, latent_space_dim].\n\n NOTE: For now, at most two condition boundaries are supported.\n\n Args:\n primal: The primal boundary.\n *args: Other boundaries as conditions.\n\n Returns:\n A projected boundary (also normalized to unit norm), which is orthogonal to\n all condition boundaries.\n\n Raises:\n NotImplementedError: If there are more than two condition boundaries.\n \"\"\"\n if len(args) > 2:\n raise NotImplementedError(f'This function supports projecting with at most '\n f'two conditions.')\n assert len(primal.shape) == 2 and primal.shape[0] == 1\n\n if not args:\n return primal\n if len(args) == 1:\n cond = args[0]\n assert (len(cond.shape) == 2 and cond.shape[0] == 1 and\n cond.shape[1] == primal.shape[1])\n new = primal - primal.dot(cond.T) * cond\n return new / np.linalg.norm(new)\n if len(args) == 2:\n cond_1 = args[0]\n cond_2 = args[1]\n assert (len(cond_1.shape) == 2 and cond_1.shape[0] == 1 and\n cond_1.shape[1] == primal.shape[1])\n assert (len(cond_2.shape) == 2 and cond_2.shape[0] == 1 and\n cond_2.shape[1] == primal.shape[1])\n primal_cond_1 = primal.dot(cond_1.T)\n primal_cond_2 = primal.dot(cond_2.T)\n cond_1_cond_2 = cond_1.dot(cond_2.T)\n alpha = (primal_cond_1 - primal_cond_2 * cond_1_cond_2) / (\n 1 - cond_1_cond_2 ** 2 + 1e-8)\n beta = (primal_cond_2 - primal_cond_1 * cond_1_cond_2) / (\n 1 - cond_1_cond_2 ** 2 + 1e-8)\n new = primal - alpha * cond_1 - beta * cond_2\n return new / np.linalg.norm(new)\n\n raise NotImplementedError\n\n\ndef linear_interpolate(latent_code,\n boundary,\n start_distance=-3.0,\n end_distance=3.0,\n steps=10):\n \"\"\"Manipulates the given latent code with respect to a particular boundary.\n\n Basically, this function takes a latent code and a boundary as inputs, and\n outputs a collection of manipulated latent codes. For example, let `steps` to\n be 10, then the input `latent_code` is with shape [1, latent_space_dim], input\n `boundary` is with shape [1, latent_space_dim] and unit norm, the output is\n with shape [10, latent_space_dim]. The first output latent code is\n `start_distance` away from the given `boundary`, while the last output latent\n code is `end_distance` away from the given `boundary`. Remaining latent codes\n are linearly interpolated.\n\n Input `latent_code` can also be with shape [1, num_layers, latent_space_dim]\n to support W+ space in Style GAN. In this case, all features in W+ space will\n be manipulated same as each other. Accordingly, the output will be with shape\n [10, num_layers, latent_space_dim].\n\n NOTE: Distance is sign sensitive.\n\n Args:\n latent_code: The input latent code for manipulation.\n boundary: The semantic boundary as reference.\n start_distance: The distance to the boundary where the manipulation starts.\n (default: -3.0)\n end_distance: The distance to the boundary where the manipulation ends.\n (default: 3.0)\n steps: Number of steps to move the latent code from start position to end\n position. 
(default: 10)\n \"\"\"\n assert (latent_code.shape[0] == 1 and boundary.shape[0] == 1 and\n len(boundary.shape) == 2 and\n boundary.shape[1] == latent_code.shape[-1])\n\n linspace = np.linspace(start_distance, end_distance, steps)\n if len(latent_code.shape) == 2:\n linspace = linspace - latent_code.dot(boundary.T)\n linspace = linspace.reshape(-1, 1).astype(np.float32)\n return latent_code + linspace * boundary\n if len(latent_code.shape) == 3:\n linspace = linspace.reshape(-1, 1, 1).astype(np.float32)\n return latent_code + linspace * boundary.reshape(1, 1, -1)\n raise ValueError(f'Input `latent_code` should be with shape '\n f'[1, latent_space_dim] or [1, N, latent_space_dim] for '\n f'W+ space in Style GAN!\\n'\n f'But {latent_code.shape} is received.')\n" ]
[ [ "numpy.linspace", "numpy.arange", "numpy.linalg.norm", "numpy.random.shuffle", "numpy.ones", "numpy.concatenate", "sklearn.svm.SVC", "numpy.argsort", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NREL/reVX
[ "4d62eb2c003c3b53b959f7a58bdc342d18098884", "4d62eb2c003c3b53b959f7a58bdc342d18098884" ]
[ "reVX/least_cost_xmission/least_cost_xmission.py", "reVX/offshore/dist_to_ports_converter.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nModule to compute least cost xmission paths, distances, and costs one or\nmore SC points\n\"\"\"\nfrom concurrent.futures import as_completed\nimport geopandas as gpd\nimport json\nimport logging\nimport numpy as np\nimport os\nimport pandas as pd\nfrom pyproj.crs import CRS\nimport rasterio\nfrom scipy.spatial import cKDTree\nfrom shapely.geometry import Point\nimport time\n\nfrom reV.handlers.exclusions import ExclusionLayers\nfrom reV.supply_curve.points import SupplyCurveExtent\nfrom rex.utilities.execution import SpawnProcessPool\nfrom rex.utilities.loggers import log_mem\n\nfrom reVX.least_cost_xmission.config import (TRANS_LINE_CAT, LOAD_CENTER_CAT,\n SINK_CAT, SUBSTATION_CAT)\nfrom reVX.least_cost_xmission.least_cost_paths import LeastCostPaths\nfrom reVX.least_cost_xmission.trans_cap_costs import TransCapCosts\n\nlogger = logging.getLogger(__name__)\n\n\nclass LeastCostXmission(LeastCostPaths):\n \"\"\"\n Compute Least Cost tie-line paths and full transmission cap cost\n for all possible connections to all supply curve points\n -\n \"\"\"\n REQUIRED_LAYRES = ['transmission_barrier', 'ISO_regions']\n\n def __init__(self, cost_fpath, features_fpath, resolution=128,\n xmission_config=None):\n \"\"\"\n Parameters\n ----------\n cost_fpath : str\n Path to h5 file with cost rasters and other required layers\n features_fpath : str\n Path to geopackage with transmission features\n resolution : int, optional\n SC point resolution, by default 128\n xmission_config : str | dict | XmissionConfig, optional\n Path to Xmission config .json, dictionary of Xmission config\n .jsons, or preloaded XmissionConfig objects, by default None\n \"\"\"\n self._check_layers(cost_fpath)\n self._config = TransCapCosts._parse_config(\n xmission_config=xmission_config)\n\n (self._sc_points, self._features,\n self._sub_lines_mapping, self._shape) =\\\n self._map_to_costs(cost_fpath, features_fpath,\n resolution=resolution)\n self._cost_fpath = cost_fpath\n self._tree = None\n self._sink_coords = None\n self._min_line_len = (resolution * 0.09) / 2\n\n logger.debug('{} initialized'.format(self))\n\n def __repr__(self):\n msg = (\"{} to be computed for {} sc_points and {} features\"\n .format(self.__class__.__name__,\n len(self.sc_points),\n len(self.features)))\n\n return msg\n\n @property\n def sc_points(self):\n \"\"\"\n Table of supply curve points\n\n Returns\n -------\n gpd.GeoDataFrame\n \"\"\"\n return self._sc_points\n\n @property\n def features(self):\n \"\"\"\n Table of features to compute paths for\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n return self._features\n\n @property\n def sub_lines_mapping(self):\n \"\"\"\n Series mapping substations to the transmission lines connected\n to each substation\n\n Returns\n -------\n pandas.Series\n \"\"\"\n return self._sub_lines_mapping\n\n @property\n def sink_coords(self):\n \"\"\"\n Inf sink coordinates (row, col)\n\n Returns\n -------\n ndarray\n \"\"\"\n if self._sink_coords is None:\n mask = self.features['category'] == SINK_CAT\n self._sink_coords = self.features.loc[mask, ['row', 'col']].values\n\n return self._sink_coords\n\n @property\n def sink_tree(self):\n \"\"\"\n cKDTree for infinite sinks\n\n Returns\n -------\n cKDTree\n \"\"\"\n if self._tree is None:\n self._tree = cKDTree(self.sink_coords)\n\n return self._tree\n\n @staticmethod\n def _load_trans_feats(features_fpath):\n \"\"\"\n Load existing transmission features from disk. 
Substations will be\n loaded from cache file if it exists\n\n Parameters\n ----------\n features_fpath : str\n Path to geopackage with trans features\n\n Returns\n -------\n features : gpd.GeoDataFrame\n DataFrame of transmission features\n sub_line_map : pandas.Series\n Mapping of sub-station trans_gid to connected tranmission line\n trans_gids\n \"\"\"\n logger.debug('Loading transmission features')\n features = gpd.read_file(features_fpath)\n features = features.drop(columns=['bgid', 'egid', 'cap_left'],\n errors='ignore')\n mapping = {'gid': 'trans_gid', 'trans_gids': 'trans_line_gids'}\n features = features.rename(columns=mapping)\n\n features['min_volts'] = 0\n features['max_volts'] = 0\n\n # Transmission lines\n mask = features['category'] == TRANS_LINE_CAT\n voltage = features.loc[mask, 'voltage'].values\n features.loc[mask, 'min_volts'] = voltage\n features.loc[mask, 'max_volts'] = voltage\n\n # Load Center and Sinks\n mask = features['category'].isin([LOAD_CENTER_CAT, SINK_CAT])\n features.loc[mask, 'min_volts'] = 1\n features.loc[mask, 'max_volts'] = 9999\n\n sub_lines_map = {}\n mask = features['category'] == SUBSTATION_CAT\n bad_subs = np.zeros(len(features), dtype=bool)\n for idx, row in features.loc[mask].iterrows():\n gid = row['trans_gid']\n lines = row['trans_line_gids']\n if isinstance(lines, str):\n lines = json.loads(lines)\n\n sub_lines_map[gid] = lines\n lines_mask = features['trans_gid'].isin(lines)\n voltage = features.loc[lines_mask, 'voltage'].values\n\n if np.max(voltage) >= 69:\n features.loc[idx, 'min_volts'] = np.min(voltage)\n features.loc[idx, 'max_volts'] = np.max(voltage)\n else:\n bad_subs[idx] = True\n\n if any(bad_subs):\n msg = (\"The following sub-stations do not have the minimum \"\n \"required voltage of 69 kV and will be dropped:\\n{}\"\n .format(features.loc[bad_subs, 'trans_gid']))\n logger.warning(msg)\n features = features.loc[~bad_subs].reset_index(drop=True)\n\n return features, pd.Series(sub_lines_map)\n\n @staticmethod\n def _create_sc_points(cost_fpath, resolution=128):\n \"\"\"\n Load SC points, covert row/col to array wide, and determine x/y for\n reV projection\n\n Parameters\n ----------\n cost_fpath : str\n Path to h5 file with cost rasters and other required layers\n resolution : int, optional\n SC point resolution, by default 128\n\n Returns\n sc_points : gpd.GeoDataFrame\n SC points\n \"\"\"\n logger.debug('Loading Supply Curve Points')\n sce = SupplyCurveExtent(cost_fpath, resolution=resolution)\n sc_points = sce.points.rename(columns={'row_ind': 'sc_row_ind',\n 'col_ind': 'sc_col_ind'})\n shape = sce.excl_shape\n sc_points['sc_point_gid'] = sc_points.index.values\n\n row = np.round(sc_points['sc_row_ind'] * resolution + resolution / 2)\n row = np.where(row >= shape[0], shape[0] - 1, row)\n sc_points['row'] = row.astype(int)\n\n col = np.round(sc_points['sc_col_ind'] * resolution + resolution / 2)\n col = np.where(col >= shape[1], shape[1] - 1, col)\n sc_points['col'] = col.astype(int)\n\n return sc_points\n\n @staticmethod\n def _get_feature_cost_indices(features, crs, transform, shape):\n \"\"\"\n Map features to cost row, col indicies using rasterio transform\n\n Parameters\n ----------\n features : gpd.GeoDataFrame\n GeoDataFrame of features to map to cost raster\n crs : pyproj.crs.CRS\n CRS of cost raster\n transform : raster.Affine\n Transform of cost raster\n shape : tuple\n Cost raster shape\n\n Returns\n -------\n row : ndarray\n Vector of row indicies for each feature\n col : ndarray\n Vector of col indicies for each 
features\n mask : ndarray\n Boolean mask of features with indicies outside of cost raster\n \"\"\"\n row, col, mask = super(LeastCostXmission,\n LeastCostXmission)._get_feature_cost_indices(\n features, crs, transform, shape)\n\n t_lines = features['category'] == TRANS_LINE_CAT\n mask |= t_lines\n\n row[t_lines] = np.where(row[t_lines] >= 0, row[t_lines], 0)\n row[t_lines] = np.where(row[t_lines] < shape[0], row[t_lines],\n shape[0] - 1)\n col[t_lines] = np.where(col[t_lines] >= 0, col[t_lines], 0)\n col[t_lines] = np.where(col[t_lines] < shape[1], col[t_lines],\n shape[1] - 1)\n\n return row, col, mask\n\n @classmethod\n def _map_to_costs(cls, cost_fpath, features_fpath, resolution=128):\n \"\"\"\n Map supply curve points and transmission features to cost array pixel\n indices\n\n Parameters\n ----------\n cost_fpath : str\n Path to h5 file with cost rasters and other required layers\n features_fpath : str\n Path to geopackage with transmission features\n resolution : int, optional\n SC point resolution, by default 128\n\n Returns\n -------\n sc_point : gpd.GeoDataFrame\n Table of supply curve points to connect to tranmission\n features : gpd.GeoDataFrame\n Table of transmission features\n sub_lines_map : pandas.Series\n Series mapping substations to the transmission lines connected\n to each substation\n \"\"\"\n with ExclusionLayers(cost_fpath) as f:\n crs = CRS.from_string(f.crs)\n transform = rasterio.Affine(*f.profile['transform'])\n shape = f.shape\n regions = f['ISO_regions']\n\n features, sub_lines_map = cls._load_trans_feats(features_fpath)\n row, col, mask = cls._get_feature_cost_indices(features, crs,\n transform, shape)\n if any(~mask):\n msg = (\"The following features are outside of the cost exclusion \"\n \"domain and will be dropped:\\n{}\"\n .format(features.loc[~mask, 'trans_gid']))\n logger.warning(msg)\n row = row[mask]\n col = col[mask]\n features = features.loc[mask].reset_index(drop=True)\n\n features['row'] = row\n features['col'] = col\n features['region'] = regions[row, col]\n\n logger.debug('Converting SC points to GeoDataFrame')\n sc_points = cls._create_sc_points(cost_fpath, resolution=resolution)\n x, y = rasterio.transform.xy(transform, sc_points['row'].values,\n sc_points['col'].values)\n geo = [Point(xy) for xy in zip(x, y)]\n sc_points = gpd.GeoDataFrame(sc_points, crs=features.crs,\n geometry=geo)\n\n return sc_points, features, sub_lines_map, shape\n\n def _clip_to_sc_point(self, sc_point, tie_line_voltage, nn_sinks=2,\n clipping_buffer=1.05):\n \"\"\"\n Clip costs raster to AOI around SC point, and get substations,\n load centers, and sinks within the clipped region.\n\n Parameters\n ----------\n sc_point : gpd.GeoSeries\n SC point to clip raster around\n nn_sinks : int, optional\n Number of nearest neighbor sinks to clip to\n clipping_buffer : float, optional\n Buffer to increase clipping radius by, by default 1.05\n\n Returns\n -------\n radius : int\n Clipping radius in cost raster pixels\n x_feats : pd.DataFrame\n Substatations, load centers, sinks, and nearest points on t-lines\n to SC point\n \"\"\"\n logger.debug('Clipping features to sc_point {}'.format(sc_point.name))\n if len(self.sink_coords) > 2:\n row, col = sc_point[['row', 'col']].values\n _, pos = self.sink_tree.query([row, col], k=nn_sinks)\n radius = np.abs(self.sink_coords[pos] - np.array([row, col])).max()\n radius = int(np.ceil(radius * clipping_buffer))\n\n logger.debug('Radius to {} nearest sink is: {}'\n .format(nn_sinks, radius))\n row_min = max(row - radius, 0)\n row_max = 
min(row + radius, self._shape[0])\n col_min = max(col - radius, 0)\n col_max = min(col + radius, self._shape[1])\n logger.debug('Extracting all transmission features in the row '\n 'slice {}:{} and column slice {}:{}'\n .format(row_min, row_max, col_min, col_max))\n\n # Clip transmission features\n mask = self.features['row'] >= row_min\n mask &= self.features['row'] < row_max\n mask &= self.features['col'] >= col_min\n mask &= self.features['col'] < col_max\n sc_features = self.features.loc[mask].copy(deep=True)\n logger.debug('{} transmission features found in clipped area with '\n 'radius {}'\n .format(len(sc_features), radius))\n else:\n radius = None\n sc_features = self.features.copy(deep=True)\n\n mask = self.features['max_volts'] >= tie_line_voltage\n sc_features = sc_features.loc[mask].copy(deep=True)\n logger.debug('{} transmission features found in clipped area with '\n 'minimum max voltage of {}'\n .format(len(sc_features), tie_line_voltage))\n\n # Find t-lines connected to substations within clip\n logger.debug('Collecting transmission lines connected to substations')\n mask = sc_features['category'] == SUBSTATION_CAT\n if mask.any():\n trans_gids = sc_features.loc[mask, 'trans_gid'].values\n trans_gids = \\\n np.concatenate(self.sub_lines_mapping.loc[trans_gids].values)\n trans_gids = np.unique(trans_gids)\n line_mask = self.features['trans_gid'].isin(trans_gids)\n trans_lines = self.features.loc[line_mask].copy(deep=True)\n line_mask = trans_lines['trans_gid'].isin(sc_features['trans_gid'])\n trans_lines = trans_lines.loc[~line_mask]\n logger.debug('Adding all {} transmission lines connected to '\n 'substations with minimum max voltage of {}'\n .format(len(trans_lines), tie_line_voltage))\n sc_features = sc_features.append(trans_lines)\n\n return sc_features, radius\n\n def process_sc_points(self, capacity_class, sc_point_gids=None, nn_sinks=2,\n clipping_buffer=1.05, barrier_mult=100,\n max_workers=None):\n \"\"\"\n Compute Least Cost Tranmission for desired sc_points\n\n Parameters\n ----------\n capacity_class : str | int\n Capacity class of transmission features to connect supply curve\n points to\n sc_point_gids : list, optional\n List of sc_point_gids to connect to, by default None\n nn_sinks : int, optional\n Number of nearest neighbor sinks to use for clipping radius\n calculation, by default 2\n clipping_buffer : float, optional\n Buffer to expand clipping radius by, by default 1.05\n barrier_mult : int, optional\n Tranmission barrier multiplier, used when computing the least\n cost tie-line path, by default 100\n max_workers : int, optional\n Number of workers to use for processing, if 1 run in serial,\n if None use all available cores, by default None\n\n Returns\n -------\n least_costs : pandas.DataFrame\n Least cost connections between all supply curve points and the\n transmission features with the given capacity class that are within\n \"nn_sink\" nearest infinite sinks\n \"\"\"\n max_workers = os.cpu_count() if max_workers is None else max_workers\n\n if sc_point_gids is None:\n sc_point_gids = self.sc_points['sc_point_gid'].values\n\n tie_line_voltage = self._config.capacity_to_kv(capacity_class)\n least_costs = []\n if max_workers > 1:\n logger.info('Computing Least Cost Transmission for SC points in '\n 'parallel on {} workers'.format(max_workers))\n loggers = [__name__, 'reV', 'reVX']\n with SpawnProcessPool(max_workers=max_workers,\n loggers=loggers) as exe:\n futures = []\n for _, sc_point in self.sc_points.iterrows():\n gid = sc_point['sc_point_gid']\n 
if gid in sc_point_gids:\n sc_features, radius = self._clip_to_sc_point(\n sc_point, tie_line_voltage, nn_sinks=nn_sinks,\n clipping_buffer=clipping_buffer)\n\n future = exe.submit(TransCapCosts.run,\n self._cost_fpath,\n sc_point.copy(deep=True),\n sc_features, capacity_class,\n radius=radius,\n xmission_config=self._config,\n barrier_mult=barrier_mult,\n min_line_length=self._min_line_len)\n futures.append(future)\n\n for i, future in enumerate(as_completed(futures)):\n sc_costs = future.result()\n if sc_costs is not None:\n least_costs.append(sc_costs)\n\n logger.debug('SC point {} of {} complete!'\n .format(i + 1, len(futures)))\n log_mem(logger)\n\n else:\n logger.info('Computing Least Cost Transmission for SC points in '\n 'serial')\n i = 1\n for _, sc_point in self.sc_points.iterrows():\n gid = sc_point['sc_point_gid']\n if gid in sc_point_gids:\n sc_features, radius = self._clip_to_sc_point(\n sc_point, tie_line_voltage, nn_sinks=nn_sinks,\n clipping_buffer=clipping_buffer)\n\n sc_costs = TransCapCosts.run(\n self._cost_fpath, sc_point.copy(deep=True),\n sc_features, capacity_class,\n radius=radius,\n xmission_config=self._config,\n barrier_mult=barrier_mult,\n min_line_length=self._min_line_len)\n\n if sc_costs is not None:\n least_costs.append(sc_costs)\n\n logger.debug('SC point {} of {} complete!'\n .format(i, len(sc_point_gids)))\n log_mem(logger)\n i += 1\n\n least_costs = pd.concat(least_costs).sort_values(['sc_point_gid',\n 'trans_gid'])\n capacity_class = self._config._parse_cap_class(capacity_class)\n least_costs['max_cap'] = self._config['power_classes'][capacity_class]\n lcp_frac = (len(least_costs['sc_point_gid'].unique())\n / len(sc_point_gids) * 100)\n logger.info('{:.4f}% of requested sc point gids were succesfully '\n 'mapped to transmission features'.format(lcp_frac))\n\n return least_costs.reset_index(drop=True)\n\n @classmethod\n def run(cls, cost_fpath, features_fpath, capacity_class, resolution=128,\n xmission_config=None, sc_point_gids=None, nn_sinks=2,\n clipping_buffer=1.05, barrier_mult=100, max_workers=None):\n \"\"\"\n Find Least Cost Tranmission connections between desired sc_points to\n given tranmission features for desired capacity class\n\n Parameters\n ----------\n cost_fpath : str\n Path to h5 file with cost rasters and other required layers\n features_fpath : str\n Path to geopackage with transmission features\n capacity_class : str | int\n Capacity class of transmission features to connect supply curve\n points to\n resolution : int, optional\n SC point resolution, by default 128\n xmission_config : str | dict | XmissionConfig, optional\n Path to Xmission config .json, dictionary of Xmission config\n .jsons, or preloaded XmissionConfig objects, by default None\n sc_point_gids : list, optional\n List of sc_point_gids to connect to, by default None\n nn_sinks : int, optional\n Number of nearest neighbor sinks to use for clipping radius\n calculation, by default 2\n clipping_buffer : float, optional\n Buffer to expand clipping radius by, by default 1.05\n barrier_mult : int, optional\n Tranmission barrier multiplier, used when computing the least\n cost tie-line path, by default 100\n max_workers : int, optional\n Number of workers to use for processing, if 1 run in serial,\n if None use all available cores, by default None\n\n Returns\n -------\n least_costs : pandas.DataFrame\n Least cost connections between all supply curve points and the\n transmission features with the given capacity class that are within\n \"nn_sink\" nearest infinite sinks\n 
\"\"\"\n ts = time.time()\n lcx = cls(cost_fpath, features_fpath, resolution=resolution,\n xmission_config=xmission_config)\n least_costs = lcx.process_sc_points(capacity_class,\n sc_point_gids=sc_point_gids,\n nn_sinks=nn_sinks,\n clipping_buffer=clipping_buffer,\n barrier_mult=barrier_mult,\n max_workers=max_workers)\n\n logger.info('{} connections were made to {} SC points in {:.4f} '\n 'minutes'\n .format(len(least_costs),\n len(least_costs['sc_point_gid'].unique()),\n (time.time() - ts) / 60))\n\n return least_costs\n", "# -*- coding: utf-8 -*-\n\"\"\"\nConvert dist_to_ports geotiff to .h5 exclusion layers\n\"\"\"\nimport logging\nimport numpy as np\nimport os\nfrom warnings import warn\n\nfrom reV.handlers.exclusions import ExclusionLayers\n\nfrom reVX.handlers.geotiff import Geotiff\nfrom reVX.handlers.outputs import Outputs\nfrom reVX.utilities.exclusions_converter import ExclusionsConverter\n\nlogger = logging.getLogger(__name__)\n\n\nclass DistToPortsConverter(ExclusionsConverter):\n \"\"\"\n Convert Distance to Ports goetiff(s) to excl .h5 layers\n \"\"\"\n @classmethod\n def _parse_dist_to_ports(cls, dist_to_ports, chunks=(128, 128)):\n \"\"\"\n Load dist_to_ports, combine multiple dist_to_ports by state if needed\n\n Parameters\n ----------\n dist_to_ports : list\n List of paths to dist_to_ports geotiffs to load and combine\n chunks : tuple, optional\n Chunk size of exclusions in Geotiff, by default (128, 128)\n\n Returns\n -------\n values : ndarray\n dist_to_ports exclusion array\n \"\"\"\n values = None\n for geotiff in dist_to_ports:\n v = cls._parse_tiff(geotiff, chunks=chunks, check_tiff=False)[1]\n if not np.any(v):\n msg = ('{} is invalid and only contains zeros and will be '\n 'skipped. It is advised to recreate the file and '\n 'update the distance to ports layer!'\n .format(geotiff))\n logger.warning(msg)\n warn(msg)\n else:\n if values is None:\n values = v\n else:\n values = np.minimum(values, v)\n\n return values\n\n def dist_to_ports_to_layer(self, layer, dist_to_ports, check_tiff=True,\n transform_atol=0.01, coord_atol=0.001,\n description=None, scale_factor=None,\n dtype='float32'):\n \"\"\"\n Transfer geotiff exclusions to h5 confirming they match existing layers\n\n Parameters\n ----------\n layer : str\n Layer to create\n dist_to_ports : str\n Path to directory containing distance to port geotiff files or\n a list of the distance to port geotiff filepaths\n check_tiff : bool, optional\n Flag to check tiff profile and coordinates against exclusion .h5\n profile and coordinates, by default True\n transform_atol : float, optional\n Absolute tolerance parameter when comparing geotiff transform data,\n by default 0.01\n coord_atol : float, optional\n Absolute tolerance parameter when comparing new un-projected\n geotiff coordinates against previous coordinates, by default 0.001\n description : str, optional\n Description of exclusion layer, by default None\n scale_factor : int | float, optional\n Scale factor to use to scale geotiff data when added to the .h5\n file, by default None\n dtype : str, optional\n Dtype to save geotiff data as in the .h5 file. 
Only used when\n 'scale_factor' is not None, by default 'float32'\n \"\"\"\n if os.path.isdir(dist_to_ports):\n dist_to_ports = [os.path.join(dist_to_ports, file)\n for file in os.listdir(dist_to_ports)\n if file.endswith('.tif')]\n else:\n dist_to_ports = [dist_to_ports]\n\n logger.debug('\\t- Computing minimum distance to ports from {}'\n .format(dist_to_ports))\n\n if not os.path.exists(self._excl_h5):\n self._init_h5(self._excl_h5, dist_to_ports[0],\n chunks=self._chunks)\n\n if check_tiff:\n self._check_geotiff(self._excl_h5, dist_to_ports[0],\n chunks=self._chunks,\n transform_atol=transform_atol,\n coord_atol=coord_atol)\n\n with Geotiff(dist_to_ports[0], chunks=self._chunks) as tif:\n profile = tif.profile\n\n dist_to_ports = self._parse_dist_to_ports(dist_to_ports,\n chunks=self._chunks)\n if layer in self.layers:\n msg = (\"{} is already present in {} and will be updated\"\n .format(layer, self._excl_h5))\n logger.warning(msg)\n warn(msg)\n with ExclusionLayers(self._excl_h5) as exc:\n dist_to_ports = np.minimum(dist_to_ports, exc[layer])\n\n if scale_factor is not None:\n dist_to_ports = Outputs._check_data_dtype(\n dist_to_ports, dtype, scale_factor=scale_factor)\n\n self._write_layer(self._excl_h5, layer, profile, dist_to_ports,\n chunks=self._chunks, description=description,\n scale_factor=scale_factor)\n\n @classmethod\n def layers_to_h5(cls, excl_h5, layers, chunks=(128, 128),\n replace=True, check_tiff=True,\n transform_atol=0.01, coord_atol=0.001,\n descriptions=None, scale_factors=None):\n \"\"\"\n Create exclusions .h5 file, or load layers into existing exclusion .h5\n file from provided dist_to_ports\n\n Parameters\n ----------\n excl_h5 : str\n Path to .h5 file containing or to contain exclusion layers\n layers : dict\n Dictionary mapping goetiffs to the layers to load\n chunks : tuple, optional\n Chunk size of exclusions in Geotiff, by default (128, 128)\n replace : bool, optional\n Flag to replace existing layers if needed, by default True\n check_tiff : bool, optional\n Flag to check tiff profile and coordinates against exclusion .h5\n profile and coordinates, by default True\n transform_atol : float, optional\n Absolute tolerance parameter when comparing geotiff transform data,\n by default 0.01\n coord_atol : float, optional\n Absolute tolerance parameter when comparing new un-projected\n geotiff coordinates against previous coordinates, by default 0.001\n description : dict, optional\n Description of exclusion layers, by default None\n scale_factor : dict, optional\n Scale factors and dtypes to use when scaling given layers,\n by default None\n \"\"\"\n if scale_factors is None:\n scale_factors = {}\n\n if descriptions is None:\n descriptions = {}\n\n if isinstance(layers, list):\n layers = {os.path.basename(lyr).split('.')[0]: lyr\n for lyr in layers}\n\n excls = cls(excl_h5, chunks=chunks, replace=replace)\n logger.info('Creating {}'.format(excl_h5))\n for layer, dist_to_ports in layers.items():\n logger.info('- Transfering {}'.format(layer))\n scale = scale_factors.get(layer, None)\n if scale is not None:\n scale_factor = scale['scale_factor']\n dtype = scale['dtype']\n else:\n scale_factor = None\n dtype = None\n\n description = descriptions.get(layer, None)\n\n excls.dist_to_ports_to_layer(layer, dist_to_ports,\n check_tiff=check_tiff,\n transform_atol=transform_atol,\n coord_atol=coord_atol,\n description=description,\n scale_factor=scale_factor,\n dtype=dtype)\n" ]
[ [ "pandas.concat", "pandas.Series", "numpy.unique", "numpy.min", "numpy.round", "numpy.concatenate", "numpy.max", "numpy.ceil", "numpy.array", "numpy.where", "scipy.spatial.cKDTree" ], [ "numpy.minimum", "numpy.any" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Nikoula86/organoidSegment
[ "b5d00256c15302ccd76b8b7a412852750476504b" ]
[ "morgana/GUIs/fluo.py" ]
[ "from PyQt5.QtWidgets import (QApplication, QComboBox, QGridLayout, QGroupBox, QLabel, QPushButton,\n QFileDialog, QMessageBox, QWidget, QSizePolicy, QCheckBox)\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas \nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport numpy as np\nimport warnings, os, time\nfrom skimage.io import imsave\nimport scipy.ndimage as ndi\nfrom matplotlib.figure import Figure\nfrom scipy.interpolate import interp1d\nimport matplotlib as mpl\nwarnings.filterwarnings(\"ignore\")\nfrom matplotlib import rc\nrc('font', size=12)\nrc('font', family='Arial')\n# rc('font', serif='Times')\nrc('pdf', fonttype=42)\n# rc('text', usetex=True)\n\n\nclass profileAP_condMode(QWidget):\n def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None, ylabel='Intensity (a.u.)'):\n super(profileAP_condMode, self).__init__(parent)\n\n self.data_all = data_all\n self.channel = channel\n self.colors = colors\n self.profileType = profileType\n self.ylabel = ylabel\n\n self.make()\n\n def make(self):\n self.figure = Figure(figsize=(4, 2.5), dpi=100)\n self.canvas = FigureCanvas(self.figure)\n self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)\n ax.set_xlabel(self.profileType)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(2,2))\n ax.set_ylabel(self.ylabel)\n ax.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,2))\n # ax.axis('off')\n self.canvas.draw()\n\n self.YnormBtn = QComboBox()\n self.YnormBtn.addItem('No normalization')\n self.YnormBtn.addItem('Global percentile')\n self.YnormBtn.addItem('Group percentile')\n self.YnormBtn.addItem('Folder percentile')\n self.YnormBtn.addItem('Manual')\n\n self.XnormBtn = QCheckBox('')\n self.XnormBtn.setChecked(False)\n self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)\n\n self.bckgBtn = QComboBox()\n self.bckgBtn.addItem('None')\n self.bckgBtn.addItem('Background')\n self.bckgBtn.addItem('Minimum')\n\n self.orientationBtn = QComboBox()\n self.orientationBtn.addItem('Signal based')\n self.orientationBtn.addItem('NO')\n\n self.alignmentBtn = QComboBox()\n self.alignmentBtn.addItem('Left')\n self.alignmentBtn.addItem('Right')\n self.alignmentBtn.addItem('Center')\n\n self.groupSelection = self.makeGroupSelectionBtns()\n\n self.applyBtn = QPushButton('Apply Settings')\n self.applyBtn.clicked.connect(self.remakePlot)\n\n lay = QGridLayout(self)\n lay.setSpacing(10)\n lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)\n lay.addWidget(self.canvas,1,0,1,2)\n lay.addWidget(QLabel('Background subtraction type:'),2,0,1,1)\n lay.addWidget(self.bckgBtn,2,1,1,1)\n lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)\n lay.addWidget(self.YnormBtn,4,1,1,1)\n lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)\n lay.addWidget(self.XnormBtn,5,1,1,1)\n lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)\n lay.addWidget(self.orientationBtn,6,1,1,1)\n lay.addWidget(QLabel('Alignment:'),7,0,1,1)\n lay.addWidget(self.alignmentBtn,7,1,1,1)\n lay.addWidget(self.groupSelection,8,0,1,2)\n lay.addWidget(self.applyBtn,9,0,1,2)\n\n self.remakePlot()\n\n self.setWindowTitle('Channel')\n QApplication.setStyle('Fusion')\n\n def onCheckingXnormBtn(self):\n if self.XnormBtn.isChecked():\n self.alignmentBtn.setEnabled(False)\n else:\n self.alignmentBtn.setEnabled(True)\n\n def 
makeGroupSelectionBtns(self):\n group = QGroupBox(\"Groups to plot\")\n self.groupPlotBtn = []\n for i in range(len(self.data_all)):\n self.groupPlotBtn.append(QCheckBox('Group '+str(i)))\n self.groupPlotBtn[-1].setChecked(True)\n \n self.legendBtn = QCheckBox('Legend')\n self.legendBtn.setChecked(False)\n\n self.rawBtn = QCheckBox('Plot raw data')\n self.rawBtn.setChecked(True)\n\n lay = QGridLayout()\n for i in range(len(self.data_all)):\n lay.addWidget(self.groupPlotBtn[i],i,0,1,1)\n lay.addWidget(self.legendBtn,0,1,1,1)\n lay.addWidget(self.rawBtn,1,1,1,1)\n\n group.setLayout(lay)\n return group\n\n def remakePlot(self):\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)\n ax.set_xlabel(self.profileType)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(2,2))\n ax.set_ylabel(self.ylabel)\n ax.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,2))\n # ax.axis('off')\n\n n_groups = len(self.data_all)\n n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]\n n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]\n\n # rearrange dataset\n profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])\n # subtract background or not\n if self.bckgBtn.currentText() == 'Background':\n profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]\n if self.bckgBtn.currentText() == 'Minimum':\n profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])\n\n # normalize fluorescence intensity accordingly\n if self.YnormBtn.currentText() == 'Global percentile':\n flat = []\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat.append(l)\n percs = np.percentile(np.array(flat),(.3,99.7))\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)\n elif self.YnormBtn.currentText() == 'Group percentile':\n flat = [[]for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat[i].append(l)\n percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n # print(percs[i])\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)\n elif self.YnormBtn.currentText() == 'Folder percentile':\n flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat[i][j].append(l)\n percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n # print(percs[i][j])\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)\n \n # normalize AP axis if necessary\n 
if self.XnormBtn.isChecked():\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profile = profiles_all[i][j][k]\n x = np.linspace(0,1,len(profile))\n fun = interp1d(x,profile)\n new_x = np.linspace(0,1,101)\n profiles_all[i][j][k] = fun(new_x)\n\n # compute length of longest gastruloid\n max_length = []\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n max_length.append(len(profiles_all[i][j][k]))\n max_length = np.max(max_length)\n\n # orient plots according to setting\n if self.orientationBtn.currentText() == 'Signal based':\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]\n n_p = len(y)\n if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):\n profiles_all[i][j][k] = profiles_all[i][j][k][::-1]\n\n # pad array to the right or left\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n w = max_length-len(profiles_all[i][j][k])\n if self.alignmentBtn.currentText() == 'Left':\n pad_width = (0,w)\n if self.alignmentBtn.currentText() == 'Right':\n pad_width = (w,0)\n elif self.alignmentBtn.currentText() == 'Center':\n if 2*int(w/2)==w:\n pad_width = (int(w/2),int(w/2))\n else:\n pad_width = (int(w/2)+1,int(w/2))\n profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)\n\n ### make plot\n lines = []\n for i in range(n_groups):\n # plot this group only if the button is checked\n if self.groupPlotBtn[i].isChecked():\n ydata_group = []\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n ydata_group.append(profiles_all[i][j][k])\n # plot the raw data if the button is checked\n if self.rawBtn.isChecked():\n ax.plot(ydata_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)\n # compute and plot mean and std\n max_length = np.max([len(d) for d in ydata_group])\n _mean = np.zeros(max_length)\n _std = np.zeros(max_length)\n for j in range(max_length):\n datapoint = []\n for data in ydata_group:\n datapoint.append(data[j])\n _mean[j] = np.nanmean(datapoint)\n _std[j] = np.nanstd(datapoint)\n line = ax.plot(_mean,'-',lw=1,c=self.colors[i],label='Mean')[0]\n ax.fill_between(range(len(_mean)),_mean-_std,_mean+_std,facecolor=self.colors[i],alpha=.2, linewidth=0.,label='Std')\n lines.append(line)\n \n # adjust axes lims\n ax.set_ylim(0,None)\n ax.set_xlim(0,None)\n if self.XnormBtn.isChecked():\n ax.set_xlim(0,100)\n if self.YnormBtn.currentText() != 'No normalization':\n ax.set_ylim(0,1)\n\n # add legend\n if self.legendBtn.isChecked():\n l = ax.legend(lines,['Group '+str(i+1) for i in range(len(self.groupPlotBtn)) if self.groupPlotBtn[i].isChecked()])\n l.get_frame().set_linewidth(0.0)\n\n self.canvas.draw()\n\nclass profileAP_tlMode(QWidget):\n #############\n # TO BE IMPLEMENTED!!!\n #############\n def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None):\n super(profileAP_tlMode, self).__init__(parent)\n\n self.data_all = data_all\n self.n_groups = len(data_all)\n self.channel = channel\n self.colors = colors\n self.profileType = profileType\n\n self.make()\n\n def make(self):\n\n self.figure = Figure(figsize=(4, 2.5), dpi=100)\n self.canvas = FigureCanvas(self.figure)\n self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n 
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)\n ax.set_xlabel(self.profileType)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(2,2))\n ax.set_ylabel('Time')\n ax.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,2))\n # ax.axis('off')\n self.canvas.draw()\n\n ###############################################\n settings_group = QGroupBox('Plot settings')\n\n self.YnormBtn = QComboBox()\n self.YnormBtn.addItem('No normalization')\n self.YnormBtn.addItem('Global percentile')\n self.YnormBtn.addItem('Group percentile')\n self.YnormBtn.addItem('Folder percentile')\n self.YnormBtn.addItem('Manual')\n\n self.XnormBtn = QCheckBox('')\n self.XnormBtn.setChecked(False)\n self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)\n\n self.bckgBtn = QComboBox()\n self.bckgBtn.addItem('None')\n self.bckgBtn.addItem('Background')\n self.bckgBtn.addItem('Minimum')\n\n self.orientationBtn = QComboBox()\n self.orientationBtn.addItem('Signal based')\n self.orientationBtn.addItem('NO')\n\n self.alignmentBtn = QComboBox()\n self.alignmentBtn.addItem('Left')\n self.alignmentBtn.addItem('Right')\n self.alignmentBtn.addItem('Center')\n\n self.aspectRatioBtn = QCheckBox('')\n self.aspectRatioBtn.setChecked(True)\n\n self.groupPlotBtn = QComboBox()\n for i in range(len(self.data_all)):\n self.groupPlotBtn.addItem('Group '+str(i+1))\n\n lay = QGridLayout(self)\n lay.addWidget(QLabel('Background subtraction:'),2,0,1,1)\n lay.addWidget(self.bckgBtn,2,1,1,1)\n lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)\n lay.addWidget(self.YnormBtn,4,1,1,1)\n lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)\n lay.addWidget(self.XnormBtn,5,1,1,1)\n lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)\n lay.addWidget(self.orientationBtn,6,1,1,1)\n lay.addWidget(QLabel('Alignment:'),7,0,1,1)\n lay.addWidget(self.alignmentBtn,7,1,1,1)\n lay.addWidget(QLabel('Set axes aspect ratio to equal:'),8,0,1,1)\n lay.addWidget(self.aspectRatioBtn,8,1,1,1)\n lay.addWidget(QLabel('Current group:'),9,0,1,1)\n lay.addWidget(self.groupPlotBtn,9,1,1,2)\n settings_group.setLayout(lay)\n\n #######################\n\n self.applyBtn = QPushButton('Apply Settings')\n self.applyBtn.clicked.connect(self.remakePlot)\n\n self.saveBtn = QPushButton('Save Tif image')\n self.saveBtn.clicked.connect(self.save_tif)\n\n lay = QGridLayout(self)\n lay.setSpacing(10)\n lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)\n lay.addWidget(self.canvas,1,0,1,2)\n lay.addWidget(settings_group,2,0,1,2) \n lay.addWidget(self.applyBtn,3,0,1,2)\n lay.addWidget(self.saveBtn,4,0,1,2)\n\n self.remakePlot()\n\n self.setWindowTitle('Channel')\n QApplication.setStyle('Macintosh')\n\n def onCheckingXnormBtn(self):\n if self.XnormBtn.isChecked():\n self.alignmentBtn.setEnabled(False)\n else:\n self.alignmentBtn.setEnabled(True)\n\n def remakePlot(self):\n\n n_groups = len(self.data_all)\n n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]\n n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]\n\n # rearrange dataset\n profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])\n # subtract background or not\n if self.bckgBtn.currentText() == 'Background':\n 
profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]\n if self.bckgBtn.currentText() == 'Minimum':\n profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])\n\n # normalize fluorescence intensity accordingly\n percs = [None,None]\n if self.YnormBtn.currentText() == 'Global percentile':\n flat = []\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat.append(l)\n percs = np.percentile(np.array(flat),(.3,99.7))\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)\n elif self.YnormBtn.currentText() == 'Group percentile':\n flat = [[]for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat[i].append(l)\n percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n # print(percs[i])\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)\n elif self.YnormBtn.currentText() == 'Folder percentile':\n flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat[i][j].append(l)\n percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n # print(percs[i][j])\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)\n self.percs = percs\n \n # normalize AP axis if necessary\n if self.XnormBtn.isChecked():\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profile = profiles_all[i][j][k]\n x = np.linspace(0,1,len(profile))\n fun = interp1d(x,profile)\n new_x = np.linspace(0,1,101)\n profiles_all[i][j][k] = fun(new_x)\n\n # compute length of longest gastruloid\n max_length = []\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n max_length.append(len(profiles_all[i][j][k]))\n max_length = np.max(max_length)\n\n # orient plots according to setting\n if self.orientationBtn.currentText() == 'Signal based':\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]\n n_p = len(y)\n if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):\n profiles_all[i][j][k] = profiles_all[i][j][k][::-1]\n\n # pad array to the right or left\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n w = max_length-len(profiles_all[i][j][k])\n if self.alignmentBtn.currentText() == 'Left':\n pad_width = (0,w)\n if self.alignmentBtn.currentText() == 'Right':\n pad_width = (w,0)\n elif self.alignmentBtn.currentText() == 'Center':\n if 2*int(w/2)==w:\n pad_width = (int(w/2),int(w/2))\n else:\n pad_width = (int(w/2)+1,int(w/2))\n profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)\n\n ### make plot\n # lines = []\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n 
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)\n ax.set_xlabel(self.profileType)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(2,2))\n ax.set_ylabel('Time')\n ax.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,2))\n # ax.axis('off') \n \n # plot the selected group only\n i = self.groupPlotBtn.currentIndex()\n\n # compute and plot mean and std of the selected group\n # prepare blank image\n max_t = np.max([n_gastr[i][j] for j in range(n_folders[i])])\n max_l = np.max([len(profiles_all[i][j][k]) for j in range(n_folders[i]) for k in range(n_gastr[i][j])])\n\n data_mean = np.zeros((max_t,max_l))\n data_count = np.zeros((max_t,max_l))\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n data = np.nan_to_num(profiles_all[i][j][k])\n data_mean[k,:] += data \n data_count[k,:] += data!=0 \n # plot the raw data if the button is checked\n # if self.rawBtn.isChecked():\n # ax.plot(data_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)\n data_mean = data_mean.astype(np.float)/data_count.astype(np.float)\n data_mean = np.nan_to_num(data_mean)\n\n aspect = 'auto'\n if self.aspectRatioBtn.isChecked():\n aspect = 'equal'\n \n ax.imshow(data_mean, aspect=aspect)\n ax.set_title('Group '+str(i+1))\n self.tif_data = data_mean\n\n self.canvas.draw()\n \n def save_tif(self):\n name,_ = QFileDialog.getSaveFileName(self, 'Save Overview File')\n if name != '':\n ### check file extension: allow to save in other formats, but bias towards tif\n if os.path.splitext(name)[-1]!='.tif':\n buttonReply = QMessageBox.question(self,'File format warning!','File format not recommended. Do you want to save the image as tif?')\n if buttonReply == QMessageBox.Yes:\n name = os.path.splitext(name)[0]+'.tif'\n \n # convert the image into int16 with the right brightness and contrast\n if self.percs[0]!=None:\n self.tif_data = (2**16-1)*(self.tif_data-self.percs[0])/(self.percs[1]-self.percs[0])\n imsave(name+'', self.tif_data.astype(np.uint16))\n\n\n" ]
[ [ "numpy.pad", "numpy.linspace", "matplotlib.figure.Figure", "numpy.min", "numpy.clip", "numpy.isnan", "matplotlib.backends.backend_qt5agg.FigureCanvas", "numpy.nan_to_num", "numpy.max", "matplotlib.backends.backend_qt5agg.NavigationToolbar2QT", "scipy.interpolate.interp1d", "numpy.nanmean", "numpy.nanstd", "numpy.array", "numpy.zeros", "matplotlib.rc" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
libinruan/hierarchical_bayesian_target_encoder
[ "7510028a8ad1dea308802c4ca3d3a05533a9c89b" ]
[ "BayCatEncoder/code.py" ]
[ "#%%\nimport numpy as np\nimport pandas as pd\nimport time\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom collections import defaultdict\nfrom sklearn.model_selection import KFold, StratifiedKFold \n\nclass Timer:\n def __enter__(self):\n self.start=time.time()\n return self\n def __exit__(self, *args):\n self.end=time.time()\n self.hour, temp = divmod((self.end - self.start), 3600)\n self.min, self.second = divmod(temp, 60)\n self.hour, self.min, self.second = int(self.hour), int(self.min), round(self.second, 2)\n return self\n\nclass BayCatEncoder(BaseEstimator, TransformerMixin):\n def __init__(self, \n group_cols, \n target_col='target', \n N_min=1, # the higher, the more regularization is introduced into the update.\n CV=True, \n n_fold=5,\n verbosity=True,\n delimiter='.',\n drop_original=False, \n drop_intermediate=False,\n random_seed=2020):\n self.group_cols = [group_cols] if isinstance(group_cols, str) else group_cols # List of column names combination: e.g. ['n1.n2.n4', 'n3.n4', 'n2'].\n self.target_col = target_col # String: 'target' by default.\n self.stats = defaultdict(dict) # key: column names combination; value: corresponding info about n, N, and computed code.\n self.N_min = N_min # regularization control\n self.drop_original = drop_original # toggle key for whether to drop original column name(s) or not.\n self.CV = CV # Bool\n self.n_fold = n_fold\n self.drop_intermediate = drop_intermediate\n self.delimiter = delimiter\n self.verbosity = verbosity # Bool\n self.seed = random_seed\n self.set_original_col = set()\n\n def fit(self, X, y): \n self.col_subsets = self._generate_subsets(self.group_cols)\n df = pd.concat([X.copy(), y.copy()], axis=1)\n assert(isinstance(self.target_col, str))\n df.columns = X.columns.tolist() + [self.target_col] \n assert(self._check_col_consistency(X))\n if not self.CV:\n self._single_fit(df)\n else:\n self._cv_fit(df)\n return self\n\n def _single_fit(self, df):\n size_col_subsets = len(self.col_subsets)\n count_subset = 0 \n print(f'start bayesian target encoding on cross features in the following order: {self.col_subsets}') \n for subset in self.col_subsets:\n count_subset += 1\n with Timer() as t:\n if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets}')\n df_stat, stat, cross_features = self._update(df, subset)\n features_encoded = cross_features + '_code'\n self.stats[cross_features] = pd.merge(\n stat, \n df_stat.groupby(subset)[features_encoded].mean(), \n left_index=True, \n right_index=True) \n if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds') \n return self \n\n def _cv_fit(self, df):\n kf = StratifiedKFold(n_splits = self.n_fold, shuffle = True, random_state=self.seed)\n size_col_subsets = len(self.col_subsets)\n count_subset = 0\n for subset in self.col_subsets:\n count_subset += 1\n with Timer() as t:\n for i, (tr_idx, val_idx) in enumerate(kf.split(df.drop(self.target_col, axis=1), df[self.target_col])):\n if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets} - Round {i+1}/{self.n_fold}')\n df_tr, df_val = df.iloc[tr_idx].copy(), df.iloc[val_idx].copy() # Vital for avoid \"A value is trying to be set on a copy of a slice from a DataFrame.\" warning.\n df_stat, stat, cross_features = self._update(df_tr, subset)\n features_encoded = cross_features + '_code'\n df.loc[df.index[val_idx], features_encoded] = pd.merge(\n df_val[subset], \n df_stat.groupby(subset)[features_encoded].mean(),\n left_on=subset,\n right_index=True,\n 
how='left'\n )[features_encoded].copy() \\\n .fillna(df[self.target_col].mean()) \n self.stats[cross_features] = df.groupby(subset)[features_encoded].mean().to_frame()\n if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds') \n return self \n\n def _update(self, df, subset):\n self.global_prior_mean = df[self.target_col].mean()\n if len(subset) == 1:\n self.set_original_col.add(*subset)\n upper_level_cols = 'global'\n if not upper_level_cols + '_prior_mean' in df.columns:\n df.loc[:, upper_level_cols + '_prior_mean'] = self.global_prior_mean\n else:\n upper_level_cols = self.delimiter.join(subset[:-1]) # e.g. the n1.n2 subset's upper level feature is `n1`.\n if not upper_level_cols + '_prior_mean' in df.columns: \n df.loc[:, upper_level_cols + '_prior_mean'] = pd.merge(\n df[subset[:-1]], \n self.stats[upper_level_cols][upper_level_cols + '_code'], \n left_on=subset[:-1], \n right_index=True, \n how='left'\n )[upper_level_cols + '_code'].copy()\n \n stat = df.groupby(subset).agg(\n n=(self.target_col, 'sum'),\n N=(self.target_col, 'count'),\n prior_mean=(upper_level_cols + '_prior_mean', 'mean')\n )\n # Calculate posterior mean\n df_stat = pd.merge(df[subset], stat, left_on=subset, right_index=True, how='left')\n df_stat['n'].mask(df_stat['n'].isnull(), df_stat['prior_mean'], inplace=True) \n df_stat['N'].fillna(1., inplace=True)\n df_stat.loc[:, 'N_prior'] = df_stat['N'].map(lambda x: max(self.N_min - x, 0))\n df_stat.loc[:, 'alpha_prior'] = df_stat['prior_mean'] * df_stat['N_prior']\n df_stat.loc[:, 'beta_prior'] = (1. - df_stat['prior_mean']) * df_stat['N_prior'] # Large N -> zero N_prior -> zero alpha_prior and zero beta_prior -> if n is zero as well -> alpha prior, beta prior both zero -> alpha zero -> posterior mean = zero as well. 
\n if len(subset) == 1:\n cross_features = subset[0]\n else:\n cross_features = self.delimiter.join(subset)\n df_stat.loc[:, cross_features + '_code'] = df_stat.apply(self._stat_mean, axis=1) # core # TEST set!!\n return df_stat, stat, cross_features\n\n def _generate_subsets(self, groups, delimiter='.'):\n subsets = defaultdict(list) \n for g in groups:\n chain = g.split(delimiter)\n for i in range(len(chain)):\n if chain[i] and not chain[:i+1] in subsets[i]: subsets[i].append(chain[:i+1])\n ret = []\n for _, v in subsets.items():\n if not v in ret: ret.extend(v)\n return ret \n\n def _stat_mean(self, X):\n df = X.copy()\n alpha = df['alpha_prior'] + df['n']\n beta = df['beta_prior'] + df['N'] - df['n']\n return alpha / (alpha + beta)\n\n def _check_col_consistency(self, df): \n \"\"\"Check whether columns specified in `self.group_cols` are all included in `df`.\n \"\"\" \n s = set()\n for col_subset in self.col_subsets:\n s |= set(col_subset)\n for col in s:\n if not col in df.columns: return False\n return True \n\n def transform(self, X):\n assert(self._check_col_consistency(X))\n for subset in self.col_subsets:\n key = '.'.join(subset)\n X = pd.merge(\n X, \n self.stats[key][key + '_code'], \n left_on=subset, \n right_index=True, \n how='left')\n if len(subset) == 1:\n X[key + '_code'].fillna(self.global_prior_mean, inplace=True)\n else:\n parent_key = '.'.join(subset[:-1]) + '_code' \n X[key + '_code'].fillna(X[parent_key].mask(X[parent_key] > self.global_prior_mean, self.global_prior_mean), inplace=True)\n if self.drop_original:\n for col in self.set_original_col:\n X.drop(col, axis=1, inplace=True)\n X.rename(columns={col+'_code': col}, inplace=True)\n if self.drop_intermediate: \n for col in X.columns:\n if col.endswith('_code') and not col.strip('_code') in self.group_cols:\n X.drop(col, axis=1, inplace=True)\n return X\n\n#%%\nif __name__ == '__main__':\n np.random.seed(1)\n k = 15\n n1 = np.random.choice(['a','b'], k)\n n2 = np.random.choice(['c','d'], k)\n n3 = np.random.choice(['e','f'], k)\n target = np.random.randint(0, 2, size=k)\n train = pd.DataFrame(\n {'n1': n1, 'n2': n2, 'n3':n3, 'target': target}, \n columns=['n1', 'n2', 'n3', 'target']\n )\n train.columns = ['n1','n2','n3', 'target']\n \n train\n\n k = 6\n n4 = np.random.choice(['a','b'], k)\n n5 = np.random.choice(['c','d'], k)\n n6 = np.random.choice(['e','f'], k)\n test = pd.DataFrame({'n4': n4, 'n2': n5, 'n3':n6})\n test.columns = ['n1','n2','n3']\n \n test\n \n te = BayCatEncoder(\n 'n1.n2.n3', #['n1.n2.n3', 'n2.n3', 'n3'], \n target_col='target', \n drop_original=False, \n drop_intermediate=False,\n CV=False\n ) \\\n .fit(train.drop('target', axis=1), train.target) \n # te.transform(test)\n te.transform(test)\n\n# %%\n" ]
[ [ "pandas.merge", "numpy.random.seed", "numpy.random.choice", "sklearn.model_selection.StratifiedKFold", "pandas.DataFrame", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
xinwang1/Quantum
[ "0f56e36e9e6111547547ae1b6cd5df307b41c1ac" ]
[ "paddle_quantum/QAOA/example/main.py" ]
[ "# Copyright (c) 2020 Institute for Quantum Computing, Baidu Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nmain\n\"\"\"\n\nfrom paddle import fluid\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nfrom paddle_quantum.utils import pauli_str_to_matrix\nfrom paddle_quantum.QAOA.Paddle_QAOA import Paddle_QAOA\nfrom paddle_quantum.QAOA.QAOA_Prefunc import generate_graph, H_generator\n\n\ndef main(N=4):\n # number of qubits or number of nodes in the graph\n N = 4\n classical_graph, classical_graph_adjacency = generate_graph(N, GRAPHMETHOD=1)\n print(classical_graph_adjacency)\n\n # Convert the Hamiltonian's list form to matrix form\n H_matrix = pauli_str_to_matrix(H_generator(N, classical_graph_adjacency), N)\n\n H_diag = np.diag(H_matrix).real\n H_max = np.max(H_diag)\n H_min = np.min(H_diag)\n\n print(H_diag)\n print('H_max:', H_max, ' H_min:', H_min)\n\n pos = nx.circular_layout(classical_graph)\n nx.draw(classical_graph, pos, width=4, with_labels=True, font_weight='bold')\n plt.show()\n\n classical_graph, classical_graph_adjacency = generate_graph(N, 1)\n\n opt_cir = Paddle_QAOA(classical_graph_adjacency, N=4, P=4, METHOD=1, ITR=120, LR=0.1)\n\n # Load the data of QAOA\n x1 = np.load('./output/summary_data.npz')\n\n H_min = np.ones([len(x1['iter'])]) * H_min\n\n # Plot loss\n loss_QAOA, = plt.plot(x1['iter'], x1['energy'], alpha=0.7, marker='', linestyle=\"--\", linewidth=2, color='m')\n benchmark, = plt.plot(x1['iter'], H_min, alpha=0.7, marker='', linestyle=\":\", linewidth=2, color='b')\n plt.xlabel('Number of iteration')\n plt.ylabel('Performance of the loss function for QAOA')\n\n plt.legend(handles=[\n loss_QAOA,\n benchmark\n ],\n labels=[\n r'Loss function $\\left\\langle {\\psi \\left( {\\bf{\\theta }} \\right)} '\n r'\\right|H\\left| {\\psi \\left( {\\bf{\\theta }} \\right)} \\right\\rangle $',\n 'The benchmark result',\n ], loc='best')\n\n # Show the plot\n plt.show()\n\n with fluid.dygraph.guard():\n # Measure the output state of the QAOA circuit for 1024 shots by default\n prob_measure = opt_cir.measure(plot=True)\n\n # Find the max value in measured probability of bitstrings\n max_prob = max(prob_measure.values())\n # Find the bitstring with max probability\n solution_list = [result[0] for result in prob_measure.items() if result[1] == max_prob]\n print(\"The output bitstring:\", solution_list)\n\n # Draw the graph representing the first bitstring in the solution_list to the MaxCut-like problem\n head_bitstring = solution_list[0]\n\n node_cut = [\"blue\" if head_bitstring[node] == \"1\" else \"red\" for node in classical_graph]\n\n edge_cut = [\n \"solid\" if head_bitstring[node_row] == head_bitstring[node_col] else \"dashed\"\n for node_row, node_col in classical_graph.edges()\n ]\n nx.draw(\n classical_graph,\n pos,\n node_color=node_cut,\n style=edge_cut,\n width=4,\n with_labels=True,\n font_weight=\"bold\",\n )\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.diag", "matplotlib.pyplot.legend", "numpy.min", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.xlabel", "numpy.load", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Rishank2610/gammapy
[ "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76", "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76", "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76", "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76", "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76", "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76", "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76", "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76", "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76", "3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76" ]
[ "gammapy/utils/testing.py", "gammapy/datasets/tests/test_datasets.py", "gammapy/maps/wcs/core.py", "gammapy/irf/psf/tests/test_map.py", "gammapy/estimators/flux.py", "gammapy/modeling/models/tests/test_spectral.py", "gammapy/estimators/tests/test_parameter_estimator.py", "docs/astro/source/plot_pwn_evolution.py", "examples/models/temporal/plot_expdecay_temporal.py", "gammapy/irf/edisp/core.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Utilities for testing\"\"\"\nimport os\nimport sys\nfrom numpy.testing import assert_allclose\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.time import Time\n\n__all__ = [\n \"requires_dependency\",\n \"requires_data\",\n \"mpl_plot_check\",\n \"assert_quantity_allclose\",\n \"assert_skycoord_allclose\",\n \"assert_time_allclose\",\n \"Checker\",\n]\n\n# Cache for `requires_dependency`\n_requires_dependency_cache = {}\n\n\ndef requires_dependency(name):\n \"\"\"Decorator to declare required dependencies for tests.\n\n Examples\n --------\n ::\n\n from gammapy.utils.testing import requires_dependency\n\n @requires_dependency('scipy')\n def test_using_scipy():\n import scipy\n ...\n \"\"\"\n import pytest\n\n if name in _requires_dependency_cache:\n skip_it = _requires_dependency_cache[name]\n else:\n try:\n __import__(name)\n skip_it = False\n except ImportError:\n skip_it = True\n\n _requires_dependency_cache[name] = skip_it\n\n reason = f\"Missing dependency: {name}\"\n return pytest.mark.skipif(skip_it, reason=reason)\n\n\ndef has_data(name):\n \"\"\"Is a certain set of data available?\"\"\"\n if name == \"gammapy-extra\":\n return \"GAMMAPY_EXTRA\" in os.environ\n elif name == \"gammapy-data\":\n return \"GAMMAPY_DATA\" in os.environ\n elif name == \"gamma-cat\":\n return \"GAMMA_CAT\" in os.environ\n elif name == \"fermi-lat\":\n return \"GAMMAPY_FERMI_LAT_DATA\" in os.environ\n else:\n raise ValueError(f\"Invalid name: {name}\")\n\n\ndef requires_data(name=\"gammapy-data\"):\n \"\"\"Decorator to declare required data for tests.\n\n Examples\n --------\n ::\n\n from gammapy.utils.testing import requires_data\n\n @requires_data()\n def test_using_data_files():\n filename = \"$GAMMAPY_DATA/...\"\n ...\n \"\"\"\n import pytest\n\n if not isinstance(name, str):\n raise TypeError(\n \"You must call @requires_data with a name (str). 
\"\n \"Usually this: @requires_data()\"\n )\n\n skip_it = not has_data(name)\n\n reason = f\"Missing data: {name}\"\n return pytest.mark.skipif(skip_it, reason=reason)\n\n\ndef run_cli(cli, args, exit_code=0):\n \"\"\"Run Click command line tool.\n\n Thin wrapper around `click.testing.CliRunner`\n that prints info to stderr if the command fails.\n\n Parameters\n ----------\n cli : click.Command\n Click command\n args : list of str\n Argument list\n exit_code : int\n Expected exit code of the command\n\n Returns\n -------\n result : `click.testing.Result`\n Result\n \"\"\"\n from click.testing import CliRunner\n\n result = CliRunner().invoke(cli, args, catch_exceptions=False)\n\n if result.exit_code != exit_code:\n sys.stderr.write(\"Exit code mismatch!\\n\")\n sys.stderr.write(\"Output:\\n\")\n sys.stderr.write(result.output)\n\n return result\n\n\ndef assert_skycoord_allclose(actual, desired):\n \"\"\"Assert all-close for `astropy.coordinates.SkyCoord` objects.\n\n - Frames can be different, aren't checked at the moment.\n \"\"\"\n assert isinstance(actual, SkyCoord)\n assert isinstance(desired, SkyCoord)\n assert_allclose(actual.data.lon.deg, desired.data.lon.deg)\n assert_allclose(actual.data.lat.deg, desired.data.lat.deg)\n\n\ndef assert_time_allclose(actual, desired, atol=1e-3):\n \"\"\"Assert all-close for `astropy.time.Time` objects.\n\n atol is absolute tolerance in seconds.\n \"\"\"\n assert isinstance(actual, Time)\n assert isinstance(desired, Time)\n assert actual.scale == desired.scale\n assert actual.format == desired.format\n dt = actual - desired\n assert_allclose(dt.sec, 0, rtol=0, atol=atol)\n\n\ndef assert_quantity_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs):\n \"\"\"Assert all-close for `astropy.units.Quantity` objects.\n\n Requires that ``unit`` is identical, not just that quantities\n are allclose taking different units into account.\n\n We prefer this kind of assert for testing, since units\n should only change on purpose, so this tests more behaviour.\n \"\"\"\n # TODO: change this later to explicitly check units are the same!\n # assert actual.unit == desired.unit\n args = _unquantify_allclose_arguments(actual, desired, rtol, atol)\n assert_allclose(*args, **kwargs)\n\n\ndef _unquantify_allclose_arguments(actual, desired, rtol, atol):\n actual = u.Quantity(actual, subok=True, copy=False)\n\n desired = u.Quantity(desired, subok=True, copy=False)\n try:\n desired = desired.to(actual.unit)\n except u.UnitsError:\n raise u.UnitsError(\n \"Units for 'desired' ({}) and 'actual' ({}) \"\n \"are not convertible\".format(desired.unit, actual.unit)\n )\n\n if atol is None:\n # by default, we assume an absolute tolerance of 0\n atol = u.Quantity(0)\n else:\n atol = u.Quantity(atol, subok=True, copy=False)\n try:\n atol = atol.to(actual.unit)\n except u.UnitsError:\n raise u.UnitsError(\n \"Units for 'atol' ({}) and 'actual' ({}) \"\n \"are not convertible\".format(atol.unit, actual.unit)\n )\n\n rtol = u.Quantity(rtol, subok=True, copy=False)\n try:\n rtol = rtol.to(u.dimensionless_unscaled)\n except Exception:\n raise u.UnitsError(\"`rtol` should be dimensionless\")\n\n return actual.value, desired.value, rtol.value, atol.value\n\n\ndef mpl_plot_check():\n \"\"\"Matplotlib plotting test context manager.\n\n It create a new figure on __enter__ and calls savefig for the\n current figure in __exit__. This will trigger a render of the\n Figure, which can sometimes raise errors if there is a problem.\n\n This is writing to an in-memory byte buffer, i.e. 
is faster\n than writing to disk.\n \"\"\"\n from io import BytesIO\n import matplotlib.pyplot as plt\n\n class MPLPlotCheck:\n def __enter__(self):\n plt.figure()\n\n def __exit__(self, type, value, traceback):\n plt.savefig(BytesIO(), format=\"png\")\n plt.close()\n\n return MPLPlotCheck()\n\n\nclass Checker:\n \"\"\"Base class for checker classes in Gammapy.\"\"\"\n\n def run(self, checks=\"all\"):\n if checks == \"all\":\n checks = self.CHECKS.keys()\n\n unknown_checks = sorted(set(checks).difference(self.CHECKS.keys()))\n if unknown_checks:\n raise ValueError(f\"Unknown checks: {unknown_checks!r}\")\n\n for check in checks:\n method = getattr(self, self.CHECKS[check])\n yield from method()\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nfrom numpy.testing import assert_allclose\nfrom gammapy.datasets import Datasets\nfrom gammapy.modeling.tests.test_fit import MyDataset\n\n\[email protected](scope=\"session\")\ndef datasets():\n return Datasets([MyDataset(name=\"test-1\"), MyDataset(name=\"test-2\")])\n\n\ndef test_datasets_init(datasets):\n # Passing a Python list of `Dataset` objects should work\n Datasets(list(datasets))\n\n # Passing an existing `Datasets` object should work\n Datasets(datasets)\n\n\ndef test_datasets_types(datasets):\n assert datasets.is_all_same_type\n\n\ndef test_datasets_likelihood(datasets):\n likelihood = datasets.stat_sum()\n assert_allclose(likelihood, 14472200.0002)\n\n\ndef test_datasets_str(datasets):\n assert \"Datasets\" in str(datasets)\n\n\ndef test_datasets_getitem(datasets):\n assert datasets[\"test-1\"].name == \"test-1\"\n assert datasets[\"test-2\"].name == \"test-2\"\n\n\ndef test_names(datasets):\n assert datasets.names == [\"test-1\", \"test-2\"]\n\n\ndef test_Datasets_mutation():\n dat = MyDataset(name=\"test-1\")\n dats = Datasets([MyDataset(name=\"test-2\"), MyDataset(name=\"test-3\")])\n dats2 = Datasets([MyDataset(name=\"test-4\"), MyDataset(name=\"test-5\")])\n\n dats.insert(0, dat)\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\"]\n\n dats.extend(dats2)\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\", \"test-4\", \"test-5\"]\n\n dat3 = dats[3]\n dats.remove(dats[3])\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\", \"test-5\"]\n dats.append(dat3)\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\", \"test-5\", \"test-4\"]\n dats.pop(3)\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\", \"test-4\"]\n\n with pytest.raises(ValueError, match=\"Dataset names must be unique\"):\n dats.append(dat)\n with pytest.raises(ValueError, match=\"Dataset names must be unique\"):\n dats.insert(0, dat)\n with pytest.raises(ValueError, match=\"Dataset names must be unique\"):\n dats.extend(dats2)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport json\nimport numpy as np\nfrom astropy.io import fits\nfrom ..core import Map\nfrom ..io import JsonQuantityEncoder, find_bands_hdu, find_hdu\nfrom .geom import WcsGeom\nfrom .io import identify_wcs_format\n\n__all__ = [\"WcsMap\"]\n\n\nclass WcsMap(Map):\n \"\"\"Base class for WCS map classes.\n\n Parameters\n ----------\n geom : `~gammapy.maps.WcsGeom`\n A WCS geometry object.\n data : `~numpy.ndarray`\n Data array.\n \"\"\"\n\n @classmethod\n def create(\n cls,\n map_type=\"wcs\",\n npix=None,\n binsz=0.1,\n width=None,\n proj=\"CAR\",\n frame=\"icrs\",\n refpix=None,\n axes=None,\n skydir=None,\n dtype=\"float32\",\n meta=None,\n unit=\"\",\n ):\n \"\"\"Factory method to create an empty WCS map.\n\n 
Parameters\n ----------\n map_type : {'wcs', 'wcs-sparse'}\n Map type. Selects the class that will be used to\n instantiate the map.\n npix : int or tuple or list\n Width of the map in pixels. A tuple will be interpreted as\n parameters for longitude and latitude axes. For maps with\n non-spatial dimensions, list input can be used to define a\n different map width in each image plane. This option\n supersedes width.\n width : float or tuple or list\n Width of the map in degrees. A tuple will be interpreted\n as parameters for longitude and latitude axes. For maps\n with non-spatial dimensions, list input can be used to\n define a different map width in each image plane.\n binsz : float or tuple or list\n Map pixel size in degrees. A tuple will be interpreted\n as parameters for longitude and latitude axes. For maps\n with non-spatial dimensions, list input can be used to\n define a different bin size in each image plane.\n skydir : tuple or `~astropy.coordinates.SkyCoord`\n Sky position of map center. Can be either a SkyCoord\n object or a tuple of longitude and latitude in deg in the\n coordinate system of the map.\n frame : {\"icrs\", \"galactic\"}, optional\n Coordinate system, either Galactic (\"galactic\") or Equatorial (\"icrs\").\n axes : list\n List of non-spatial axes.\n proj : string, optional\n Any valid WCS projection type. Default is 'CAR' (cartesian).\n refpix : tuple\n Reference pixel of the projection. If None then this will\n be chosen to be center of the map.\n dtype : str, optional\n Data type, default is float32\n meta : `dict`\n Dictionary to store meta data.\n unit : str or `~astropy.units.Unit`\n The unit of the map\n\n Returns\n -------\n map : `~WcsMap`\n A WCS map object.\n \"\"\"\n from .ndmap import WcsNDMap\n\n geom = WcsGeom.create(\n npix=npix,\n binsz=binsz,\n width=width,\n proj=proj,\n skydir=skydir,\n frame=frame,\n refpix=refpix,\n axes=axes,\n )\n\n if map_type == \"wcs\":\n return WcsNDMap(geom, dtype=dtype, meta=meta, unit=unit)\n elif map_type == \"wcs-sparse\":\n raise NotImplementedError\n else:\n raise ValueError(f\"Invalid map type: {map_type!r}\")\n\n @classmethod\n def from_hdulist(cls, hdu_list, hdu=None, hdu_bands=None, format=\"gadf\"):\n \"\"\"Make a WcsMap object from a FITS HDUList.\n\n Parameters\n ----------\n hdu_list : `~astropy.io.fits.HDUList`\n HDU list containing HDUs for map data and bands.\n hdu : str\n Name or index of the HDU with the map data.\n hdu_bands : str\n Name or index of the HDU with the BANDS table.\n format : {'gadf', 'fgst-ccube', 'fgst-template'}\n FITS format convention.\n\n Returns\n -------\n wcs_map : `WcsMap`\n Map object\n \"\"\"\n if hdu is None:\n hdu = find_hdu(hdu_list)\n else:\n hdu = hdu_list[hdu]\n\n if hdu_bands is None:\n hdu_bands = find_bands_hdu(hdu_list, hdu)\n\n if hdu_bands is not None:\n hdu_bands = hdu_list[hdu_bands]\n\n format = identify_wcs_format(hdu_bands)\n\n wcs_map = cls.from_hdu(hdu, hdu_bands, format=format)\n\n if wcs_map.unit.is_equivalent(\"\"):\n if format == \"fgst-template\":\n if \"GTI\" in hdu_list: # exposure maps have an additional GTI hdu\n wcs_map.unit = \"cm2 s\"\n else:\n wcs_map.unit = \"cm-2 s-1 MeV-1 sr-1\"\n\n return wcs_map\n\n def to_hdulist(self, hdu=None, hdu_bands=None, sparse=False, format=\"gadf\"):\n \"\"\"Convert to `~astropy.io.fits.HDUList`.\n\n Parameters\n ----------\n hdu : str\n Name or index of the HDU with the map data.\n hdu_bands : str\n Name or index of the HDU with the BANDS table.\n sparse : bool\n Sparsify the map by only writing pixels 
with non-zero\n amplitude.\n format : {'gadf', 'fgst-ccube','fgst-template'}\n FITS format convention.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n\n \"\"\"\n if sparse:\n hdu = \"SKYMAP\" if hdu is None else hdu.upper()\n else:\n hdu = \"PRIMARY\" if hdu is None else hdu.upper()\n\n if sparse and hdu == \"PRIMARY\":\n raise ValueError(\"Sparse maps cannot be written to the PRIMARY HDU.\")\n\n if format in [\"fgst-ccube\", \"fgst-template\"]:\n if self.geom.axes[0].name != \"energy\" or len(self.geom.axes) > 1:\n raise ValueError(\n \"All 'fgst' formats don't support extra axes except for energy.\"\n )\n\n if hdu_bands is None:\n hdu_bands = f\"{hdu.upper()}_BANDS\"\n\n if self.geom.axes:\n hdu_bands_out = self.geom.to_bands_hdu(hdu_bands=hdu_bands, format=format)\n hdu_bands = hdu_bands_out.name\n else:\n hdu_bands = None\n\n hdu_out = self.to_hdu(hdu=hdu, hdu_bands=hdu_bands, sparse=sparse)\n\n hdu_out.header[\"META\"] = json.dumps(self.meta, cls=JsonQuantityEncoder)\n\n hdu_out.header[\"BUNIT\"] = self.unit.to_string(\"fits\")\n\n if hdu == \"PRIMARY\":\n hdulist = [hdu_out]\n else:\n hdulist = [fits.PrimaryHDU(), hdu_out]\n\n if self.geom.axes:\n hdulist += [hdu_bands_out]\n\n return fits.HDUList(hdulist)\n\n def to_hdu(self, hdu=\"SKYMAP\", hdu_bands=None, sparse=False):\n \"\"\"Make a FITS HDU from this map.\n\n Parameters\n ----------\n hdu : str\n The HDU extension name.\n hdu_bands : str\n The HDU extension name for BANDS table.\n sparse : bool\n Set INDXSCHM to SPARSE and sparsify the map by only\n writing pixels with non-zero amplitude.\n\n Returns\n -------\n hdu : `~astropy.io.fits.BinTableHDU` or `~astropy.io.fits.ImageHDU`\n HDU containing the map data.\n \"\"\"\n header = self.geom.to_header()\n\n if self.is_mask:\n data = self.data.astype(int)\n else:\n data = self.data\n\n if hdu_bands is not None:\n header[\"BANDSHDU\"] = hdu_bands\n\n if sparse:\n hdu_out = self._make_hdu_sparse(data, self.geom.npix, hdu, header)\n elif hdu == \"PRIMARY\":\n hdu_out = fits.PrimaryHDU(data, header=header)\n else:\n hdu_out = fits.ImageHDU(data, header=header, name=hdu)\n\n return hdu_out\n\n @staticmethod\n def _make_hdu_sparse(data, npix, hdu, header):\n shape = data.shape\n\n # We make a copy, because below we modify `data` to handle non-finite entries\n # TODO: The code below could probably be simplified to use expressions\n # that create new arrays instead of in-place modifications\n # But first: do we want / need the non-finite entry handling at all and always cast to 64-bit float?\n data = data.copy()\n\n if len(shape) == 2:\n data_flat = np.ravel(data)\n non_zero = np.where(~(data_flat == 0))\n value = data_flat[non_zero].astype(float)\n cols = [\n fits.Column(\"PIX\", \"J\", array=non_zero[0]),\n fits.Column(\"VALUE\", \"E\", array=value),\n ]\n elif npix[0].size == 1:\n shape_flat = shape[:-2] + (shape[-1] * shape[-2],)\n data_flat = np.ravel(data).reshape(shape_flat)\n nonzero = np.where(~(data_flat == 0))\n channel = np.ravel_multi_index(nonzero[:-1], shape[:-2])\n value = data_flat[nonzero].astype(float)\n cols = [\n fits.Column(\"PIX\", \"J\", array=nonzero[-1]),\n fits.Column(\"CHANNEL\", \"I\", array=channel),\n fits.Column(\"VALUE\", \"E\", array=value),\n ]\n else:\n data_flat = []\n channel = []\n pix = []\n for i, _ in np.ndenumerate(npix[0]):\n data_i = np.ravel(data[i[::-1]])\n pix_i = np.where(~(data_i == 0))\n data_i = data_i[pix_i]\n data_flat += [data_i]\n pix += pix_i\n channel += [\n np.ones(data_i.size, dtype=int)\n * 
np.ravel_multi_index(i[::-1], shape[:-2])\n ]\n\n pix = np.concatenate(pix)\n channel = np.concatenate(channel)\n value = np.concatenate(data_flat).astype(float)\n\n cols = [\n fits.Column(\"PIX\", \"J\", array=pix),\n fits.Column(\"CHANNEL\", \"I\", array=channel),\n fits.Column(\"VALUE\", \"E\", array=value),\n ]\n\n return fits.BinTableHDU.from_columns(cols, header=header, name=hdu)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.units import Unit\nfrom gammapy.data import DataStore\nfrom gammapy.irf import PSF3D, EffectiveAreaTable2D, PSFMap\nfrom gammapy.makers.utils import make_map_exposure_true_energy, make_psf_map\nfrom gammapy.maps import Map, MapAxis, MapCoord, RegionGeom, WcsGeom\nfrom gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n\n\[email protected](scope=\"session\")\ndef data_store():\n return DataStore.from_dir(\"$GAMMAPY_DATA/hess-dl3-dr1/\")\n\n\ndef fake_psf3d(sigma=0.15 * u.deg, shape=\"gauss\"):\n offset_axis = MapAxis.from_nodes([0, 1, 2, 3] * u.deg, name=\"offset\")\n\n energy_axis_true = MapAxis.from_energy_bounds(\n \"0.1 TeV\", \"10 TeV\", nbin=4, name=\"energy_true\"\n )\n\n rad = np.linspace(0, 1.0, 101) * u.deg\n rad_axis = MapAxis.from_edges(rad, name=\"rad\")\n\n O, R, E = np.meshgrid(offset_axis.center, rad_axis.edges, energy_axis_true.center)\n\n Rmid = 0.5 * (R[:-1] + R[1:])\n if shape == \"gauss\":\n val = np.exp(-0.5 * Rmid ** 2 / sigma ** 2)\n else:\n val = Rmid < sigma\n\n drad = 2 * np.pi * (np.cos(R[:-1]) - np.cos(R[1:])) * u.Unit(\"sr\")\n psf_value = val / ((val * drad).sum(0)[0])\n\n return PSF3D(\n axes=[energy_axis_true, offset_axis, rad_axis],\n data=psf_value.T.value,\n unit=psf_value.unit,\n )\n\n\ndef fake_aeff2d(area=1e6 * u.m ** 2):\n energy_axis_true = MapAxis.from_energy_bounds(\n \"0.1 TeV\", \"10 TeV\", nbin=4, name=\"energy_true\"\n )\n\n offset_axis = MapAxis.from_edges([0.0, 1.0, 2.0, 3.0] * u.deg, name=\"offset\")\n\n return EffectiveAreaTable2D(\n axes=[energy_axis_true, offset_axis], data=area.value, unit=area.unit\n )\n\n\ndef test_make_psf_map():\n psf = fake_psf3d(0.3 * u.deg)\n\n pointing = SkyCoord(0, 0, unit=\"deg\")\n energy_axis = MapAxis(\n nodes=[0.2, 0.7, 1.5, 2.0, 10.0], unit=\"TeV\", name=\"energy_true\"\n )\n rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit=\"deg\", name=\"rad\")\n\n geom = WcsGeom.create(\n skydir=pointing, binsz=0.2, width=5, axes=[rad_axis, energy_axis]\n )\n\n psfmap = make_psf_map(psf, pointing, geom)\n\n assert psfmap.psf_map.geom.axes[0] == rad_axis\n assert psfmap.psf_map.geom.axes[1] == energy_axis\n assert psfmap.psf_map.unit == \"deg-2\"\n assert psfmap.psf_map.data.shape == (4, 50, 25, 25)\n\n\ndef make_test_psfmap(size, shape=\"gauss\"):\n psf = fake_psf3d(size, shape)\n aeff2d = fake_aeff2d()\n\n pointing = SkyCoord(0, 0, unit=\"deg\")\n energy_axis = MapAxis(\n nodes=[0.2, 0.7, 1.5, 2.0, 10.0], unit=\"TeV\", name=\"energy_true\"\n )\n rad_axis = MapAxis.from_edges(\n edges=np.linspace(0.0, 1, 101), unit=\"deg\", name=\"rad\"\n )\n\n geom = WcsGeom.create(\n skydir=pointing, binsz=0.2, width=5, axes=[rad_axis, energy_axis]\n )\n\n exposure_geom = geom.squash(axis_name=\"rad\")\n\n exposure_map = make_map_exposure_true_energy(pointing, \"1 h\", aeff2d, exposure_geom)\n\n return make_psf_map(psf, pointing, geom, exposure_map)\n\n\ndef 
test_psf_map_containment_radius():\n psf_map = make_test_psfmap(0.15 * u.deg)\n psf = fake_psf3d(0.15 * u.deg)\n\n position = SkyCoord(0, 0, unit=\"deg\")\n\n # Check that containment radius is consistent between psf_table and psf3d\n assert_allclose(\n psf_map.containment_radius(\n energy_true=1 * u.TeV, position=position, fraction=0.9\n ),\n psf.containment_radius(energy_true=1 * u.TeV, offset=0 * u.deg, fraction=0.9),\n rtol=1e-2,\n )\n assert_allclose(\n psf_map.containment_radius(\n energy_true=1 * u.TeV, position=position, fraction=0.5\n ),\n psf.containment_radius(energy_true=1 * u.TeV, offset=0 * u.deg, fraction=0.5),\n rtol=1e-2,\n )\n\n\ndef test_psf_map_containment():\n psf_map = make_test_psfmap(0.15 * u.deg)\n assert_allclose(psf_map.containment(rad=10 * u.deg, energy_true=[10] * u.TeV), 1)\n\n\ndef test_psfmap_to_psf_kernel():\n psfmap = make_test_psfmap(0.15 * u.deg)\n\n energy_axis = psfmap.psf_map.geom.axes[1]\n # create PSFKernel\n kern_geom = WcsGeom.create(binsz=0.02, width=5.0, axes=[energy_axis])\n psfkernel = psfmap.get_psf_kernel(\n position=SkyCoord(1, 1, unit=\"deg\"), geom=kern_geom, max_radius=1 * u.deg\n )\n assert_allclose(psfkernel.psf_kernel_map.geom.width, 2.02 * u.deg)\n assert_allclose(psfkernel.psf_kernel_map.data.sum(axis=(1, 2)), 1.0, atol=1e-7)\n\n psfkernel = psfmap.get_psf_kernel(\n position=SkyCoord(1, 1, unit=\"deg\"),\n geom=kern_geom,\n )\n assert_allclose(psfkernel.psf_kernel_map.geom.width, 1.14 * u.deg)\n assert_allclose(psfkernel.psf_kernel_map.data.sum(axis=(1, 2)), 1.0, atol=1e-7)\n\n\ndef test_psfmap_to_from_hdulist():\n psfmap = make_test_psfmap(0.15 * u.deg)\n hdulist = psfmap.to_hdulist()\n assert \"PSF\" in hdulist\n assert \"PSF_BANDS\" in hdulist\n assert \"PSF_EXPOSURE\" in hdulist\n assert \"PSF_EXPOSURE_BANDS\" in hdulist\n\n new_psfmap = PSFMap.from_hdulist(hdulist)\n assert_allclose(psfmap.psf_map.data, new_psfmap.psf_map.data)\n assert new_psfmap.psf_map.geom == psfmap.psf_map.geom\n assert new_psfmap.exposure_map.geom == psfmap.exposure_map.geom\n\n\ndef test_psfmap_read_write(tmp_path):\n psfmap = make_test_psfmap(0.15 * u.deg)\n\n psfmap.write(tmp_path / \"tmp.fits\")\n new_psfmap = PSFMap.read(tmp_path / \"tmp.fits\")\n\n assert_allclose(psfmap.psf_map.quantity, new_psfmap.psf_map.quantity)\n\n\ndef test_containment_radius_map():\n psf = fake_psf3d(0.15 * u.deg)\n pointing = SkyCoord(0, 0, unit=\"deg\")\n energy_axis = MapAxis(nodes=[0.2, 1, 2], unit=\"TeV\", name=\"energy_true\")\n psf_theta_axis = MapAxis(nodes=np.linspace(0.0, 0.6, 30), unit=\"deg\", name=\"rad\")\n geom = WcsGeom.create(\n skydir=pointing, binsz=0.5, width=(4, 3), axes=[psf_theta_axis, energy_axis]\n )\n\n psfmap = make_psf_map(psf=psf, pointing=pointing, geom=geom)\n m = psfmap.containment_radius_map(energy_true=1 * u.TeV)\n coord = SkyCoord(0.3, 0, unit=\"deg\")\n val = m.interp_by_coord(coord)\n assert_allclose(val, 0.226477, rtol=1e-2)\n\n\ndef test_psfmap_stacking():\n psfmap1 = make_test_psfmap(0.1 * u.deg, shape=\"flat\")\n psfmap2 = make_test_psfmap(0.1 * u.deg, shape=\"flat\")\n psfmap2.exposure_map.quantity *= 2\n\n psfmap_stack = psfmap1.copy()\n psfmap_stack.stack(psfmap2)\n mask = psfmap_stack.psf_map.data > 0\n assert_allclose(psfmap_stack.psf_map.data[mask], psfmap1.psf_map.data[mask])\n assert_allclose(psfmap_stack.exposure_map.data, psfmap1.exposure_map.data * 3)\n\n psfmap3 = make_test_psfmap(0.3 * u.deg, shape=\"flat\")\n\n psfmap_stack = psfmap1.copy()\n psfmap_stack.stack(psfmap3)\n\n assert_allclose(psfmap_stack.psf_map.data[0, 
40, 20, 20], 0.0)\n assert_allclose(psfmap_stack.psf_map.data[0, 20, 20, 20], 1.768388, rtol=1e-6)\n assert_allclose(psfmap_stack.psf_map.data[0, 0, 20, 20], 17.683883, rtol=1e-6)\n\n\n# TODO: add a test comparing make_mean_psf and PSFMap.stack for a set of observations in an Observations\n\n\ndef test_sample_coord():\n psf_map = make_test_psfmap(0.1 * u.deg, shape=\"gauss\")\n\n coords_in = MapCoord(\n {\"lon\": [0, 0] * u.deg, \"lat\": [0, 0.5] * u.deg, \"energy_true\": [1, 3] * u.TeV},\n frame=\"icrs\",\n )\n\n coords = psf_map.sample_coord(map_coord=coords_in)\n assert coords.frame == \"icrs\"\n assert len(coords.lon) == 2\n assert_allclose(coords.lon, [0.074855, 0.042655], rtol=1e-3)\n assert_allclose(coords.lat, [-0.101561, 0.347365], rtol=1e-3)\n\n\ndef test_sample_coord_gauss():\n psf_map = make_test_psfmap(0.1 * u.deg, shape=\"gauss\")\n\n lon, lat = np.zeros(10000) * u.deg, np.zeros(10000) * u.deg\n energy = np.ones(10000) * u.TeV\n coords_in = MapCoord.create(\n {\"lon\": lon, \"lat\": lat, \"energy_true\": energy}, frame=\"icrs\"\n )\n coords = psf_map.sample_coord(coords_in)\n\n assert_allclose(np.mean(coords.skycoord.data.lon.wrap_at(\"180d\").deg), 0, atol=2e-3)\n assert_allclose(np.mean(coords.lat), 0, atol=2e-3)\n\n\ndef make_psf_map_obs(geom, obs):\n exposure_map = make_map_exposure_true_energy(\n geom=geom.squash(axis_name=\"rad\"),\n pointing=obs.pointing_radec,\n aeff=obs.aeff,\n livetime=obs.observation_live_time_duration,\n )\n\n psf_map = make_psf_map(\n geom=geom, psf=obs.psf, pointing=obs.pointing_radec, exposure_map=exposure_map\n )\n return psf_map\n\n\n@requires_data()\[email protected](\n \"pars\",\n [\n {\n \"energy\": None,\n \"rad\": None,\n \"energy_shape\": 32,\n \"psf_energy\": 0.8659643,\n \"rad_shape\": 144,\n \"psf_rad\": 0.0015362848,\n \"psf_exposure\": 3.14711e12 * u.Unit(\"cm2 s\"),\n \"psf_value_shape\": (32, 144),\n \"psf_value\": 4369.96391 * u.Unit(\"sr-1\"),\n },\n {\n \"energy\": MapAxis.from_energy_bounds(1, 10, 100, \"TeV\", name=\"energy_true\"),\n \"rad\": None,\n \"energy_shape\": 100,\n \"psf_energy\": 1.428893959,\n \"rad_shape\": 144,\n \"psf_rad\": 0.0015362848,\n \"psf_exposure\": 4.723409e12 * u.Unit(\"cm2 s\"),\n \"psf_value_shape\": (100, 144),\n \"psf_value\": 3714.303683 * u.Unit(\"sr-1\"),\n },\n {\n \"energy\": None,\n \"rad\": MapAxis.from_nodes(np.arange(0, 2, 0.002), unit=\"deg\", name=\"rad\"),\n \"energy_shape\": 32,\n \"psf_energy\": 0.8659643,\n \"rad_shape\": 1000,\n \"psf_rad\": 0.000524,\n \"psf_exposure\": 3.14711e12 * u.Unit(\"cm2 s\"),\n \"psf_value_shape\": (32, 1000),\n \"psf_value\": 7.902016 * u.Unit(\"deg-2\"),\n },\n {\n \"energy\": MapAxis.from_energy_bounds(1, 10, 100, \"TeV\", name=\"energy_true\"),\n \"rad\": MapAxis.from_nodes(np.arange(0, 2, 0.002), unit=\"deg\", name=\"rad\"),\n \"energy_shape\": 100,\n \"psf_energy\": 1.428893959,\n \"rad_shape\": 1000,\n \"psf_rad\": 0.000524,\n \"psf_exposure\": 4.723409e12 * u.Unit(\"cm2 s\"),\n \"psf_value_shape\": (100, 1000),\n \"psf_value\": 6.868102 * u.Unit(\"deg-2\"),\n },\n ],\n)\ndef test_make_psf(pars, data_store):\n obs = data_store.obs(23523)\n psf = obs.psf\n\n if pars[\"energy\"] is None:\n energy_axis = psf.axes[\"energy_true\"]\n else:\n energy_axis = pars[\"energy\"]\n\n if pars[\"rad\"] is None:\n rad_axis = psf.axes[\"rad\"]\n else:\n rad_axis = pars[\"rad\"]\n\n position = SkyCoord(83.63, 22.01, unit=\"deg\")\n\n geom = WcsGeom.create(\n skydir=position, npix=(3, 3), axes=[rad_axis, energy_axis], binsz=0.2\n )\n\n psf_map = 
make_psf_map_obs(geom, obs)\n psf = psf_map.to_region_nd_map(position)\n\n axis = psf.psf_map.geom.axes[\"energy_true\"]\n assert axis.unit == \"TeV\"\n assert axis.nbin == pars[\"energy_shape\"]\n assert_allclose(axis.center.value[15], pars[\"psf_energy\"], rtol=1e-3)\n\n rad_axis = psf.psf_map.geom.axes[\"rad\"]\n assert rad_axis.unit == \"deg\"\n assert rad_axis.nbin == pars[\"rad_shape\"]\n assert_allclose(rad_axis.center.to_value(\"rad\")[15], pars[\"psf_rad\"], rtol=1e-3)\n\n exposure = psf.exposure_map.quantity.squeeze()\n assert exposure.unit == \"m2 s\"\n assert exposure.shape == (pars[\"energy_shape\"],)\n assert_allclose(exposure[15], pars[\"psf_exposure\"], rtol=1e-3)\n\n data = psf.psf_map.quantity.squeeze()\n assert data.unit == \"deg-2\"\n assert data.shape == pars[\"psf_value_shape\"]\n assert_allclose(data[15, 50], pars[\"psf_value\"], rtol=1e-3)\n\n\n@requires_data()\ndef test_make_mean_psf(data_store):\n observations = data_store.get_observations([23523, 23526])\n position = SkyCoord(83.63, 22.01, unit=\"deg\")\n\n psf = observations[0].psf\n\n geom = WcsGeom.create(\n skydir=position,\n npix=(3, 3),\n axes=psf.axes[[\"rad\", \"energy_true\"]],\n binsz=0.2,\n )\n\n psf_map_1 = make_psf_map_obs(geom, observations[0])\n psf_map_2 = make_psf_map_obs(geom, observations[1])\n\n stacked_psf = psf_map_1.copy()\n stacked_psf.stack(psf_map_2)\n\n psf = stacked_psf.to_region_nd_map(position).psf_map\n\n assert not np.isnan(psf.quantity.squeeze()).any()\n assert_allclose(psf.quantity.squeeze()[22, 22], 12206.1665 / u.sr, rtol=1e-3)\n\n\n@requires_data()\[email protected](\"position\", [\"0d 0d\", \"180d 0d\", \"0d 90d\", \"180d -90d\"])\ndef test_psf_map_read(position):\n position = SkyCoord(position)\n filename = \"$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_psf_gc.fits.gz\"\n psf = PSFMap.read(filename, format=\"gtpsf\")\n\n value = psf.containment(position=position, energy_true=100 * u.GeV, rad=0.1 * u.deg)\n\n assert_allclose(value, 0.682022, rtol=1e-5)\n assert psf.psf_map.unit == \"sr-1\"\n\n\ndef test_psf_map_write_gtpsf(tmpdir):\n energy_axis_true = MapAxis.from_energy_bounds(\n \"1 TeV\", \"10 TeV\", nbin=3, name=\"energy_true\"\n )\n geom = RegionGeom.create(\"icrs;circle(0, 0, 0.1)\")\n psf = PSFMap.from_gauss(\n energy_axis_true=energy_axis_true, sigma=[0.1, 0.2, 0.3] * u.deg, geom=geom\n )\n psf.exposure_map = Map.from_geom(geom.to_cube([energy_axis_true]), unit=\"cm2 s\")\n\n filename = tmpdir / \"test_psf.fits\"\n psf.write(filename, format=\"gtpsf\")\n\n psf = PSFMap.read(filename, format=\"gtpsf\")\n\n value = psf.containment_radius(energy_true=energy_axis_true.center, fraction=0.394)\n\n assert_allclose(value, [0.1, 0.2, 0.3] * u.deg, rtol=1e-5)\n assert psf.psf_map.unit == \"sr-1\"\n\n\ndef test_to_image():\n psfmap = make_test_psfmap(0.15 * u.deg)\n\n psf2D = psfmap.to_image()\n assert_allclose(psf2D.psf_map.geom.data_shape, (1, 100, 25, 25))\n assert_allclose(psf2D.exposure_map.geom.data_shape, (1, 1, 25, 25))\n assert_allclose(psf2D.psf_map.data[0][0][12][12], 7.068315, rtol=1e-2)\n\n\ndef test_psf_map_from_gauss():\n energy_axis = MapAxis.from_nodes(\n [1, 3, 10], name=\"energy_true\", interp=\"log\", unit=\"TeV\"\n )\n rad = np.linspace(0, 1.5, 100) * u.deg\n rad_axis = MapAxis.from_nodes(rad, name=\"rad\", unit=\"deg\")\n\n # define sigmas starting at 0.1 in steps of 0.1 deg\n sigma = [0.1, 0.2, 0.4] * u.deg\n\n # with energy-dependent sigma\n psfmap = PSFMap.from_gauss(energy_axis, rad_axis, sigma)\n\n assert psfmap.psf_map.geom.axes[0] == rad_axis\n assert 
psfmap.psf_map.geom.axes[1] == energy_axis\n assert psfmap.exposure_map.geom.axes[\"rad\"].nbin == 1\n assert psfmap.exposure_map.geom.axes[\"energy_true\"] == psfmap.psf_map.geom.axes[1]\n assert psfmap.psf_map.unit == \"sr-1\"\n assert psfmap.psf_map.data.shape == (3, 100, 1, 2)\n\n radius = psfmap.containment_radius(fraction=0.394, energy_true=[1, 3, 10] * u.TeV)\n assert_allclose(radius, sigma, rtol=0.01)\n\n # test that it won't work with different number of sigmas and energies\n with pytest.raises(ValueError):\n PSFMap.from_gauss(energy_axis, rad_axis, sigma=[1, 2] * u.deg)\n\n\ndef test_psf_map_from_gauss_const_sigma():\n energy_axis = MapAxis.from_nodes(\n [1, 3, 10], name=\"energy_true\", interp=\"log\", unit=\"TeV\"\n )\n rad = np.linspace(0, 1.5, 100) * u.deg\n rad_axis = MapAxis.from_nodes(rad, name=\"rad\", unit=\"deg\")\n\n # with constant sigma\n psfmap = PSFMap.from_gauss(energy_axis, rad_axis, sigma=0.1 * u.deg)\n assert psfmap.psf_map.geom.axes[0] == rad_axis\n assert psfmap.psf_map.geom.axes[1] == energy_axis\n assert psfmap.psf_map.unit == Unit(\"sr-1\")\n assert psfmap.psf_map.data.shape == (3, 100, 1, 2)\n\n radius = psfmap.containment_radius(energy_true=[1, 3, 10] * u.TeV, fraction=0.394)\n assert_allclose(radius, 0.1 * u.deg, rtol=0.01)\n\n\n@requires_data()\n@requires_dependency(\"matplotlib\")\ndef test_psf_map_plot_containment_radius():\n filename = \"$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_psf_gc.fits.gz\"\n psf = PSFMap.read(filename, format=\"gtpsf\")\n\n with mpl_plot_check():\n psf.plot_containment_radius_vs_energy()\n\n\n@requires_data()\n@requires_dependency(\"matplotlib\")\ndef test_psf_map_plot_psf_vs_rad():\n filename = \"$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_psf_gc.fits.gz\"\n psf = PSFMap.read(filename, format=\"gtpsf\")\n\n with mpl_plot_check():\n psf.plot_psf_vs_rad()\n\n\n@requires_data()\ndef test_psf_containment_coords():\n # regression test to check the cooordinate conversion for PSFMap.containment\n psf = PSFMap.read(\"$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz\", hdu=\"PSF\")\n\n position = SkyCoord(\"266.415d\", \"-29.006d\", frame=\"icrs\")\n\n radius = psf.containment_radius(\n energy_true=1 * u.TeV, fraction=0.99, position=position\n )\n\n assert_allclose(radius, 0.10575 * u.deg, rtol=1e-5)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom gammapy.datasets import Datasets\nfrom gammapy.estimators.parameter import ParameterEstimator\nfrom gammapy.maps import Map, MapAxis\nfrom gammapy.modeling import Parameter\nfrom gammapy.modeling.models import ScaleSpectralModel\n\nlog = logging.getLogger(__name__)\n\n\nclass FluxEstimator(ParameterEstimator):\n \"\"\"Flux estimator.\n\n Estimates flux for a given list of datasets with their model in a given energy range.\n\n To estimate the model flux the amplitude of the reference spectral model is\n fitted within the energy range. 
The amplitude is re-normalized using the \"norm\" parameter,\n which specifies the deviation of the flux from the reference model in this\n energy range.\n\n Parameters\n ----------\n source : str or int\n For which source in the model to compute the flux.\n norm_min : float\n Minimum value for the norm used for the fit statistic profile evaluation.\n norm_max : float\n Maximum value for the norm used for the fit statistic profile evaluation.\n norm_n_values : int\n Number of norm values used for the fit statistic profile.\n norm_values : `numpy.ndarray`\n Array of norm values to be used for the fit statistic profile.\n n_sigma : int\n Sigma to use for asymmetric error computation.\n n_sigma_ul : int\n Sigma to use for upper limit computation.\n selection_optional : list of str\n Which additional quantities to estimate. Available options are:\n\n * \"all\": all the optional steps are executed\n * \"errn-errp\": estimate asymmetric errors.\n * \"ul\": estimate upper limits.\n * \"scan\": estimate fit statistic profiles.\n\n Default is None so the optional steps are not executed.\n fit : `Fit`\n Fit instance specifying the backend and fit options.\n reoptimize : bool\n Re-optimize other free model parameters. Default is False.\n \"\"\"\n\n tag = \"FluxEstimator\"\n\n def __init__(\n self,\n source=0,\n norm_min=0.2,\n norm_max=5,\n norm_n_values=11,\n norm_values=None,\n n_sigma=1,\n n_sigma_ul=2,\n selection_optional=None,\n fit=None,\n reoptimize=False,\n ):\n self.norm_values = norm_values\n self.norm_min = norm_min\n self.norm_max = norm_max\n self.norm_n_values = norm_n_values\n self.source = source\n super().__init__(\n null_value=0,\n n_sigma=n_sigma,\n n_sigma_ul=n_sigma_ul,\n selection_optional=selection_optional,\n fit=fit,\n reoptimize=reoptimize,\n )\n\n def _set_norm_parameter(self, norm=None, scaled_parameter=None):\n \"\"\"Define properties of the norm spectral parameter.\"\"\"\n if norm is None:\n norm = Parameter(\"norm\", 1, unit=\"\", interp=\"log\")\n\n norm.value = 1.0\n norm.frozen = False\n\n norm.min = scaled_parameter.min / scaled_parameter.value\n norm.max = scaled_parameter.max / scaled_parameter.value\n norm.interp = scaled_parameter.interp\n norm.scan_values = self.norm_values\n norm.scan_min = self.norm_min\n norm.scan_max = self.norm_max\n norm.scan_n_values = self.norm_n_values\n return norm\n\n def get_scale_model(self, models):\n \"\"\"Set scale model\n\n Parameters\n ----------\n models : `Models`\n Models\n\n Returns\n -------\n model : `ScaleSpectralModel`\n Scale spectral model\n \"\"\"\n ref_model = models[self.source].spectral_model\n scale_model = ScaleSpectralModel(ref_model)\n\n if hasattr(ref_model, \"amplitude\"):\n scaled_parameter = ref_model.amplitude\n else:\n scaled_parameter = ref_model.norm\n\n scale_model.norm = self._set_norm_parameter(scale_model.norm, scaled_parameter)\n return scale_model\n\n def estimate_npred_excess(self, datasets):\n \"\"\"Estimate npred excess for the source.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n\n Returns\n -------\n result : dict\n Dict with an array with one entry per dataset with the sum of the\n masked npred excess.\n \"\"\"\n npred_excess = []\n\n for dataset in datasets:\n name = datasets.models[self.source].name\n npred_signal = dataset.npred_signal(model_name=name)\n npred = Map.from_geom(dataset.mask.geom)\n npred.stack(npred_signal)\n npred_excess.append(npred.data[dataset.mask].sum())\n\n return {\"npred_excess\": np.array(npred_excess), \"datasets\": datasets.names}\n\n def 
run(self, datasets):\n \"\"\"Estimate flux for a given energy range.\n\n Parameters\n ----------\n datasets : list of `~gammapy.datasets.SpectrumDataset`\n Spectrum datasets.\n\n Returns\n -------\n result : dict\n Dict with results for the flux point.\n \"\"\"\n datasets = Datasets(datasets)\n models = datasets.models.copy()\n\n model = self.get_scale_model(models)\n\n energy_min, energy_max = datasets.energy_ranges\n energy_axis = MapAxis.from_energy_edges([energy_min.min(), energy_max.max()])\n\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n result = model.reference_fluxes(energy_axis=energy_axis)\n # convert to scalar values\n result = {key: value.item() for key, value in result.items()}\n\n models[self.source].spectral_model = model\n datasets.models = models\n result.update(super().run(datasets, model.norm))\n\n # TODO: find a cleaner way of including the npred_excess info\n datasets.models[self.source].spectral_model.norm.value = result[\"norm\"]\n result.update(self.estimate_npred_excess(datasets=datasets))\n return result\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport operator\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport astropy.units as u\nfrom gammapy.maps import MapAxis\nfrom gammapy.modeling.models import (\n SPECTRAL_MODEL_REGISTRY,\n BrokenPowerLawSpectralModel,\n CompoundSpectralModel,\n ConstantSpectralModel,\n EBLAbsorptionNormSpectralModel,\n ExpCutoffPowerLaw3FGLSpectralModel,\n ExpCutoffPowerLawNormSpectralModel,\n ExpCutoffPowerLawSpectralModel,\n GaussianSpectralModel,\n LogParabolaNormSpectralModel,\n LogParabolaSpectralModel,\n Model,\n NaimaSpectralModel,\n PiecewiseNormSpectralModel,\n PowerLaw2SpectralModel,\n PowerLawNormSpectralModel,\n PowerLawSpectralModel,\n SmoothBrokenPowerLawSpectralModel,\n SuperExpCutoffPowerLaw4FGLSpectralModel,\n TemplateSpectralModel,\n)\nfrom gammapy.utils.testing import (\n assert_quantity_allclose,\n mpl_plot_check,\n requires_data,\n requires_dependency,\n)\n\n\ndef table_model():\n energy = MapAxis.from_energy_bounds(0.1 * u.TeV, 100 * u.TeV, 1000).center\n\n model = PowerLawSpectralModel(\n index=2.3, amplitude=\"4 cm-2 s-1 TeV-1\", reference=\"1 TeV\"\n )\n dnde = model(energy)\n\n return TemplateSpectralModel(energy, dnde)\n\n\nTEST_MODELS = [\n dict(\n name=\"powerlaw\",\n model=PowerLawSpectralModel(\n index=2.3 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n ),\n val_at_2TeV=u.Quantity(4 * 2.0 ** (-2.3), \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(2.9227116204223784, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(6.650836884969039, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"powerlaw\",\n model=PowerLawSpectralModel(\n index=2 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n ),\n val_at_2TeV=u.Quantity(1.0, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(3.6, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(9.210340371976184, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"norm-powerlaw\",\n model=PowerLawNormSpectralModel(\n tilt=2 * u.Unit(\"\"),\n norm=4.0 * u.Unit(\"\"),\n reference=1 * u.TeV,\n ),\n val_at_2TeV=u.Quantity(1.0, \"\"),\n integral_1_10TeV=u.Quantity(3.6, \"TeV\"),\n eflux_1_10TeV=u.Quantity(9.210340371976184, \"TeV2\"),\n ),\n dict(\n name=\"powerlaw2\",\n model=PowerLaw2SpectralModel(\n amplitude=u.Quantity(2.9227116204223784, \"cm-2 s-1\"),\n index=2.3 * u.Unit(\"\"),\n emin=1 * u.TeV,\n emax=10 * u.TeV,\n ),\n val_at_2TeV=u.Quantity(4 * 2.0 ** (-2.3), \"cm-2 s-1 
TeV-1\"),\n integral_1_10TeV=u.Quantity(2.9227116204223784, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(6.650836884969039, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"ecpl\",\n model=ExpCutoffPowerLawSpectralModel(\n index=1.6 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n lambda_=0.1 / u.TeV,\n ),\n val_at_2TeV=u.Quantity(1.080321705479446, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(3.765838739678921, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(9.901735870666526, \"TeV cm-2 s-1\"),\n e_peak=4 * u.TeV,\n ),\n dict(\n name=\"norm-ecpl\",\n model=ExpCutoffPowerLawNormSpectralModel(\n index=1.6 * u.Unit(\"\"),\n norm=4 * u.Unit(\"\"),\n reference=1 * u.TeV,\n lambda_=0.1 / u.TeV,\n ),\n val_at_2TeV=u.Quantity(1.080321705479446, \"\"),\n integral_1_10TeV=u.Quantity(3.765838739678921, \"TeV\"),\n eflux_1_10TeV=u.Quantity(9.901735870666526, \"TeV2\"),\n ),\n dict(\n name=\"ecpl_3fgl\",\n model=ExpCutoffPowerLaw3FGLSpectralModel(\n index=2.3 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n ecut=10 * u.TeV,\n ),\n val_at_2TeV=u.Quantity(0.7349563611124971, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(2.6034046173089, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(5.340285560055799, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"plsec_4fgl\",\n model=SuperExpCutoffPowerLaw4FGLSpectralModel(\n index_1=1.5,\n index_2=2,\n amplitude=1 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n expfactor=1e-2,\n ),\n val_at_2TeV=u.Quantity(0.3431043087721737, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(1.2125247, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(3.38072082, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"logpar\",\n model=LogParabolaSpectralModel(\n alpha=2.3 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n beta=0.5 * u.Unit(\"\"),\n ),\n val_at_2TeV=u.Quantity(0.6387956571420305, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(2.255689748270628, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(3.9586515834989267, \"TeV cm-2 s-1\"),\n e_peak=0.74082 * u.TeV,\n ),\n dict(\n name=\"norm-logpar\",\n model=LogParabolaNormSpectralModel(\n alpha=2.3 * u.Unit(\"\"),\n norm=4 * u.Unit(\"\"),\n reference=1 * u.TeV,\n beta=0.5 * u.Unit(\"\"),\n ),\n val_at_2TeV=u.Quantity(0.6387956571420305, \"\"),\n integral_1_10TeV=u.Quantity(2.255689748270628, \"TeV\"),\n eflux_1_10TeV=u.Quantity(3.9586515834989267, \"TeV2\"),\n ),\n dict(\n name=\"logpar10\",\n model=LogParabolaSpectralModel.from_log10(\n alpha=2.3 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n beta=1.151292546497023 * u.Unit(\"\"),\n ),\n val_at_2TeV=u.Quantity(0.6387956571420305, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(2.255689748270628, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(3.9586515834989267, \"TeV cm-2 s-1\"),\n e_peak=0.74082 * u.TeV,\n ),\n dict(\n name=\"constant\",\n model=ConstantSpectralModel(const=4 / u.cm ** 2 / u.s / u.TeV),\n val_at_2TeV=u.Quantity(4, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(35.9999999999999, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(198.00000000000006, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"powerlaw_index1\",\n model=PowerLawSpectralModel(\n index=1 * u.Unit(\"\"),\n amplitude=2 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n ),\n val_at_2TeV=u.Quantity(1.0, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(4.605170185, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(18.0, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"ecpl_2\",\n model=ExpCutoffPowerLawSpectralModel(\n index=2.0 
* u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n lambda_=0.1 / u.TeV,\n ),\n val_at_2TeV=u.Quantity(0.81873075, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(2.83075297, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(6.41406327, \"TeV cm-2 s-1\"),\n e_peak=np.nan * u.TeV,\n ),\n dict(\n name=\"GaussianSpectralModel\",\n model=GaussianSpectralModel(\n norm=4 / u.cm ** 2 / u.s, mean=2 * u.TeV, sigma=0.2 * u.TeV\n ),\n val_at_2TeV=u.Quantity(7.978845608028654, \"cm-2 s-1 TeV-1\"),\n val_at_3TeV=u.Quantity(2.973439029468601e-05, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(3.9999988533937123, \"cm-2 s-1\"),\n integral_infinity=u.Quantity(4, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(7.999998896163037, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"ecpl\",\n model=ExpCutoffPowerLawSpectralModel(\n index=1.8 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n reference=1 * u.TeV,\n lambda_=0.1 / u.TeV,\n alpha=0.8,\n ),\n val_at_2TeV=u.Quantity(0.871694294554192, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(3.026342, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(7.38652453, \"TeV cm-2 s-1\"),\n e_peak=1.7677669529663684 * u.TeV,\n ),\n dict(\n name=\"bpl\",\n model=BrokenPowerLawSpectralModel(\n index1=1.5 * u.Unit(\"\"),\n index2=2.5 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n ebreak=0.5 * u.TeV,\n ),\n val_at_2TeV=u.Quantity(0.125, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(0.45649740094103286, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(0.9669999668731384, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"sbpl\",\n model=SmoothBrokenPowerLawSpectralModel(\n index1=1.5 * u.Unit(\"\"),\n index2=2.5 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n ebreak=0.5 * u.TeV,\n reference=1 * u.TeV,\n beta=1,\n ),\n val_at_2TeV=u.Quantity(0.28284271247461906, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(0.9956923907948155, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(2.2372256145972207, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"sbpl-hard\",\n model=SmoothBrokenPowerLawSpectralModel(\n index1=2.5 * u.Unit(\"\"),\n index2=1.5 * u.Unit(\"\"),\n amplitude=4 / u.cm ** 2 / u.s / u.TeV,\n ebreak=0.5 * u.TeV,\n reference=1 * u.TeV,\n beta=1,\n ),\n val_at_2TeV=u.Quantity(3.5355339059327378, \"cm-2 s-1 TeV-1\"),\n integral_1_10TeV=u.Quantity(13.522782989735022, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(40.06681812966845, \"TeV cm-2 s-1\"),\n ),\n dict(\n name=\"pbpl\",\n model=PiecewiseNormSpectralModel(\n energy=[1, 3, 7, 10] * u.TeV,\n norms=[1, 5, 3, 0.5] * u.Unit(\"\"),\n ),\n val_at_2TeV=u.Quantity(2.76058404, \"\"),\n integral_1_10TeV=u.Quantity(24.758255, \"TeV\"),\n eflux_1_10TeV=u.Quantity(117.745068, \"TeV2\"),\n ),\n]\n\n# Add compound models\n\nTEST_MODELS.append(\n dict(\n name=\"compound3\",\n model=TEST_MODELS[0][\"model\"] + TEST_MODELS[0][\"model\"],\n val_at_2TeV=TEST_MODELS[0][\"val_at_2TeV\"] * 2,\n integral_1_10TeV=TEST_MODELS[0][\"integral_1_10TeV\"] * 2,\n eflux_1_10TeV=TEST_MODELS[0][\"eflux_1_10TeV\"] * 2,\n )\n)\n\nTEST_MODELS.append(\n dict(\n name=\"compound6\",\n model=TEST_MODELS[11][\"model\"] + u.Quantity(4, \"cm-2 s-1 TeV-1\"),\n val_at_2TeV=TEST_MODELS[11][\"val_at_2TeV\"] * 2,\n integral_1_10TeV=TEST_MODELS[11][\"integral_1_10TeV\"] * 2,\n eflux_1_10TeV=TEST_MODELS[11][\"eflux_1_10TeV\"] * 2,\n )\n)\n\nTEST_MODELS.append(\n dict(\n name=\"table_model\",\n model=table_model(),\n # Values took from power law expectation\n val_at_2TeV=u.Quantity(4 * 2.0 ** (-2.3), \"cm-2 s-1 TeV-1\"),\n 
integral_1_10TeV=u.Quantity(2.9227116204223784, \"cm-2 s-1\"),\n eflux_1_10TeV=u.Quantity(6.650836884969039, \"TeV cm-2 s-1\"),\n )\n)\n\n\n@requires_dependency(\"scipy\")\[email protected](\"spectrum\", TEST_MODELS, ids=lambda _: _[\"name\"])\ndef test_models(spectrum):\n model = spectrum[\"model\"]\n for p in model.parameters:\n assert p._type == \"spectral\"\n energy = 2 * u.TeV\n value = model(energy)\n energies = [2, 3] * u.TeV\n values = model(energies)\n assert_quantity_allclose(value, spectrum[\"val_at_2TeV\"], rtol=1e-7)\n if \"val_at_3TeV\" in spectrum:\n energy = 3 * u.TeV\n value = model(energy)\n assert_quantity_allclose(value, spectrum[\"val_at_3TeV\"], rtol=1e-7)\n\n energy_min = 1 * u.TeV\n energy_max = 10 * u.TeV\n assert_quantity_allclose(\n model.integral(energy_min=energy_min, energy_max=energy_max),\n spectrum[\"integral_1_10TeV\"],\n rtol=1e-5,\n )\n assert_quantity_allclose(\n model.energy_flux(energy_min=energy_min, energy_max=energy_max),\n spectrum[\"eflux_1_10TeV\"],\n rtol=1e-5,\n )\n\n if \"e_peak\" in spectrum:\n assert_quantity_allclose(model.e_peak, spectrum[\"e_peak\"], rtol=1e-2)\n\n # inverse for ConstantSpectralModel is irrelevant.\n # inverse for Gaussian and PiecewiseNormSpectralModel have a degeneracy\n if not (\n isinstance(model, ConstantSpectralModel)\n or spectrum[\"name\"] == \"compound6\"\n or spectrum[\"name\"] == \"GaussianSpectralModel\"\n or spectrum[\"name\"] == \"pbpl\"\n ):\n assert_quantity_allclose(model.inverse(value), energy, rtol=0.01)\n inverse = model.inverse_all(values)\n for ke, ener in enumerate(energies):\n assert_quantity_allclose(inverse[ke], energies[ke], rtol=0.01)\n\n if \"integral_infinity\" in spectrum:\n energy_min = 0 * u.TeV\n energy_max = 10000 * u.TeV\n assert_quantity_allclose(\n model.integral(energy_min=energy_min, energy_max=energy_max),\n spectrum[\"integral_infinity\"],\n )\n\n model.to_dict()\n\n assert \"\" in str(model)\n\n # check that an array evaluation works (otherwise e.g. 
plotting raises an error)\n e_array = [2, 10, 20] * u.TeV\n e_array = e_array[:, np.newaxis, np.newaxis]\n val = model(e_array)\n assert val.shape == e_array.shape\n assert_quantity_allclose(val[0], spectrum[\"val_at_2TeV\"])\n\n\ndef test_model_unit():\n pwl = PowerLawSpectralModel()\n value = pwl(500 * u.MeV)\n assert value.unit == \"cm-2 s-1 TeV-1\"\n\n\n@requires_dependency(\"matplotlib\")\ndef test_model_plot():\n pwl = PowerLawSpectralModel(\n amplitude=1e-12 * u.Unit(\"TeV-1 cm-2 s-1\"), reference=1 * u.Unit(\"TeV\"), index=2\n )\n pwl.amplitude.error = 0.1e-12 * u.Unit(\"TeV-1 cm-2 s-1\")\n\n with mpl_plot_check():\n pwl.plot((1 * u.TeV, 10 * u.TeV))\n\n with mpl_plot_check():\n pwl.plot_error((1 * u.TeV, 10 * u.TeV))\n\n\n@requires_dependency(\"matplotlib\")\ndef test_model_plot_sed_type():\n pwl = PowerLawSpectralModel(\n amplitude=1e-12 * u.Unit(\"TeV-1 cm-2 s-1\"), reference=1 * u.Unit(\"TeV\"), index=2\n )\n pwl.amplitude.error = 0.1e-12 * u.Unit(\"TeV-1 cm-2 s-1\")\n\n with mpl_plot_check():\n ax1 = pwl.plot((1 * u.TeV, 100 * u.TeV), sed_type=\"dnde\")\n ax2 = pwl.plot_error((1 * u.TeV, 100 * u.TeV), sed_type=\"dnde\")\n assert ax1.axes.axes.get_ylabel() == \"dnde [1 / (cm2 s TeV)]\"\n assert ax2.axes.axes.get_ylabel() == \"dnde [1 / (cm2 s TeV)]\"\n\n with mpl_plot_check():\n ax1 = pwl.plot((1 * u.TeV, 100 * u.TeV), sed_type=\"e2dnde\")\n ax2 = pwl.plot_error((1 * u.TeV, 100 * u.TeV), sed_type=\"e2dnde\")\n assert ax1.axes.axes.get_ylabel() == \"e2dnde [erg / (cm2 s)]\"\n assert ax2.axes.axes.get_ylabel() == \"e2dnde [erg / (cm2 s)]\"\n\n with mpl_plot_check():\n ax1 = pwl.plot((1 * u.TeV, 100 * u.TeV), sed_type=\"flux\")\n ax2 = pwl.plot_error((1 * u.TeV, 100 * u.TeV), sed_type=\"flux\")\n assert ax1.axes.axes.get_ylabel() == \"flux [1 / (cm2 s)]\"\n assert ax2.axes.axes.get_ylabel() == \"flux [1 / (cm2 s)]\"\n\n with mpl_plot_check():\n ax1 = pwl.plot((1 * u.TeV, 100 * u.TeV), sed_type=\"eflux\")\n ax2 = pwl.plot_error((1 * u.TeV, 100 * u.TeV), sed_type=\"eflux\")\n assert ax1.axes.axes.get_ylabel() == \"eflux [erg / (cm2 s)]\"\n assert ax2.axes.axes.get_ylabel() == \"eflux [erg / (cm2 s)]\"\n\n\ndef test_to_from_dict():\n spectrum = TEST_MODELS[0]\n model = spectrum[\"model\"]\n\n model_dict = model.to_dict()\n # Here we reverse the order of parameters list to ensure assignment is correct\n model_dict[\"spectral\"][\"parameters\"].reverse()\n\n model_class = SPECTRAL_MODEL_REGISTRY.get_cls(model_dict[\"spectral\"][\"type\"])\n new_model = model_class.from_dict(model_dict)\n\n assert isinstance(new_model, PowerLawSpectralModel)\n\n actual = [par.value for par in new_model.parameters]\n desired = [par.value for par in model.parameters]\n assert_quantity_allclose(actual, desired)\n\n actual = [par.frozen for par in new_model.parameters]\n desired = [par.frozen for par in model.parameters]\n assert_allclose(actual, desired)\n\n new_model = Model.from_dict(model_dict)\n\n assert isinstance(new_model, PowerLawSpectralModel)\n\n actual = [par.value for par in new_model.parameters]\n desired = [par.value for par in model.parameters]\n assert_quantity_allclose(actual, desired)\n\n actual = [par.frozen for par in new_model.parameters]\n desired = [par.frozen for par in model.parameters]\n assert_allclose(actual, desired)\n\n\ndef test_to_from_dict_partial_input(caplog):\n spectrum = TEST_MODELS[0]\n model = spectrum[\"model\"]\n\n model_dict = model.to_dict()\n # Here we remove the reference energy\n 
model_dict[\"spectral\"][\"parameters\"].remove(model_dict[\"spectral\"][\"parameters\"][2])\n\n model_class = SPECTRAL_MODEL_REGISTRY.get_cls(model_dict[\"spectral\"][\"type\"])\n new_model = model_class.from_dict(model_dict)\n\n assert isinstance(new_model, PowerLawSpectralModel)\n\n actual = [par.value for par in new_model.parameters]\n desired = [par.value for par in model.parameters]\n assert_quantity_allclose(actual, desired)\n\n actual = [par.frozen for par in new_model.parameters]\n desired = [par.frozen for par in model.parameters]\n assert_allclose(actual, desired)\n assert \"WARNING\" in [_.levelname for _ in caplog.records]\n assert \"Parameter reference not defined. Using default value: 1.0 TeV\" in [\n _.message for _ in caplog.records\n ]\n\n\ndef test_to_from_dict_compound():\n spectrum = TEST_MODELS[-2]\n model = spectrum[\"model\"]\n assert spectrum[\"name\"] == \"compound6\"\n model_dict = model.to_dict()\n assert model_dict[\"spectral\"][\"operator\"] == \"add\"\n model_class = SPECTRAL_MODEL_REGISTRY.get_cls(model_dict[\"spectral\"][\"type\"])\n new_model = model_class.from_dict(model_dict)\n\n assert isinstance(new_model, CompoundSpectralModel)\n\n actual = [par.value for par in new_model.parameters]\n desired = [par.value for par in model.parameters]\n assert_quantity_allclose(actual, desired)\n\n\n@requires_dependency(\"matplotlib\")\n@requires_data()\ndef test_table_model_from_file():\n filename = \"$GAMMAPY_DATA/ebl/ebl_franceschini.fits.gz\"\n absorption_z03 = TemplateSpectralModel.read_xspec_model(\n filename=filename, param=0.3\n )\n value = absorption_z03(1 * u.TeV)\n assert_allclose(value, 1)\n\n\n@requires_data()\ndef test_absorption():\n # absorption values for given redshift\n redshift = 0.117\n absorption = EBLAbsorptionNormSpectralModel.read_builtin(\n \"dominguez\", redshift=redshift\n )\n\n # Spectral model corresponding to PKS 2155-304 (quiescent state)\n index = 3.53\n amplitude = 1.81 * 1e-12 * u.Unit(\"cm-2 s-1 TeV-1\")\n reference = 1 * u.TeV\n pwl = PowerLawSpectralModel(index=index, amplitude=amplitude, reference=reference)\n\n # EBL + PWL model\n model = pwl * absorption\n desired = u.Quantity(5.140765e-13, \"TeV-1 s-1 cm-2\")\n assert_quantity_allclose(model(1 * u.TeV), desired, rtol=1e-3)\n assert model.model2.alpha_norm.value == 1.0\n\n # EBL + PWL model: test if norm of EBL=0: it mean model =pwl\n model.parameters[\"alpha_norm\"].value = 0\n assert_quantity_allclose(model(1 * u.TeV), pwl(1 * u.TeV), rtol=1e-3)\n\n # EBL + PWL model: Test with a norm different of 1\n absorption = EBLAbsorptionNormSpectralModel.read_builtin(\n \"dominguez\", redshift=redshift, alpha_norm=1.5\n )\n model = pwl * absorption\n desired = u.Quantity(2.739695e-13, \"TeV-1 s-1 cm-2\")\n assert model.model2.alpha_norm.value == 1.5\n assert_quantity_allclose(model(1 * u.TeV), desired, rtol=1e-3)\n\n # Test error propagation\n model.model1.amplitude.error = 0.1 * model.model1.amplitude.value\n dnde, dnde_err = model.evaluate_error(1 * u.TeV)\n assert_allclose(dnde_err / dnde, 0.1)\n\n\n@requires_data()\ndef test_absorbed_extrapolate():\n ebl_model = \"dominguez\"\n z = 0.0001\n alpha_norm = 1\n absorption = EBLAbsorptionNormSpectralModel.read_builtin(ebl_model)\n\n values = absorption.evaluate(1 * u.TeV, z, alpha_norm)\n assert_allclose(values, 1)\n\n\ndef test_ecpl_integrate():\n # regression test to check the numerical integration for small energy bins\n ecpl = ExpCutoffPowerLawSpectralModel()\n value = ecpl.integral(1 * u.TeV, 1.1 * u.TeV)\n assert value.isscalar\n 
assert_quantity_allclose(value, 8.380714e-14 * u.Unit(\"s-1 cm-2\"))\n\n\ndef test_pwl_pivot_energy():\n pwl = PowerLawSpectralModel(amplitude=\"5.35510540e-11 cm-2 s-1 TeV-1\")\n\n pwl.covariance = [\n [0.0318377 ** 2, 6.56889442e-14, 0],\n [6.56889442e-14, 0, 0],\n [0, 0, 0],\n ]\n\n assert_quantity_allclose(pwl.pivot_energy, 3.3540034240210987 * u.TeV)\n\n\ndef test_TemplateSpectralModel_evaluate_tiny():\n energy = np.array([1.00000000e06, 1.25892541e06, 1.58489319e06, 1.99526231e06])\n values = np.array([4.39150790e-38, 1.96639562e-38, 8.80497507e-39, 3.94262401e-39])\n\n model = TemplateSpectralModel(\n energy=energy, values=values * u.Unit(\"MeV-1 s-1 sr-1\")\n )\n result = model.evaluate(energy)\n tiny = np.finfo(np.float32).tiny\n mask = abs(values) - tiny > tiny\n np.testing.assert_allclose(\n values[mask] / values.max(), result[mask].value / values.max()\n )\n mask = abs(result.value) - tiny <= tiny\n assert np.all(result[mask] == 0.0)\n\n\ndef test_TemplateSpectralModel_compound():\n energy = [1.00e06, 1.25e06, 1.58e06, 1.99e06] * u.MeV\n values = [4.39e-7, 1.96e-7, 8.80e-7, 3.94e-7] * u.Unit(\"MeV-1 s-1 sr-1\")\n\n template = TemplateSpectralModel(energy=energy, values=values)\n correction = PowerLawNormSpectralModel(norm=2)\n model = CompoundSpectralModel(template, correction, operator=operator.mul)\n assert np.allclose(model(energy), 2 * values)\n\n model_mul = template * correction\n assert isinstance(model_mul, CompoundSpectralModel)\n assert np.allclose(model_mul(energy), 2 * values)\n\n model_dict = model.to_dict()\n assert model_dict[\"spectral\"][\"operator\"] == \"mul\"\n model_class = SPECTRAL_MODEL_REGISTRY.get_cls(model_dict[\"spectral\"][\"type\"])\n new_model = model_class.from_dict(model_dict)\n assert isinstance(new_model, CompoundSpectralModel)\n assert np.allclose(new_model(energy), 2 * values)\n\n\n@requires_dependency(\"naima\")\nclass TestNaimaModel:\n # Used to test model value at 2 TeV\n energy = 2 * u.TeV\n\n # Used to test model integral and energy flux\n energy_min = 1 * u.TeV\n energy_max = 10 * u.TeV\n\n # Used to that if array evaluation works\n e_array = [2, 10, 20] * u.TeV\n e_array = e_array[:, np.newaxis, np.newaxis]\n\n def test_pion_decay(self):\n import naima\n\n particle_distribution = naima.models.PowerLaw(\n amplitude=2e33 / u.eV, e_0=10 * u.TeV, alpha=2.5\n )\n radiative_model = naima.radiative.PionDecay(\n particle_distribution, nh=1 * u.cm ** -3\n )\n model = NaimaSpectralModel(radiative_model)\n for p in model.parameters:\n assert p._type == \"spectral\"\n\n val_at_2TeV = 9.725347355450884e-14 * u.Unit(\"cm-2 s-1 TeV-1\")\n integral_1_10TeV = 3.530537143620737e-13 * u.Unit(\"cm-2 s-1\")\n eflux_1_10TeV = 7.643559573105779e-13 * u.Unit(\"TeV cm-2 s-1\")\n\n value = model(self.energy)\n assert_quantity_allclose(value, val_at_2TeV)\n assert_quantity_allclose(\n model.integral(energy_min=self.energy_min, energy_max=self.energy_max),\n integral_1_10TeV,\n )\n assert_quantity_allclose(\n model.energy_flux(energy_min=self.energy_min, energy_max=self.energy_max),\n eflux_1_10TeV,\n )\n val = model(self.e_array)\n assert val.shape == self.e_array.shape\n\n model.amplitude.error = 0.1 * model.amplitude.value\n\n out = model.evaluate_error(1 * u.TeV)\n assert_allclose(out.data, [5.266068e-13, 5.266068e-14], rtol=1e-3)\n\n def test_ic(self):\n import naima\n\n particle_distribution = naima.models.ExponentialCutoffBrokenPowerLaw(\n amplitude=2e33 / u.eV,\n e_0=10 * u.TeV,\n alpha_1=2.5,\n alpha_2=2.7,\n e_break=900 * u.GeV,\n e_cutoff=10 * u.TeV,\n 
)\n radiative_model = naima.radiative.InverseCompton(\n particle_distribution, seed_photon_fields=[\"CMB\"]\n )\n\n model = NaimaSpectralModel(radiative_model)\n for p in model.parameters:\n assert p._type == \"spectral\"\n\n val_at_2TeV = 4.347836316893546e-12 * u.Unit(\"cm-2 s-1 TeV-1\")\n integral_1_10TeV = 1.595813e-11 * u.Unit(\"cm-2 s-1\")\n eflux_1_10TeV = 2.851283e-11 * u.Unit(\"TeV cm-2 s-1\")\n\n value = model(self.energy)\n assert_quantity_allclose(value, val_at_2TeV)\n assert_quantity_allclose(\n model.integral(energy_min=self.energy_min, energy_max=self.energy_max),\n integral_1_10TeV,\n rtol=1e-5,\n )\n assert_quantity_allclose(\n model.energy_flux(energy_min=self.energy_min, energy_max=self.energy_max),\n eflux_1_10TeV,\n rtol=1e-5,\n )\n val = model(self.e_array)\n assert val.shape == self.e_array.shape\n\n def test_synchrotron(self):\n import naima\n\n particle_distribution = naima.models.LogParabola(\n amplitude=2e33 / u.eV, e_0=10 * u.TeV, alpha=1.3, beta=0.5\n )\n radiative_model = naima.radiative.Synchrotron(particle_distribution, B=2 * u.G)\n\n model = NaimaSpectralModel(radiative_model)\n for p in model.parameters:\n assert p._type == \"spectral\"\n\n val_at_2TeV = 1.0565840392550432e-24 * u.Unit(\"cm-2 s-1 TeV-1\")\n integral_1_10TeV = 4.449186e-13 * u.Unit(\"cm-2 s-1\")\n eflux_1_10TeV = 4.594121e-13 * u.Unit(\"TeV cm-2 s-1\")\n\n value = model(self.energy)\n assert_quantity_allclose(value, val_at_2TeV)\n assert_quantity_allclose(\n model.integral(energy_min=self.energy_min, energy_max=self.energy_max),\n integral_1_10TeV,\n rtol=1e-5,\n )\n assert_quantity_allclose(\n model.energy_flux(energy_min=self.energy_min, energy_max=self.energy_max),\n eflux_1_10TeV,\n rtol=1e-5,\n )\n val = model(self.e_array)\n assert val.shape == self.e_array.shape\n\n model.B.value = 3 # update B\n val_at_2TeV = 5.1985064062296e-16 * u.Unit(\"cm-2 s-1 TeV-1\")\n value = model(self.energy)\n assert_quantity_allclose(value, val_at_2TeV)\n\n def test_ssc(self):\n import naima\n\n ECBPL = naima.models.ExponentialCutoffBrokenPowerLaw(\n amplitude=3.699e36 / u.eV,\n e_0=1 * u.TeV,\n e_break=0.265 * u.TeV,\n alpha_1=1.5,\n alpha_2=3.233,\n e_cutoff=1863 * u.TeV,\n beta=2.0,\n )\n\n radiative_model = naima.radiative.InverseCompton(\n ECBPL,\n seed_photon_fields=[\n \"CMB\",\n [\"FIR\", 70 * u.K, 0.5 * u.eV / u.cm ** 3],\n [\"NIR\", 5000 * u.K, 1 * u.eV / u.cm ** 3],\n ],\n Eemax=50 * u.PeV,\n Eemin=0.1 * u.GeV,\n )\n B = 125 * u.uG\n radius = 2.1 * u.pc\n nested_models = {\"SSC\": {\"B\": B, \"radius\": radius}}\n model = NaimaSpectralModel(radiative_model, nested_models=nested_models)\n assert_quantity_allclose(model.B.quantity, B)\n assert_quantity_allclose(model.radius.quantity, radius)\n val_at_2TeV = 1.6703761561806372e-11 * u.Unit(\"cm-2 s-1 TeV-1\")\n value = model(self.energy)\n assert_quantity_allclose(value, val_at_2TeV, rtol=1e-5)\n\n model.parameters[\"B\"].value = 100\n val_at_2TeV = 1.441331153167876e-11 * u.Unit(\"cm-2 s-1 TeV-1\")\n value = model(self.energy)\n assert_quantity_allclose(value, val_at_2TeV, rtol=1e-5)\n\n def test_bad_init(self):\n import naima\n\n particle_distribution = naima.models.PowerLaw(\n amplitude=2e33 / u.eV, e_0=10 * u.TeV, alpha=2.5\n )\n radiative_model = naima.radiative.PionDecay(\n particle_distribution, nh=1 * u.cm ** -3\n )\n model = NaimaSpectralModel(radiative_model)\n\n with pytest.raises(NotImplementedError):\n NaimaSpectralModel.from_dict(model.to_dict())\n with pytest.raises(NotImplementedError):\n 
NaimaSpectralModel.from_parameters(model.parameters)\n\n\nclass TestSpectralModelErrorPropagation:\n \"\"\"Test spectral model error propagation.\n\n https://github.com/gammapy/gammapy/blob/master/docs/development/pigs/pig-014.rst#proposal\n https://nbviewer.jupyter.org/github/gammapy/gammapy-extra/blob/master/experiments/uncertainty_estimation_prototype.ipynb\n \"\"\"\n\n def setup(self):\n self.model = LogParabolaSpectralModel(\n amplitude=3.76e-11 * u.Unit(\"cm-2 s-1 TeV-1\"),\n reference=1 * u.TeV,\n alpha=2.44,\n beta=0.25,\n )\n self.model.covariance = [\n [1.31e-23, 0, -6.80e-14, 3.04e-13],\n [0, 0, 0, 0],\n [-6.80e-14, 0, 0.00899, 0.00904],\n [3.04e-13, 0, 0.00904, 0.0284],\n ]\n\n def test_evaluate_error_scalar(self):\n # evaluate_error on scalar\n out = self.model.evaluate_error(1 * u.TeV)\n assert isinstance(out, u.Quantity)\n assert out.unit == \"cm-2 s-1 TeV-1\"\n assert out.shape == (2,)\n assert_allclose(out.data, [3.7600e-11, 3.6193e-12], rtol=1e-3)\n\n def test_evaluate_error_array(self):\n out = self.model.evaluate_error([1, 100] * u.TeV)\n assert out.shape == (2, 2)\n expected = [[3.76e-11, 2.469e-18], [3.619e-12, 9.375e-18]]\n assert_allclose(out.data, expected, rtol=1e-3)\n\n def test_evaluate_error_unit(self):\n out = self.model.evaluate_error(1e6 * u.MeV)\n assert out.unit == \"cm-2 s-1 TeV-1\"\n assert_allclose(out.data, [3.760e-11, 3.6193e-12], rtol=1e-3)\n\n def test_integral_error(self):\n out = self.model.integral_error(1 * u.TeV, 10 * u.TeV)\n assert out.unit == \"cm-2 s-1\"\n assert out.shape == (2,)\n assert_allclose(out.data, [2.197e-11, 2.796e-12], rtol=1e-3)\n\n def test_energy_flux_error(self):\n out = self.model.energy_flux_error(1 * u.TeV, 10 * u.TeV)\n assert out.unit == \"TeV cm-2 s-1\"\n assert out.shape == (2,)\n assert_allclose(out.data, [4.119e-11, 8.157e-12], rtol=1e-3)\n\n\ndef test_dnde_error_ecpl_model():\n # Regression test for ECPL model\n # https://github.com/gammapy/gammapy/issues/2007\n model = ExpCutoffPowerLawSpectralModel(\n amplitude=2.076183759227292e-12 * u.Unit(\"cm-2 s-1 TeV-1\"),\n index=1.8763343736076483,\n lambda_=0.08703226432146616 * u.Unit(\"TeV-1\"),\n reference=1 * u.TeV,\n )\n model.covariance = [\n [0.00204191498, -1.507724e-14, 0.0, -0.001834819, 0.0],\n [-1.507724e-14, 1.6864740e-25, 0.0, 1.854251e-14, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [-0.001834819175, 1.8542517e-14, 0.0, 0.0032559101, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n\n out = model.evaluate_error(1 * u.TeV)\n assert_allclose(out.data, [1.903129e-12, 2.979976e-13], rtol=1e-3)\n\n out = model.evaluate_error(0.1 * u.TeV)\n assert_allclose(out.data, [1.548176e-10, 1.933612e-11], rtol=1e-3)\n\n\ndef test_integral_error_power_law():\n energy = np.linspace(1 * u.TeV, 10 * u.TeV, 10)\n energy_min = energy[:-1]\n energy_max = energy[1:]\n\n powerlaw = PowerLawSpectralModel()\n powerlaw.parameters[\"index\"].error = 0.4\n powerlaw.parameters[\"amplitude\"].error = 1e-13\n\n flux, flux_error = powerlaw.integral_error(energy_min, energy_max)\n\n assert_allclose(flux.value[0] / 1e-13, 5.0, rtol=1e-3)\n assert_allclose(flux_error.value[0] / 1e-14, 7.915984, rtol=1e-3)\n\n\ndef test_integral_error_exp_cut_off_power_law():\n energy = np.linspace(1 * u.TeV, 10 * u.TeV, 10)\n energy_min = energy[:-1]\n energy_max = energy[1:]\n\n exppowerlaw = ExpCutoffPowerLawSpectralModel()\n exppowerlaw.parameters[\"index\"].error = 0.4\n exppowerlaw.parameters[\"amplitude\"].error = 1e-13\n exppowerlaw.parameters[\"lambda_\"].error = 0.03\n\n flux, flux_error = 
exppowerlaw.integral_error(energy_min, energy_max)\n\n assert_allclose(flux.value[0] / 1e-13, 5.05855622, rtol=0.01)\n assert_allclose(flux_error.value[0] / 1e-14, 8.552617, rtol=0.01)\n\n\ndef test_energy_flux_error_power_law():\n energy_min = 1 * u.TeV\n energy_max = 10 * u.TeV\n\n powerlaw = PowerLawSpectralModel()\n powerlaw.parameters[\"index\"].error = 0.4\n powerlaw.parameters[\"amplitude\"].error = 1e-13\n\n enrg_flux, enrg_flux_error = powerlaw.energy_flux_error(energy_min, energy_max)\n assert_allclose(enrg_flux.value / 1e-12, 2.303, rtol=0.001)\n assert_allclose(enrg_flux_error.value / 1e-12, 1.085, rtol=0.001)\n\n\ndef test_energy_flux_error_exp_cutoff_power_law():\n energy_min = 1 * u.TeV\n energy_max = 10 * u.TeV\n\n exppowerlaw = ExpCutoffPowerLawSpectralModel()\n exppowerlaw.parameters[\"index\"].error = 0.4\n exppowerlaw.parameters[\"amplitude\"].error = 1e-13\n exppowerlaw.parameters[\"lambda_\"].error = 0.03\n\n enrg_flux, enrg_flux_error = exppowerlaw.energy_flux_error(energy_min, energy_max)\n\n assert_allclose(enrg_flux.value / 1e-12, 2.788, rtol=0.001)\n assert_allclose(enrg_flux_error.value / 1e-12, 1.419, rtol=0.001)\n\n\ndef test_integral_exp_cut_off_power_law_large_number_of_bins():\n energy = np.geomspace(1, 10, 100) * u.TeV\n energy_min = energy[:-1]\n energy_max = energy[1:]\n\n exppowerlaw = ExpCutoffPowerLawSpectralModel(\n amplitude=\"1e-11 TeV-1 cm-2 s-1\", index=2\n )\n exppowerlaw.parameters[\"lambda_\"].value = 1e-3\n powerlaw = PowerLawSpectralModel(amplitude=\"1e-11 TeV-1 cm-2 s-1\", index=2)\n expected_flux = powerlaw.integral(energy_min, energy_max)\n\n flux = exppowerlaw.integral(energy_min, energy_max)\n\n assert_allclose(flux.value, expected_flux.value, rtol=0.01)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom gammapy.datasets import Datasets, SpectrumDatasetOnOff\nfrom gammapy.estimators.parameter import ParameterEstimator\nfrom gammapy.modeling.models import PowerLawSpectralModel, SkyModel\nfrom gammapy.utils.testing import requires_data\n\npytest.importorskip(\"iminuit\")\n\n\[email protected]\ndef crab_datasets_1d():\n filename = \"$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs23523.fits\"\n dataset = SpectrumDatasetOnOff.read(filename)\n datasets = Datasets([dataset])\n return datasets\n\n\[email protected]\ndef pwl_model():\n return PowerLawSpectralModel(amplitude=\"3e-11 cm-2s-1TeV-1\", index=2.7)\n\n\[email protected]\ndef crab_datasets_fermi():\n filename = \"$GAMMAPY_DATA/fermi-3fhl-crab/Fermi-LAT-3FHL_datasets.yaml\"\n filename_models = \"$GAMMAPY_DATA/fermi-3fhl-crab/Fermi-LAT-3FHL_models.yaml\"\n\n return Datasets.read(filename=filename, filename_models=filename_models)\n\n\n@requires_data()\ndef test_parameter_estimator_1d(crab_datasets_1d, pwl_model):\n datasets = crab_datasets_1d\n\n model = SkyModel(spectral_model=pwl_model, name=\"Crab\")\n model.spectral_model.amplitude.scan_n_values = 10\n\n for dataset in datasets:\n dataset.models = model\n\n estimator = ParameterEstimator(selection_optional=\"all\")\n\n result = estimator.run(datasets, parameter=\"amplitude\")\n\n assert_allclose(result[\"amplitude\"], 5.1428e-11, rtol=1e-3)\n assert_allclose(result[\"amplitude_err\"], 6.42467e-12, rtol=1e-3)\n assert_allclose(result[\"ts\"], 353.2092, rtol=1e-3)\n assert_allclose(result[\"amplitude_errp\"], 6.703e-12, rtol=5e-3)\n assert_allclose(result[\"amplitude_errn\"], 6.152e-12, rtol=5e-3)\n\n # Add test for scan\n 
assert_allclose(result[\"amplitude_scan\"].shape, 10)\n\n\n@requires_data()\ndef test_parameter_estimator_3d_no_reoptimization(crab_datasets_fermi):\n datasets = crab_datasets_fermi\n parameter = datasets[0].models.parameters[\"amplitude\"]\n parameter.scan_n_values = 10\n\n estimator = ParameterEstimator(reoptimize=False, selection_optional=[\"scan\"])\n alpha_value = datasets[0].models.parameters[\"alpha\"].value\n\n result = estimator.run(datasets, parameter)\n\n assert not datasets[0].models.parameters[\"alpha\"].frozen\n assert_allclose(datasets[0].models.parameters[\"alpha\"].value, alpha_value)\n assert_allclose(result[\"amplitude\"], 0.018251, rtol=1e-3)\n assert_allclose(result[\"amplitude_scan\"].shape, 10)\n assert_allclose(result[\"amplitude_scan\"][0], 0.017282, atol=1e-3)\n\n\n@requires_data()\ndef test_parameter_estimator_no_data(crab_datasets_1d, pwl_model):\n datasets = crab_datasets_1d\n\n model = SkyModel(spectral_model=pwl_model, name=\"Crab\")\n model.spectral_model.amplitude.scan_n_values = 10\n\n for dataset in datasets:\n dataset.mask_safe.data[...] = False\n dataset.models = model\n\n estimator = ParameterEstimator(selection_optional=\"all\")\n\n result = estimator.run(datasets, parameter=\"amplitude\")\n\n assert np.isnan(result[\"amplitude\"])\n assert np.isnan(result[\"amplitude_err\"])\n assert np.isnan(result[\"amplitude_errp\"])\n assert np.isnan(result[\"amplitude_errn\"])\n assert np.isnan(result[\"amplitude_ul\"])\n assert np.isnan(result[\"ts\"])\n assert np.isnan(result[\"npred\"])\n assert_allclose(result[\"counts\"], 0)\n\n # Add test for scan\n assert_allclose(result[\"amplitude_scan\"].shape, 10)\n assert np.all(np.isnan(result[\"stat_scan\"]))\n", "\"\"\"Plot PWN evolution with time.\"\"\"\nimport numpy as np\nfrom astropy.constants import M_sun\nfrom astropy.units import Quantity\nimport matplotlib.pyplot as plt\nfrom gammapy.astro.source import PWN, SNRTrueloveMcKee\n\nt = Quantity(np.logspace(1, 5, 100), \"yr\")\nn_ISM = Quantity(1, \"cm^-3\")\nsnr = SNRTrueloveMcKee(m_ejecta=8 * M_sun, n_ISM=n_ISM)\npwn = PWN(snr=snr)\npwn.pulsar.L_0 = Quantity(1e40, \"erg/s\")\n\nplt.plot(t.value, pwn.radius(t).to(\"pc\").value, label=\"Radius PWN\")\nplt.plot(t.value, snr.radius_reverse_shock(t).to(\"pc\").value, label=\"Reverse Shock SNR\")\nplt.plot(t.value, snr.radius(t).to(\"pc\").value, label=\"Radius SNR\")\n\nplt.xlabel(\"time [years]\")\nplt.ylabel(\"radius [pc]\")\nplt.legend(loc=4)\nplt.loglog()\nplt.show()\n", "r\"\"\"\n.. _expdecay-temporal-model:\n\nExpDecay temporal model\n=======================\n\nThis model parametrises an ExpDecay time model.\n\n.. 
math::\n F(t) = exp(t - t_{ref})/t0\n\n\n\"\"\"\n\n# %%\n# Example plot\n# ------------\n# Here is an example plot of the model:\n\nfrom astropy import units as u\nfrom astropy.time import Time\nimport matplotlib.pyplot as plt\nfrom gammapy.modeling.models import (\n ExpDecayTemporalModel,\n Models,\n PowerLawSpectralModel,\n SkyModel,\n)\n\nt0 = \"5 h\"\nt_ref = Time(\"2020-10-01\")\ntime_range = [t_ref, t_ref + 1 * u.d]\nexpdecay_model = ExpDecayTemporalModel(t_ref=t_ref.mjd * u.d, t0=t0)\nexpdecay_model.plot(time_range)\nplt.grid(which=\"both\")\n\n# %%\n# YAML representation\n# -------------------\n# Here is an example YAML file using the model:\n\nmodel = SkyModel(\n spectral_model=PowerLawSpectralModel(),\n temporal_model=expdecay_model,\n name=\"expdecay_model\",\n)\nmodels = Models([model])\n\nprint(models.to_yaml())\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport numpy as np\nimport scipy.special\nfrom astropy import units as u\nfrom astropy.coordinates import Angle, SkyCoord\nfrom astropy.visualization import quantity_support\nfrom gammapy.maps import MapAxes, MapAxis, RegionGeom\nfrom ..core import IRF\n\n__all__ = [\"EnergyDispersion2D\"]\n\n\nclass EnergyDispersion2D(IRF):\n \"\"\"Offset-dependent energy dispersion matrix.\n\n Data format specification: :ref:`gadf:edisp_2d`\n\n Parameters\n ----------\n energy_axis_true : `MapAxis`\n True energy axis\n migra_axis : `MapAxis`\n Energy migration axis\n offset_axis : `MapAxis`\n Field of view offset axis\n data : `~numpy.ndarray`\n Energy dispersion probability density\n\n Examples\n --------\n Read energy dispersion IRF from disk:\n\n >>> from gammapy.maps import MapAxis\n >>> from gammapy.irf import EnergyDispersion2D\n >>> filename = '$GAMMAPY_DATA/hess-dl3-dr1/data/hess_dl3_dr1_obs_id_020136.fits.gz'\n >>> edisp2d = EnergyDispersion2D.read(filename, hdu=\"EDISP\")\n\n Create energy dispersion matrix (`~gammapy.irf.EnergyDispersion`)\n for a given field of view offset and energy binning:\n\n >>> energy = MapAxis.from_bounds(0.1, 20, nbin=60, unit=\"TeV\", interp=\"log\").edges\n >>> edisp = edisp2d.to_edisp_kernel(offset='1.2 deg', energy=energy, energy_true=energy)\n\n See Also\n --------\n EnergyDispersion\n \"\"\"\n\n tag = \"edisp_2d\"\n required_axes = [\"energy_true\", \"migra\", \"offset\"]\n\n def _mask_out_bounds(self, invalid):\n return (\n invalid[self.axes.index(\"energy_true\")] & invalid[self.axes.index(\"migra\")]\n ) | invalid[self.axes.index(\"offset\")]\n\n @classmethod\n def from_gauss(\n cls, energy_axis_true, migra_axis, offset_axis, bias, sigma, pdf_threshold=1e-6\n ):\n \"\"\"Create Gaussian energy dispersion matrix (`EnergyDispersion2D`).\n\n The output matrix will be Gaussian in (energy_true / energy).\n\n The ``bias`` and ``sigma`` should be either floats or arrays of same dimension than\n ``energy_true``. ``bias`` refers to the mean value of the ``migra``\n distribution minus one, i.e. 
``bias=0`` means no bias.\n\n Note that, the output matrix is flat in offset.\n\n Parameters\n ----------\n energy_axis_true : `MapAxis`\n True energy axis\n migra_axis : `~astropy.units.Quantity`\n Migra axis\n offset_axis : `~astropy.units.Quantity`\n Bin edges of offset\n bias : float or `~numpy.ndarray`\n Center of Gaussian energy dispersion, bias\n sigma : float or `~numpy.ndarray`\n RMS width of Gaussian energy dispersion, resolution.\n pdf_threshold : float, optional\n Zero suppression threshold\n \"\"\"\n axes = MapAxes([energy_axis_true, migra_axis, offset_axis])\n coords = axes.get_coord(mode=\"edges\", axis_name=\"migra\")\n\n migra_min = coords[\"migra\"][:, :-1, :]\n migra_max = coords[\"migra\"][:, 1:, :]\n\n # Analytical formula for integral of Gaussian\n s = np.sqrt(2) * sigma\n t1 = (migra_max - 1 - bias) / s\n t2 = (migra_min - 1 - bias) / s\n pdf = (scipy.special.erf(t1) - scipy.special.erf(t2)) / 2\n pdf = pdf / (migra_max - migra_min)\n\n # no offset dependence\n data = pdf * np.ones(axes.shape)\n data[data < pdf_threshold] = 0\n\n return cls(\n axes=axes,\n data=data.value,\n )\n\n def to_edisp_kernel(self, offset, energy_true=None, energy=None):\n \"\"\"Detector response R(Delta E_reco, Delta E_true)\n\n Probability to reconstruct an energy in a given true energy band\n in a given reconstructed energy band\n\n Parameters\n ----------\n offset : `~astropy.coordinates.Angle`\n Offset\n energy_true : `~astropy.units.Quantity`, None\n True energy axis\n energy : `~astropy.units.Quantity`\n Reconstructed energy axis\n\n Returns\n -------\n edisp : `~gammapy.irf.EDispKernel`\n Energy dispersion matrix\n \"\"\"\n from gammapy.makers.utils import make_edisp_kernel_map\n\n offset = Angle(offset)\n\n # TODO: expect directly MapAxis here?\n if energy is None:\n energy_axis = self.axes[\"energy_true\"].copy(name=\"energy\")\n else:\n energy_axis = MapAxis.from_energy_edges(energy)\n\n if energy_true is None:\n energy_axis_true = self.axes[\"energy_true\"]\n else:\n energy_axis_true = MapAxis.from_energy_edges(\n energy_true,\n name=\"energy_true\",\n )\n\n pointing = SkyCoord(\"0d\", \"0d\")\n\n center = pointing.directional_offset_by(\n position_angle=0 * u.deg, separation=offset\n )\n geom = RegionGeom.create(region=center, axes=[energy_axis, energy_axis_true])\n\n edisp = make_edisp_kernel_map(geom=geom, edisp=self, pointing=pointing)\n return edisp.get_edisp_kernel()\n\n def normalize(self):\n \"\"\"Normalise energy dispersion\"\"\"\n super().normalize(axis_name=\"migra\")\n\n def plot_migration(self, ax=None, offset=None, energy_true=None, **kwargs):\n \"\"\"Plot energy dispersion for given offset and true energy.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n offset : `~astropy.coordinates.Angle`, optional\n Offset\n energy_true : `~astropy.units.Quantity`, optional\n True energy\n **kwargs : dict\n Keyword arguments forwarded to `~matplotlib.pyplot.plot`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis\n \"\"\"\n import matplotlib.pyplot as plt\n\n ax = plt.gca() if ax is None else ax\n\n if offset is None:\n offset = Angle([1], \"deg\")\n else:\n offset = np.atleast_1d(Angle(offset))\n\n if energy_true is None:\n energy_true = u.Quantity([0.1, 1, 10], \"TeV\")\n else:\n energy_true = np.atleast_1d(u.Quantity(energy_true))\n\n migra = self.axes[\"migra\"]\n\n with quantity_support():\n for ener in energy_true:\n for off in offset:\n disp = self.evaluate(\n offset=off, energy_true=ener, migra=migra.center\n )\n label = f\"offset = 
{off:.1f}\\nenergy = {ener:.1f}\"\n ax.plot(migra.center, disp, label=label, **kwargs)\n\n migra.format_plot_xaxis(ax=ax)\n ax.set_ylabel(\"Probability density\")\n ax.legend(loc=\"upper left\")\n return ax\n\n def plot_bias(self, ax=None, offset=None, add_cbar=False, **kwargs):\n \"\"\"Plot migration as a function of true energy for a given offset.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n offset : `~astropy.coordinates.Angle`, optional\n Offset\n add_cbar : bool\n Add a colorbar to the plot.\n kwargs : dict\n Keyword arguments passed to `~matplotlib.pyplot.pcolormesh`.\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.colors import PowerNorm\n\n kwargs.setdefault(\"cmap\", \"GnBu\")\n kwargs.setdefault(\"norm\", PowerNorm(gamma=0.5))\n\n ax = plt.gca() if ax is None else ax\n\n if offset is None:\n offset = Angle(1, \"deg\")\n\n energy_true = self.axes[\"energy_true\"]\n migra = self.axes[\"migra\"]\n\n z = self.evaluate(\n offset=offset,\n energy_true=energy_true.center.reshape(1, -1, 1),\n migra=migra.center.reshape(1, 1, -1),\n ).value[0]\n\n with quantity_support():\n caxes = ax.pcolormesh(energy_true.edges, migra.edges, z.T, **kwargs)\n\n energy_true.format_plot_xaxis(ax=ax)\n migra.format_plot_yaxis(ax=ax)\n\n if add_cbar:\n label = \"Probability density (A.U.)\"\n ax.figure.colorbar(caxes, ax=ax, label=label)\n\n return ax\n\n def peek(self, figsize=(15, 5)):\n \"\"\"Quick-look summary plots.\n\n Parameters\n ----------\n figsize : (float, float)\n Size of the resulting plot\n \"\"\"\n import matplotlib.pyplot as plt\n\n fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)\n self.plot_bias(ax=axes[0])\n self.plot_migration(ax=axes[1])\n edisp = self.to_edisp_kernel(offset=\"1 deg\")\n edisp.plot_matrix(ax=axes[2])\n\n plt.tight_layout()\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.close", "numpy.testing.assert_allclose" ], [ "numpy.testing.assert_allclose" ], [ "numpy.ndenumerate", "numpy.ones", "numpy.concatenate", "numpy.ravel_multi_index", "numpy.ravel", "numpy.where" ], [ "numpy.meshgrid", "numpy.linspace", "numpy.arange", "numpy.cos", "numpy.ones", "numpy.mean", "numpy.testing.assert_allclose", "numpy.exp", "numpy.zeros" ], [ "numpy.errstate", "numpy.array" ], [ "numpy.linspace", "numpy.finfo", "numpy.all", "numpy.geomspace", "numpy.testing.assert_allclose", "numpy.array" ], [ "numpy.isnan", "numpy.testing.assert_allclose" ], [ "matplotlib.pyplot.legend", "numpy.logspace", "matplotlib.pyplot.loglog", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "matplotlib.pyplot.grid" ], [ "matplotlib.pyplot.gca", "matplotlib.colors.PowerNorm", "matplotlib.pyplot.tight_layout", "numpy.sqrt", "matplotlib.pyplot.subplots", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Zihang97/PAGAN
[ "9233fc54ecf49d6a82bb0794333d61f707439a68", "9233fc54ecf49d6a82bb0794333d61f707439a68" ]
[ "src/snlayers/snconv1d.py", "torch_stft/util.py" ]
[ "# coding=utf-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules import conv\nfrom torch.nn.modules.utils import _single\nfrom ..functions.max_sv import max_singular_value\n\nclass SNConv1d(conv._ConvNd):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\n kernel_size = _single(kernel_size)\n stride = _single(stride)\n padding = _single(padding)\n dilation = _single(dilation)\n super(SNConv1d, self).__init__(\n in_channels, out_channels, kernel_size, stride, padding, dilation,\n False, _single(0), groups, bias)\n self.register_buffer('u', torch.Tensor(1, out_channels).normal_())\n\n @property\n def W_(self):\n w_mat = self.weight.view(self.weight.size(0), -1)\n sigma, _u = max_singular_value(w_mat, self.u)\n self.u.copy_(_u)\n return self.weight / sigma\n\n def forward(self, input):\n return F.conv1d(input, self.W_, self.bias, self.stride, self.padding, self.dilation, self.groups)\n", "import numpy as np\nfrom scipy.signal import get_window\nimport librosa.util as librosa_util\n\ndef window_sumsquare(window, n_frames, hop_length=200, win_length=800,\n n_fft=800, dtype=np.float32, norm=None):\n \"\"\"\n # from librosa 0.6\n Compute the sum-square envelope of a window function at a given hop length.\n This is used to estimate modulation effects induced by windowing\n observations in short-time fourier transforms.\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n Window specification, as in `get_window`\n n_frames : int > 0\n The number of analysis frames\n hop_length : int > 0\n The number of samples to advance between frames\n win_length : [optional]\n The length of the window function. By default, this matches `n_fft`.\n n_fft : int > 0\n The length of each analysis frame.\n dtype : np.dtype\n The data type of the output\n Returns\n -------\n wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`\n The sum-squared envelope of the window function\n \"\"\"\n if win_length is None:\n win_length = n_fft # 512\n\n n = n_fft + hop_length * (n_frames - 1) # n = 512 + 160 + 512-1\n x = np.zeros(n, dtype=dtype) # x.size = 1383\n\n # Compute the squared window at the desired length\n win_sq = get_window(window, win_length, fftbins=True)\n win_sq = librosa_util.normalize(win_sq, norm=norm)**2\n win_sq = librosa_util.pad_center(win_sq, n_fft)\n\n # Fill the envelope\n for i in range(n_frames):\n sample = i * hop_length\n x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]\n return x" ]
[ [ "torch.nn.functional.conv1d", "torch.nn.modules.utils._single", "torch.Tensor" ], [ "numpy.zeros", "scipy.signal.get_window" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers
[ "619c5c0b17438d1014f7ca7e4ce13cc44c45de3c" ]
[ "src/classifiers.py" ]
[ "# -*- coding: utf-8 -*- #\n\"\"\"*********************************************************************************************\"\"\"\n# FileName [ classifiers.py ]\n# Synopsis [ 'Naive Bayes' and 'Decision Tree' training, testing, and tunning functions ]\n# Author [ Ting-Wei Liu (Andi611) ]\n# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]\n\"\"\"*********************************************************************************************\"\"\"\n\n\n###############\n# IMPORTATION #\n###############\nimport numpy as np\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.naive_bayes import ComplementNB\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\nfrom sklearn import tree\n\n\n############\n# CONSTANT #\n############\nN_FOLD = 10\nDEPTHS = np.arange(1, 64)\nALPHAS = np.arange(0.001, 1.0, 0.001)\nALPHAS_MUSHROOM = np.arange(0.0001, 1.0, 0.0001)\nBEST_DISTRIBUTION = 'Multinominal'\n\n\n\n###############\n# NAIVE BAYES #\n###############\nclass naive_bayes_runner(object):\n\n\tdef __init__(self, MODEL, train_x, train_y, test_x, test_y):\n\t\t\n\t\t#---data---#\n\t\tself.train_x = train_x\n\t\tself.train_y = train_y\n\t\tself.test_x = test_x\n\t\tself.test_y = test_y\n\n\t\t#---model---#\n\t\tself.cross_validate = False\n\t\tself.MODEL = MODEL\n\n\t\tif self.MODEL == 'NEWS':\n\t\t\tself.models = {\t'Guassian' : GaussianNB(),\n\t\t\t\t\t \t \t'Multinominal' : MultinomialNB(alpha=0.065),\n\t\t\t\t\t\t\t'Complement' : ComplementNB(alpha=0.136),\n\t\t\t\t\t\t \t'Bernoulli' : BernoulliNB(alpha=0.002) }\n\t\tif self.MODEL == 'MUSHROOM':\n\t\t\tALPHAS = ALPHAS_MUSHROOM\n\t\t\tself.models = {\t'Guassian' : GaussianNB(),\n\t\t\t\t\t \t \t'Multinominal' : MultinomialNB(alpha=0.0001),\n\t\t\t\t\t\t\t'Complement' : ComplementNB(alpha=0.0001),\n\t\t\t\t\t\t \t'Bernoulli' : BernoulliNB(alpha=0.0001) }\n\t\tif self.MODEL == 'INCOME':\n\t\t\tself.cross_validate = True\n\t\t\tself.models = {\t'Guassian' : GaussianNB(),\n\t\t\t\t\t \t \t'Multinominal' : MultinomialNB(alpha=0.959),\n\t\t\t\t\t\t\t'Complement' : ComplementNB(alpha=0.16),\n\t\t\t\t\t\t \t'Bernoulli' : BernoulliNB(alpha=0.001) }\n\n\n\tdef _fit_and_evaluate(self, model):\n\t\tmodel_fit = model.fit(self.train_x, self.train_y)\n\t\tpred_y = model_fit.predict(self.test_x)\n\t\tacc = metrics.accuracy_score(self.test_y, pred_y)\n\t\treturn acc, pred_y\n\t\n\n\tdef search_alpha(self):\n\t\ttry:\n\t\t\tfrom tqdm import tqdm\n\t\texcept:\n\t\t\traise ImportError('Failed to import tqdm, use the following command to install: pip3 install tqdm')\n\t\tfor distribution, model in self.models.items():\n\t\t\tbest_acc = 0.0\n\t\t\tbest_alpha = 0.001\n\t\t\tif distribution != 'Guassian': \n\t\t\t\tprint('>> [Naive Bayes Runner] Searching for best alpha value, distribution:', distribution)\n\t\t\t\tfor alpha in tqdm(ALPHAS):\n\t\t\t\t\tmodel.set_params(alpha=alpha)\n\t\t\t\t\tif self.cross_validate: \n\t\t\t\t\t\tscores = cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\t\t\t\tacc = scores.mean()\n\t\t\t\t\telse:\n\t\t\t\t\t\tacc, _ = self._fit_and_evaluate(model)\n\t\t\t\t\tif acc > best_acc:\n\t\t\t\t\t\tbest_acc = acc\n\t\t\t\t\t\tbest_alpha = alpha\n\t\t\t\tprint('>> [Naive Bayes Runner] '+ distribution + ' - Best Alpha Value:', best_alpha)\n\n\n\tdef run_best_all(self):\n\t\tfor distribution, model in self.models.items():\n\t\t\tif self.cross_validate: \n\t\t\t\tscores = 
cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\t\tacc = scores.mean()\n\t\t\telse:\n\t\t\t\tacc, _ = self._fit_and_evaluate(model)\n\t\t\tprint('>> [Naive Bayes Runner] '+ distribution + ' - Accuracy:', acc)\n\n\n\tdef run_best(self):\n\t\tif self.cross_validate: \n\t\t\tscores = cross_val_score(self.models[BEST_DISTRIBUTION], self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\tacc = scores.mean()\n\t\t\tmodel_fit = self.models[BEST_DISTRIBUTION].fit(self.train_x, self.train_y)\n\t\t\tpred_y = model_fit.predict(self.test_x)\n\t\telse:\n\t\t\tacc, pred_y = self._fit_and_evaluate(self.models[BEST_DISTRIBUTION])\n\t\tprint('>> [Naive Bayes Runner] '+ BEST_DISTRIBUTION + ' - Accuracy:', acc)\n\t\treturn pred_y\n\n\n#################\n# DECISION TREE #\n#################\nclass decision_tree_runner(object):\n\t\n\tdef __init__(self, MODEL, train_x, train_y, test_x, test_y):\n\t\t\n\t\t#---data---#\n\t\tself.train_x = train_x\n\t\tself.train_y = train_y\n\t\tself.test_x = test_x\n\t\tself.test_y = test_y\n\n\t\t#---model---#\n\t\tself.cross_validate = False\n\t\tself.MODEL = MODEL\n\n\t\tif self.MODEL == 'NEWS':\n\t\t\tself.model = tree.DecisionTreeClassifier(criterion='gini', \n\t\t\t\t\t\t\t\t\t\t\t\t\t splitter='random', \n\t\t\t\t\t\t\t\t\t\t\t\t\t max_depth=47,\n\t\t\t\t\t\t\t\t\t\t\t\t\t random_state=1337)\n\t\telif self.MODEL == 'MUSHROOM':\n\t\t\tself.model = tree.DecisionTreeClassifier(criterion='gini', \n\t\t\t\t\t\t\t\t\t\t\t\t\t splitter='random', \n\t\t\t\t\t\t\t\t\t\t\t\t\t max_depth=7,\n\t\t\t\t\t\t\t\t\t\t\t\t\t random_state=1337)\n\t\telif self.MODEL == 'INCOME':\n\t\t\tself.cross_validate = True\n\t\t\tself.model = tree.DecisionTreeClassifier(criterion='entropy', \n\t\t\t\t\t\t\t\t\t\t\t\t\t min_impurity_decrease=2e-4,\n\t\t\t\t\t\t\t\t\t\t\t\t\t max_depth=15,\n\t\t\t\t\t\t\t\t\t\t\t\t\t random_state=1337)\n\n\n\tdef _fit_and_evaluate(self):\n\t\tmodel_fit = self.model.fit(self.train_x, self.train_y)\n\t\tpred_y = model_fit.predict(self.test_x)\n\t\tacc = metrics.accuracy_score(self.test_y, pred_y)\n\t\treturn acc, pred_y\n\n\n\tdef search_max_depth(self):\n\t\ttry:\n\t\t\tfrom tqdm import tqdm\n\t\texcept:\n\t\t\traise ImportError('Failed to import tqdm, use the following command to install: $ pip3 install tqdm')\n\t\tbest_acc = 0.0\n\t\tbest_depth = 1\n\t\t\n\t\tprint('>> [Naive Bayes Runner] Searching for best max depth value...')\n\t\tfor depth in tqdm(DEPTHS):\n\t\t\tself.model.set_params(max_depth=depth)\n\t\t\tif self.cross_validate: \n\t\t\t\tscores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\t\tacc = scores.mean()\n\t\t\telse:\n\t\t\t\tacc, _ = self._fit_and_evaluate()\n\t\t\tif acc > best_acc:\n\t\t\t\tbest_acc = acc\n\t\t\t\tbest_depth = depth\n\t\tprint('>> [Decision Tree Runner] - Best Dpeth Value:', best_depth)\n\n\n\tdef visualize(self):\n\t\ttry:\n\t\t\timport graphviz\n\t\texcept:\n\t\t\traise ImportError('Failed to import graphviz, use the following command to install: $ pip3 install graphviz, and $ sudo apt-get install graphviz')\n\t\tmodel_fit = self.model.fit(self.train_x, self.train_y)\n\t\tdot_data = tree.export_graphviz(model_fit, out_file=None, \n\t\t\t\t\t\t\t\t\t\tfilled=True, rounded=True, \n\t\t\t\t\t\t\t\t\t\tspecial_characters=True) \n\t\tgraph = graphviz.Source(dot_data)\n\t\tgraph.format = 'png'\n\t\tgraph.render('../image/TREE_' + self.MODEL)\n\t\tprint('>> [Decision Tree Runner] - Tree visualization complete.')\n\n\n\tdef 
run_best(self):\n\t\tif self.cross_validate: \n\t\t\tscores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\tacc = scores.mean()\n\t\t\tmodel_fit = self.model.fit(self.train_x, self.train_y)\n\t\t\tpred_y = model_fit.predict(self.test_x)\n\t\telse:\t\t\n\t\t\tacc, pred_y = self._fit_and_evaluate()\n\t\tprint('>> [Decision Tree Runner] - Accuracy:', acc)\n\t\treturn pred_y\n\n" ]
[ [ "sklearn.tree.export_graphviz", "sklearn.naive_bayes.ComplementNB", "sklearn.model_selection.cross_val_score", "sklearn.naive_bayes.GaussianNB", "numpy.arange", "sklearn.naive_bayes.MultinomialNB", "sklearn.tree.DecisionTreeClassifier", "sklearn.naive_bayes.BernoulliNB", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jphacks/C_2008
[ "65d7a1d3a90045b149397cdd1e038ab648bb842e" ]
[ "sound_factory/sound_factory.py" ]
[ "import os\nimport re\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\n\n\nIMAGE_SHAPE = [(224, 224), (240, 240), (260, 260), (300, 300), (380, 380), (456, 456), (528, 528), (600, 600)]\n\ndef main(paths : list, model_name : str):\n try:\n model = tf.keras.models.load_model(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'model', model_name))\n except Exception:\n print('そのようなモデルはありません')\n exit()\n\n model_index = int(re.search('\\d', model_name).group(0))\n with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'model', model_name, 'labels.txt'), mode='r', encoding='utf-8') as f1:\n labels = [s.strip() for s in f1.readlines()]\n \n with open('manga_sound_labels.csv', mode='w', encoding='utf-8') as f2:\n for path in paths:\n if os.path.isfile(path):\n try:\n img = np.expand_dims(img_to_array(load_img(path,target_size=IMAGE_SHAPE[model_index])) / 255, axis=0)\n except Exception:\n continue\n pridict = labels[np.argmax(model.predict(img)[0])]\n f2.write(path + ',' + pridict + '\\n')\n else:\n for filename in os.listdir(path):\n try:\n img = np.expand_dims(img_to_array(load_img(os.path.join(path, filename),target_size=IMAGE_SHAPE[model_index])) / 255, axis=0)\n except Exception:\n continue\n pridict = labels[np.argmax(model.predict(img)[0])]\n f2.write(os.path.join(path, filename) + ',' + pridict + '\\n')\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='コマの画像から背景音を予測します')\n parser.add_argument('path',nargs='*', help='解析するファイル名かディレクトリ名')\n parser.add_argument('--model', default=os.path.join('best','b0'), help='クラス分けに使用するモデル名')\n args = parser.parse_args()\n if 'manga_sound_labels.csv' in os.listdir(os.getcwd()):\n print('manga_sound_labels.csvがすでにあるので終了します')\n exit()\n main(args.path, args.model)" ]
[ [ "tensorflow.keras.preprocessing.image.load_img" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.3", "2.4", "2.5", "2.6" ] } ]
bhomaidan1990/reinforcement-learning-an-introduction
[ "fbf020d9da2daec3194a17f968ef29d12ebde6f6" ]
[ "chapter05/blackjack.py" ]
[ "#######################################################################\n# Copyright (C) #\n# 2016-2018 Shangtong Zhang([email protected]) #\n# 2016 Kenta Shimada([email protected]) #\n# 2017 Nicky van Foreest([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom tqdm import tqdm\n\n# actions: hit or stand\nACTION_HIT = 0\nACTION_STAND = 1 # \"strike\" in the book\nACTIONS = [ACTION_HIT, ACTION_STAND]\n\n# policy for player\nPOLICY_PLAYER = np.zeros(22, dtype=np.int)\nfor i in range(12, 20):\n POLICY_PLAYER[i] = ACTION_HIT\nPOLICY_PLAYER[20] = ACTION_STAND\nPOLICY_PLAYER[21] = ACTION_STAND\n\n# function form of target policy of player\ndef target_policy_player(usable_ace_player, player_sum, dealer_card):\n return POLICY_PLAYER[player_sum]\n\n# function form of behavior policy of player\ndef behavior_policy_player(usable_ace_player, player_sum, dealer_card):\n if np.random.binomial(1, 0.5) == 1:\n return ACTION_STAND\n return ACTION_HIT\n\n# policy for dealer\nPOLICY_DEALER = np.zeros(22)\nfor i in range(12, 17):\n POLICY_DEALER[i] = ACTION_HIT\nfor i in range(17, 22):\n POLICY_DEALER[i] = ACTION_STAND\n\n# get a new card\ndef get_card():\n card = np.random.randint(1, 14)\n card = min(card, 10)\n return card\n\n# get the value of a card (11 for ace).\ndef card_value(card_id):\n return 11 if card_id == 1 else card_id\n\n# play a game\n# @policy_player: specify policy for player\n# @initial_state: [whether player has a usable Ace, sum of player's cards, one card of dealer]\n# @initial_action: the initial action\ndef play(policy_player, initial_state=None, initial_action=None):\n # player status\n\n # sum of player\n player_sum = 0\n\n # trajectory of player\n player_trajectory = []\n\n # whether player uses Ace as 11\n usable_ace_player = False\n\n # dealer status\n dealer_card1 = 0\n dealer_card2 = 0\n usable_ace_dealer = False\n\n if initial_state is None:\n # generate a random initial state\n\n while player_sum < 12:\n # if sum of player is less than 12, always hit\n card = get_card()\n player_sum += card_value(card)\n\n # If the player's sum is larger than 21, he may hold one or two aces.\n if player_sum > 21:\n assert player_sum == 22\n # last card must be ace\n player_sum -= 10\n else:\n usable_ace_player |= (1 == card)\n\n # initialize cards of dealer, suppose dealer will show the first card he gets\n dealer_card1 = get_card()\n dealer_card2 = get_card()\n\n else:\n # use specified initial state\n usable_ace_player, player_sum, dealer_card1 = initial_state\n dealer_card2 = get_card()\n\n # initial state of the game\n state = [usable_ace_player, player_sum, dealer_card1]\n\n # initialize dealer's sum\n dealer_sum = card_value(dealer_card1) + card_value(dealer_card2)\n usable_ace_dealer = 1 in (dealer_card1, dealer_card2)\n # if the dealer's sum is larger than 21, he must hold two aces.\n if dealer_sum > 21:\n assert dealer_sum == 22\n # use one Ace as 1 rather than 11\n dealer_sum -= 10\n assert dealer_sum <= 21\n assert player_sum <= 21\n\n # game starts!\n\n # player's turn\n while True:\n if initial_action is not None:\n action = initial_action\n initial_action = None\n else:\n # get action based on current sum\n action = policy_player(usable_ace_player, player_sum, dealer_card1)\n\n # track player's trajectory for importance 
sampling\n player_trajectory.append([(usable_ace_player, player_sum, dealer_card1), action])\n\n if action == ACTION_STAND:\n break\n # if hit, get new card\n card = get_card()\n # Keep track of the ace count. the usable_ace_player flag is insufficient alone as it cannot\n # distinguish between having one ace or two.\n ace_count = int(usable_ace_player)\n if card == 1:\n ace_count += 1\n player_sum += card_value(card)\n # If the player has a usable ace, use it as 1 to avoid busting and continue.\n while player_sum > 21 and ace_count:\n player_sum -= 10\n ace_count -= 1\n # player busts\n if player_sum > 21:\n return state, -1, player_trajectory\n assert player_sum <= 21\n usable_ace_player = (ace_count == 1)\n\n # dealer's turn\n while True:\n # get action based on current sum\n action = POLICY_DEALER[dealer_sum]\n if action == ACTION_STAND:\n break\n # if hit, get a new card\n new_card = get_card()\n ace_count = int(usable_ace_dealer)\n if new_card == 1:\n ace_count += 1\n dealer_sum += card_value(new_card)\n # If the dealer has a usable ace, use it as 1 to avoid busting and continue.\n while dealer_sum > 21 and ace_count:\n dealer_sum -= 10\n ace_count -= 1\n # dealer busts\n if dealer_sum > 21:\n return state, 1, player_trajectory\n usable_ace_dealer = (ace_count == 1)\n\n # compare the sum between player and dealer\n assert player_sum <= 21 and dealer_sum <= 21\n if player_sum > dealer_sum:\n return state, 1, player_trajectory\n elif player_sum == dealer_sum:\n return state, 0, player_trajectory\n else:\n return state, -1, player_trajectory\n\n# Monte Carlo Sample with On-Policy\ndef monte_carlo_on_policy(episodes):\n states_usable_ace = np.zeros((10, 10))\n # initialze counts to 1 to avoid 0 being divided\n states_usable_ace_count = np.ones((10, 10))\n states_no_usable_ace = np.zeros((10, 10))\n # initialze counts to 1 to avoid 0 being divided\n states_no_usable_ace_count = np.ones((10, 10))\n for i in tqdm(range(0, episodes)):\n _, reward, player_trajectory = play(target_policy_player)\n for (usable_ace, player_sum, dealer_card), _ in player_trajectory:\n player_sum -= 12\n dealer_card -= 1\n if usable_ace:\n states_usable_ace_count[player_sum, dealer_card] += 1\n states_usable_ace[player_sum, dealer_card] += reward\n else:\n states_no_usable_ace_count[player_sum, dealer_card] += 1\n states_no_usable_ace[player_sum, dealer_card] += reward\n return states_usable_ace / states_usable_ace_count, states_no_usable_ace / states_no_usable_ace_count\n\n# Monte Carlo with Exploring Starts\ndef monte_carlo_es(episodes):\n # (playerSum, dealerCard, usableAce, action)\n state_action_values = np.zeros((10, 10, 2, 2))\n # initialze counts to 1 to avoid division by 0\n state_action_pair_count = np.ones((10, 10, 2, 2))\n\n # behavior policy is greedy\n def behavior_policy(usable_ace, player_sum, dealer_card):\n usable_ace = int(usable_ace)\n player_sum -= 12\n dealer_card -= 1\n # get argmax of the average returns(s, a)\n values_ = state_action_values[player_sum, dealer_card, usable_ace, :] / \\\n state_action_pair_count[player_sum, dealer_card, usable_ace, :]\n return np.random.choice([action_ for action_, value_ in enumerate(values_) if value_ == np.max(values_)])\n\n # play for several episodes\n for episode in tqdm(range(episodes)):\n # for each episode, use a randomly initialized state and action\n initial_state = [bool(np.random.choice([0, 1])),\n np.random.choice(range(12, 22)),\n np.random.choice(range(1, 11))]\n initial_action = np.random.choice(ACTIONS)\n current_policy = behavior_policy if 
episode else target_policy_player\n _, reward, trajectory = play(current_policy, initial_state, initial_action)\n first_visit_check = set()\n for (usable_ace, player_sum, dealer_card), action in trajectory:\n usable_ace = int(usable_ace)\n player_sum -= 12\n dealer_card -= 1\n state_action = (usable_ace, player_sum, dealer_card, action)\n if state_action in first_visit_check:\n continue\n first_visit_check.add(state_action)\n # update values of state-action pairs\n state_action_values[player_sum, dealer_card, usable_ace, action] += reward\n state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1\n\n return state_action_values / state_action_pair_count\n\n# Monte Carlo Sample with Off-Policy\ndef monte_carlo_off_policy(episodes):\n initial_state = [True, 13, 2]\n\n rhos = []\n returns = []\n\n for i in range(0, episodes):\n _, reward, player_trajectory = play(behavior_policy_player, initial_state=initial_state)\n\n # get the importance ratio\n numerator = 1.0\n denominator = 1.0\n for (usable_ace, player_sum, dealer_card), action in player_trajectory:\n if action == target_policy_player(usable_ace, player_sum, dealer_card):\n denominator *= 0.5\n else:\n numerator = 0.0\n break\n rho = numerator / denominator\n rhos.append(rho)\n returns.append(reward)\n\n rhos = np.asarray(rhos)\n returns = np.asarray(returns)\n weighted_returns = rhos * returns\n\n weighted_returns = np.add.accumulate(weighted_returns)\n rhos = np.add.accumulate(rhos)\n\n ordinary_sampling = weighted_returns / np.arange(1, episodes + 1)\n\n with np.errstate(divide='ignore',invalid='ignore'):\n weighted_sampling = np.where(rhos != 0, weighted_returns / rhos, 0)\n\n return ordinary_sampling, weighted_sampling\n\ndef figure_5_1():\n states_usable_ace_1, states_no_usable_ace_1 = monte_carlo_on_policy(10000)\n states_usable_ace_2, states_no_usable_ace_2 = monte_carlo_on_policy(500000)\n\n states = [states_usable_ace_1,\n states_usable_ace_2,\n states_no_usable_ace_1,\n states_no_usable_ace_2]\n\n titles = ['Usable Ace, 10000 Episodes',\n 'Usable Ace, 500000 Episodes',\n 'No Usable Ace, 10000 Episodes',\n 'No Usable Ace, 500000 Episodes']\n\n _, axes = plt.subplots(2, 2, figsize=(40, 30))\n plt.subplots_adjust(wspace=0.1, hspace=0.2)\n axes = axes.flatten()\n\n for state, title, axis in zip(states, titles, axes):\n fig = sns.heatmap(np.flipud(state), cmap=\"YlGnBu\", ax=axis, xticklabels=range(1, 11),\n yticklabels=list(reversed(range(12, 22))))\n fig.set_ylabel('player sum', fontsize=30)\n fig.set_xlabel('dealer showing', fontsize=30)\n fig.set_title(title, fontsize=30)\n\n plt.savefig('../images/figure_5_1.png')\n plt.close()\n\ndef figure_5_2():\n state_action_values = monte_carlo_es(500000)\n\n state_value_no_usable_ace = np.max(state_action_values[:, :, 0, :], axis=-1)\n state_value_usable_ace = np.max(state_action_values[:, :, 1, :], axis=-1)\n\n # get the optimal policy\n action_no_usable_ace = np.argmax(state_action_values[:, :, 0, :], axis=-1)\n action_usable_ace = np.argmax(state_action_values[:, :, 1, :], axis=-1)\n\n images = [action_usable_ace,\n state_value_usable_ace,\n action_no_usable_ace,\n state_value_no_usable_ace]\n\n titles = ['Optimal policy with usable Ace',\n 'Optimal value with usable Ace',\n 'Optimal policy without usable Ace',\n 'Optimal value without usable Ace']\n\n _, axes = plt.subplots(2, 2, figsize=(40, 30))\n plt.subplots_adjust(wspace=0.1, hspace=0.2)\n axes = axes.flatten()\n\n for image, title, axis in zip(images, titles, axes):\n fig = sns.heatmap(np.flipud(image), 
cmap=\"YlGnBu\", ax=axis, xticklabels=range(1, 11),\n yticklabels=list(reversed(range(12, 22))))\n fig.set_ylabel('player sum', fontsize=30)\n fig.set_xlabel('dealer showing', fontsize=30)\n fig.set_title(title, fontsize=30)\n\n plt.savefig('../images/figure_5_2.png')\n plt.close()\n\ndef figure_5_3():\n true_value = -0.27726\n episodes = 10000\n runs = 100\n error_ordinary = np.zeros(episodes)\n error_weighted = np.zeros(episodes)\n for i in tqdm(range(0, runs)):\n ordinary_sampling_, weighted_sampling_ = monte_carlo_off_policy(episodes)\n # get the squared error\n error_ordinary += np.power(ordinary_sampling_ - true_value, 2)\n error_weighted += np.power(weighted_sampling_ - true_value, 2)\n error_ordinary /= runs\n error_weighted /= runs\n\n plt.plot(np.arange(1, episodes + 1), error_ordinary, color='green', label='Ordinary Importance Sampling')\n plt.plot(np.arange(1, episodes + 1), error_weighted, color='red', label='Weighted Importance Sampling')\n plt.ylim(-0.1, 5)\n plt.xlabel('Episodes (log scale)')\n plt.ylabel(f'Mean square error\\n(average over {runs} runs)')\n plt.xscale('log')\n plt.legend()\n\n plt.savefig('../images/figure_5_3.png')\n plt.close()\n\n\nif __name__ == '__main__':\n figure_5_1()\n figure_5_2()\n figure_5_3()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.asarray", "numpy.flipud", "numpy.max", "numpy.where", "numpy.random.randint", "numpy.arange", "numpy.argmax", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots_adjust", "numpy.zeros", "numpy.random.choice", "numpy.power", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "numpy.random.binomial", "numpy.errstate", "numpy.add.accumulate", "matplotlib.pyplot.ylabel", "matplotlib.use", "matplotlib.pyplot.subplots", "numpy.ones", "matplotlib.pyplot.xscale" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hxwork/OMNet
[ "be88a734e7327def365e1875bbc7cd2fea1539b0" ]
[ "common/manager.py" ]
[ "import os\r\nfrom collections import defaultdict\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom termcolor import colored\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\nfrom common import utils\r\n\r\n\r\nclass Manager():\r\n def __init__(self, model, optimizer, scheduler, params, dataloaders, logger):\r\n # params status\r\n self.params = params\r\n\r\n self.model = model\r\n self.optimizer = optimizer\r\n self.scheduler = scheduler\r\n self.dataloaders = dataloaders\r\n self.logger = logger\r\n\r\n self.epoch = 0\r\n self.step = 0\r\n self.best_val_score = np.inf\r\n self.cur_val_score = np.inf\r\n self.best_test_score = np.inf\r\n self.cur_test_score = np.inf\r\n\r\n # train status\r\n self.train_status = defaultdict(utils.AverageMeter)\r\n\r\n # val status\r\n self.val_status = defaultdict(utils.AverageMeter)\r\n\r\n # test status\r\n self.test_status = defaultdict(utils.AverageMeter)\r\n\r\n # model status\r\n self.loss_status = defaultdict(utils.AverageMeter)\r\n\r\n # init local tensorboard and html\r\n self.init_tb_and_html()\r\n\r\n def init_tb_and_html(self):\r\n # tensorboard loss\r\n local_tb_dir = os.path.join(self.params.model_dir, \"summary/loss\")\r\n os.makedirs(local_tb_dir, exist_ok=True)\r\n self.local_loss_writter = SummaryWriter(log_dir=local_tb_dir)\r\n # tensorboard metric\r\n local_tb_dir = os.path.join(self.params.model_dir, \"summary/metric\")\r\n os.makedirs(local_tb_dir, exist_ok=True)\r\n self.local_metric_writter = SummaryWriter(log_dir=local_tb_dir)\r\n # html\r\n local_html_dir = os.path.join(self.params.model_dir, \"summary/html\")\r\n os.makedirs(local_html_dir, exist_ok=True)\r\n self.local_html_dir = local_html_dir\r\n\r\n def update_step(self):\r\n self.step += 1\r\n\r\n def update_epoch(self):\r\n self.epoch += 1\r\n\r\n def update_loss_status(self, loss, batch_size):\r\n for k, v in loss.items():\r\n self.loss_status[k].update(val=v.item(), num=batch_size)\r\n\r\n def update_metric_status(self, metrics, split, batch_size):\r\n if split == \"val\":\r\n for k, v in metrics.items():\r\n self.val_status[k].update(val=v.item(), num=batch_size)\r\n self.cur_val_score = self.val_status[self.params.major_metric].avg\r\n elif split == \"test\":\r\n for k, v in metrics.items():\r\n self.test_status[k].update(val=v.item(), num=batch_size)\r\n self.cur_test_score = self.test_status[self.params.major_metric].avg\r\n else:\r\n raise ValueError(\"Wrong eval type: {}\".format(split))\r\n\r\n def summarize_metric_status(self, metrics, split):\r\n if split == \"val\":\r\n for k in metrics:\r\n if k.endswith('MSE'):\r\n self.val_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.val_status[k].avg))\r\n else:\r\n continue\r\n elif split == \"test\":\r\n for k in metrics:\r\n if k.endswith('MSE'):\r\n self.test_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.test_status[k].avg))\r\n else:\r\n continue\r\n else:\r\n raise ValueError(\"Wrong eval type: {}\".format(split))\r\n\r\n def reset_loss_status(self):\r\n for k, v in self.loss_status.items():\r\n self.loss_status[k].reset()\r\n\r\n def reset_metric_status(self, split):\r\n if split == \"val\":\r\n for k, v in self.val_status.items():\r\n self.val_status[k].reset()\r\n elif split == \"test\":\r\n for k, v in self.test_status.items():\r\n self.test_status[k].reset()\r\n else:\r\n raise ValueError(\"Wrong split string: {}\".format(split))\r\n\r\n def print_train_info(self):\r\n exp_name = self.params.model_dir.split('/')[-1]\r\n print_str = \"{} Epoch: {:4d}, lr={:.4f} \".format(exp_name, self.epoch, 
self.scheduler.get_last_lr()[0])\r\n print_str += \"total loss: %.4f(%.4f)\" % (self.loss_status['total'].val, self.loss_status['total'].avg)\r\n return print_str\r\n\r\n def print_metrics(self, split, title=\"Eval\", color=\"red\", only_best=False):\r\n if split == \"val\":\r\n metric_status = self.val_status\r\n is_best = self.cur_val_score < self.best_val_score\r\n elif split == \"test\":\r\n metric_status = self.test_status\r\n is_best = self.cur_test_score < self.best_test_score\r\n else:\r\n raise ValueError(\"Wrong split string: {}\".format(split))\r\n\r\n print_str = \" | \".join(\"{}: {:4g}\".format(k, v.avg) for k, v in metric_status.items())\r\n if only_best:\r\n if is_best:\r\n self.logger.info(colored(\"Best Epoch: {}, {} Results: {}\".format(self.epoch, title, print_str), color, attrs=[\"bold\"]))\r\n else:\r\n self.logger.info(colored(\"Epoch: {}, {} Results: {}\".format(self.epoch, title, print_str), color, attrs=[\"bold\"]))\r\n\r\n def write_loss_to_tb(self, split):\r\n for k, v in self.loss_status.items():\r\n if split == \"train\":\r\n self.local_loss_writter.add_scalar(\"train_Loss/{}\".format(k), v.val, self.step)\r\n elif split == \"val\":\r\n self.local_loss_writter.add_scalar(\"val_Loss/{}\".format(k), v.val, self.step)\r\n elif split == \"test\":\r\n self.local_loss_writter.add_scalar(\"test_Loss/{}\".format(k), v.val, self.step)\r\n else:\r\n raise ValueError(\"Wrong split string: {}\".format(split))\r\n\r\n def write_metric_to_tb(self, split):\r\n if split == \"val\":\r\n for k, v in self.val_status.items():\r\n self.local_metric_writter.add_scalar(\"val_Metric/{}\".format(k), v.avg, self.epoch)\r\n elif split == \"test\":\r\n for k, v in self.test_status.items():\r\n self.local_metric_writter.add_scalar(\"test_Metric/{}\".format(k), v.avg, self.epoch)\r\n else:\r\n raise ValueError(\"Wrong split string: {}\".format(split))\r\n\r\n def check_best_save_last_checkpoints(self, save_latest_freq=5, save_best_after=50):\r\n\r\n state = {\r\n \"state_dict\": self.model.state_dict(),\r\n \"optimizer\": self.optimizer.state_dict(),\r\n \"scheduler\": self.scheduler.state_dict(),\r\n \"step\": self.step,\r\n \"epoch\": self.epoch,\r\n }\r\n if self.dataloaders[\"val\"] is not None:\r\n state[\"best_val_score\"] = self.best_val_score\r\n if self.dataloaders[\"test\"] is not None:\r\n state[\"best_test_score\"] = self.best_test_score\r\n\r\n # save latest checkpoint\r\n if self.epoch % save_latest_freq == 0:\r\n latest_ckpt_name = os.path.join(self.params.model_dir, \"model_latest.pth\")\r\n torch.save(state, latest_ckpt_name)\r\n self.logger.info(\"Saved latest checkpoint to: {}\".format(latest_ckpt_name))\r\n\r\n # save val latest metrics, and check if val is best checkpoints\r\n if self.dataloaders[\"val\"] is not None:\r\n val_latest_metrics_name = os.path.join(self.params.model_dir, \"val_metrics_latest.json\")\r\n utils.save_dict_to_json(self.val_status, val_latest_metrics_name)\r\n is_best = self.cur_val_score < self.best_val_score\r\n if is_best:\r\n # save metrics\r\n self.best_val_score = self.cur_val_score\r\n best_metrics_name = os.path.join(self.params.model_dir, \"val_metrics_best.json\")\r\n utils.save_dict_to_json(self.val_status, best_metrics_name)\r\n self.logger.info(\"Current is val best, score={:.7f}\".format(self.best_val_score))\r\n # save checkpoint\r\n if self.epoch > save_best_after:\r\n best_ckpt_name = os.path.join(self.params.model_dir, \"val_model_best.pth\")\r\n torch.save(state, best_ckpt_name)\r\n self.logger.info(\"Saved val best checkpoint 
to: {}\".format(best_ckpt_name))\r\n\r\n # save test latest metrics, and check if test is best checkpoints\r\n if self.dataloaders[\"test\"] is not None:\r\n test_latest_metrics_name = os.path.join(self.params.model_dir, \"test_metrics_latest.json\")\r\n utils.save_dict_to_json(self.test_status, test_latest_metrics_name)\r\n is_best = self.cur_test_score < self.best_test_score\r\n if is_best:\r\n # save metrics\r\n self.best_test_score = self.cur_test_score\r\n best_metrics_name = os.path.join(self.params.model_dir, \"test_metrics_best.json\")\r\n utils.save_dict_to_json(self.test_status, best_metrics_name)\r\n self.logger.info(\"Current is test best, score={:.7f}\".format(self.best_test_score))\r\n # save checkpoint\r\n if self.epoch > save_best_after:\r\n best_ckpt_name = os.path.join(self.params.model_dir, \"test_model_best.pth\")\r\n torch.save(state, best_ckpt_name)\r\n self.logger.info(\"Saved test best checkpoint to: {}\".format(best_ckpt_name))\r\n\r\n def load_checkpoints(self):\r\n state = torch.load(self.params.restore_file)\r\n\r\n ckpt_component = []\r\n if \"state_dict\" in state and self.model is not None:\r\n try:\r\n self.model.load_state_dict(state[\"state_dict\"])\r\n except RuntimeError:\r\n print(\"Using custom loading net\")\r\n net_dict = self.model.state_dict()\r\n if \"module\" not in list(state[\"state_dict\"].keys())[0]:\r\n state_dict = {\"module.\" + k: v for k, v in state[\"state_dict\"].items() if \"module.\" + k in net_dict.keys()}\r\n else:\r\n state_dict = {k: v for k, v in state[\"state_dict\"].items() if k in net_dict.keys()}\r\n net_dict.update(state_dict)\r\n self.model.load_state_dict(net_dict, strict=False)\r\n ckpt_component.append(\"net\")\r\n\r\n if not self.params.only_weights:\r\n\r\n if \"optimizer\" in state and self.optimizer is not None:\r\n try:\r\n self.optimizer.load_state_dict(state[\"optimizer\"])\r\n\r\n except RuntimeError:\r\n print(\"Using custom loading optimizer\")\r\n optimizer_dict = self.optimizer.state_dict()\r\n state_dict = {k: v for k, v in state[\"optimizer\"].items() if k in optimizer_dict.keys()}\r\n optimizer_dict.update(state_dict)\r\n self.optimizer.load_state_dict(optimizer_dict)\r\n ckpt_component.append(\"opt\")\r\n\r\n if \"scheduler\" in state and self.train_status[\"scheduler\"] is not None:\r\n try:\r\n self.scheduler.load_state_dict(state[\"scheduler\"])\r\n\r\n except RuntimeError:\r\n print(\"Using custom loading scheduler\")\r\n scheduler_dict = self.scheduler.state_dict()\r\n state_dict = {k: v for k, v in state[\"scheduler\"].items() if k in scheduler_dict.keys()}\r\n scheduler_dict.update(state_dict)\r\n self.scheduler.load_state_dict(scheduler_dict)\r\n ckpt_component.append(\"sch\")\r\n\r\n if \"step\" in state:\r\n self.step = state[\"step\"] + 1\r\n ckpt_component.append(\"step\")\r\n\r\n if \"epoch\" in state:\r\n self.epoch = state[\"epoch\"] + 1\r\n ckpt_component.append(\"epoch\")\r\n\r\n if \"best_val_score\" in state:\r\n self.best_val_score = state[\"best_val_score\"]\r\n ckpt_component.append(\"best val score: {:.3g}\".format(self.best_val_score))\r\n\r\n if \"best_test_score\" in state:\r\n self.best_test_score = state[\"best_test_score\"]\r\n ckpt_component.append(\"best test score: {:.3g}\".format(self.best_test_score))\r\n\r\n ckpt_component = \", \".join(i for i in ckpt_component)\r\n self.logger.info(\"Loaded models from: {}\".format(self.params.restore_file))\r\n self.logger.info(\"Ckpt load: {}\".format(ckpt_component))\r\n" ]
[ [ "numpy.sqrt", "torch.save", "torch.utils.tensorboard.SummaryWriter", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shinhaha/tensorflow
[ "4647017a727985d64c5b0addee92f0ec516952c1" ]
[ "Inflearn_SungKim/1.LinearRegression/LinearRegression(placeholders).py" ]
[ "import tensorflow as tf\n\n#placeholder variable(scalar)\nX=tf.placeholder(tf.float32,shape=[None])\nY=tf.placeholder(tf.float32,shape=[None])\n\nW=tf.Variable(tf.random_normal([1]),name='weight')\nb=tf.Variable(tf.random_normal([1]),name='bias')\n\nhypothesis=X*W+b\n#average\ncost=tf.reduce_mean(tf.square(hypothesis-Y))\n\noptimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01)\n#minimize cost\ntrain=optimizer.minimize(cost)\n\nsess=tf.Session()\n#initialize var\nsess.run(tf.global_variables_initializer())\n\n#learning\nfor step in range(2001):\n cost_val,W_val,b_val,_=sess.run([cost,W,b,train],\n feed_dict={X:[1,2,3,4,5],Y:[2.1,3.1,4.1,5.1,6.1]})\n if step%20==0:\n print(step,cost_val,W_val,b_val)\n\n#evlauation\nprint(sess.run(hypothesis,feed_dict={X:[5]}))\nprint(sess.run(hypothesis,feed_dict={X:[2.5]}))\nprint(sess.run(hypothesis,feed_dict={X:[1.5,3.5]}))" ]
[ [ "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.square", "tensorflow.Session", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
tizian/layer-laboratory
[ "008cc94b76127e9eb74227fcd3d0145da8ddec30", "6beefe4b656504673b8d6c3f0e8eb198265c84de" ]
[ "src/films/tests/test_hdrfilm.py", "src/python/python/autodiff.py" ]
[ "import mitsuba\nimport pytest\nimport os\nimport enoki as ek\n\n\ndef test01_construct(variant_scalar_rgb):\n from mitsuba.core.xml import load_string\n\n # With default reconstruction filter\n film = load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\"></film>\"\"\")\n assert film is not None\n assert film.reconstruction_filter() is not None\n\n # With a provided reconstruction filter\n film = load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <rfilter type=\"gaussian\">\n <float name=\"stddev\" value=\"18.5\"/>\n </rfilter>\n </film>\"\"\")\n assert film is not None\n assert film.reconstruction_filter().radius() == (4 * 18.5)\n\n # Certain parameter values are not allowed\n with pytest.raises(RuntimeError):\n load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <string name=\"component_format\" value=\"uint8\"/>\n </film>\"\"\")\n with pytest.raises(RuntimeError):\n load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <string name=\"pixel_format\" value=\"brga\"/>\n </film>\"\"\")\n\n\ndef test02_crops(variant_scalar_rgb):\n from mitsuba.core.xml import load_string\n\n film = load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <integer name=\"width\" value=\"32\"/>\n <integer name=\"height\" value=\"21\"/>\n <integer name=\"crop_width\" value=\"11\"/>\n <integer name=\"crop_height\" value=\"5\"/>\n <integer name=\"crop_offset_x\" value=\"2\"/>\n <integer name=\"crop_offset_y\" value=\"3\"/>\n <boolean name=\"high_quality_edges\" value=\"true\"/>\n <string name=\"pixel_format\" value=\"rgba\"/>\n </film>\"\"\")\n assert film is not None\n assert ek.all(film.size() == [32, 21])\n assert ek.all(film.crop_size() == [11, 5])\n assert ek.all(film.crop_offset() == [2, 3])\n assert film.has_high_quality_edges()\n\n # Crop size doesn't adjust its size, so an error should be raised if the\n # resulting crop window goes out of bounds.\n incomplete = \"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <integer name=\"width\" value=\"32\"/>\n <integer name=\"height\" value=\"21\"/>\n <integer name=\"crop_offset_x\" value=\"30\"/>\n <integer name=\"crop_offset_y\" value=\"20\"/>\"\"\"\n with pytest.raises(RuntimeError):\n film = load_string(incomplete + \"</film>\")\n film = load_string(incomplete + \"\"\"\n <integer name=\"crop_width\" value=\"2\"/>\n <integer name=\"crop_height\" value=\"1\"/>\n </film>\"\"\")\n assert film is not None\n assert ek.all(film.size() == [32, 21])\n assert ek.all(film.crop_size() == [2, 1])\n assert ek.all(film.crop_offset() == [30, 20])\n\n\[email protected]('file_format', ['exr', 'rgbe', 'pfm'])\ndef test03_develop(variant_scalar_rgb, file_format, tmpdir):\n from mitsuba.core.xml import load_string\n from mitsuba.core import Bitmap, Struct, ReconstructionFilter, float_dtype\n from mitsuba.render import ImageBlock\n import numpy as np\n\n \"\"\"Create a test image. 
Develop it to a few file format, each time reading\n it back and checking that contents are unchanged.\"\"\"\n np.random.seed(12345 + ord(file_format[0]))\n # Note: depending on the file format, the alpha channel may be automatically removed.\n film = load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <integer name=\"width\" value=\"41\"/>\n <integer name=\"height\" value=\"37\"/>\n <string name=\"file_format\" value=\"{}\"/>\n <string name=\"pixel_format\" value=\"rgba\"/>\n <string name=\"component_format\" value=\"float32\"/>\n <rfilter type=\"box\"/>\n </film>\"\"\".format(file_format))\n # Regardless of the output file format, values are stored as XYZAW (5 channels).\n contents = np.random.uniform(size=(film.size()[1], film.size()[0], 5))\n # RGBE and will only reconstruct well images that have similar scales on\n # all channel (because exponent is shared between channels).\n if file_format is \"rgbe\":\n contents = 1 + 0.1 * contents\n # Use unit weights.\n contents[:, :, 4] = 1.0\n\n block = ImageBlock(film.size(), 5, film.reconstruction_filter())\n\n block.clear()\n for x in range(film.size()[1]):\n for y in range(film.size()[0]):\n block.put([y+0.5, x+0.5], contents[x, y, :])\n\n film.prepare(['X', 'Y', 'Z', 'A', 'W'])\n film.put(block)\n\n with pytest.raises(RuntimeError):\n # Should raise when the destination file hasn't been specified.\n film.develop()\n\n filename = str(tmpdir.join('test_image.' + file_format))\n film.set_destination_file(filename)\n film.develop()\n\n # Read back and check contents\n other = Bitmap(filename).convert(Bitmap.PixelFormat.XYZAW, Struct.Type.Float32, srgb_gamma=False)\n img = np.array(other, copy=False)\n\n if False:\n import matplotlib.pyplot as plt\n plt.figure()\n plt.subplot(1, 3, 1)\n plt.imshow(contents[:, :, :3])\n plt.subplot(1, 3, 2)\n plt.imshow(img[:, :, :3])\n plt.subplot(1, 3, 3)\n plt.imshow(ek.sum(ek.abs(img[:, :, :3] - contents[:, :, :3]), axis=2), cmap='coolwarm')\n plt.colorbar()\n plt.show()\n\n if file_format == \"exr\":\n assert ek.allclose(img, contents, atol=1e-5)\n else:\n if file_format == \"rgbe\":\n assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-2), \\\n '\\n{}\\nvs\\n{}\\n'.format(img[:4, :4, :3], contents[:4, :4, :3])\n else:\n assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-5)\n # Alpha channel was ignored, alpha and weights should default to 1.0.\n assert ek.allclose(img[:, :, 3:5], 1.0, atol=1e-6)\n", "from contextlib import contextmanager\nfrom typing import Union, Tuple\nimport enoki as ek\n\n\ndef _render_helper(scene, spp=None, sensor_index=0):\n \"\"\"\n Internally used function: render the specified Mitsuba scene and return a\n floating point array containing RGB values and AOVs, if applicable\n \"\"\"\n from mitsuba.core import (Float, UInt32, UInt64, Vector2f,\n is_monochromatic, is_rgb, is_polarized, DEBUG)\n from mitsuba.render import ImageBlock\n\n sensor = scene.sensors()[sensor_index]\n film = sensor.film()\n sampler = sensor.sampler()\n film_size = film.crop_size()\n if spp is None:\n spp = sampler.sample_count()\n\n total_sample_count = ek.hprod(film_size) * spp\n\n if sampler.wavefront_size() != total_sample_count:\n sampler.seed(ek.arange(UInt64, total_sample_count))\n\n pos = ek.arange(UInt32, total_sample_count)\n pos //= spp\n scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])\n pos = Vector2f(Float(pos % int(film_size[0])),\n Float(pos // int(film_size[0])))\n\n pos += sampler.next_2d()\n\n rays, weights = sensor.sample_ray_differential(\n time=0,\n 
sample1=sampler.next_1d(),\n sample2=pos * scale,\n sample3=0\n )\n\n spec, mask, aovs = scene.integrator().sample(scene, sampler, rays)\n spec *= weights\n del mask\n\n if is_polarized:\n from mitsuba.core import depolarize\n spec = depolarize(spec)\n\n if is_monochromatic:\n rgb = [spec[0]]\n elif is_rgb:\n rgb = spec\n else:\n from mitsuba.core import spectrum_to_xyz, xyz_to_srgb\n xyz = spectrum_to_xyz(spec, rays.wavelengths)\n rgb = xyz_to_srgb(xyz)\n del xyz\n\n aovs.insert(0, Float(1.0))\n for i in range(len(rgb)):\n aovs.insert(i + 1, rgb[i])\n del rgb, spec, weights, rays\n\n block = ImageBlock(\n size=film.crop_size(),\n channel_count=len(aovs),\n filter=film.reconstruction_filter(),\n warn_negative=False,\n warn_invalid=DEBUG,\n border=False\n )\n\n block.clear()\n block.put(pos, aovs)\n\n del pos\n del aovs\n\n data = block.data()\n\n ch = block.channel_count()\n i = UInt32.arange(ek.hprod(block.size()) * (ch - 1))\n\n weight_idx = i // (ch - 1) * ch\n values_idx = (i * ch) // (ch - 1) + 1\n\n weight = ek.gather(data, weight_idx)\n values = ek.gather(data, values_idx)\n\n return values / (weight + 1e-8)\n\n\ndef write_bitmap(filename, data, resolution, write_async=True):\n \"\"\"\n Write the linearized RGB image in `data` to a PNG/EXR/.. file with\n resolution `resolution`.\n \"\"\"\n import numpy as np\n from mitsuba.core import Bitmap, Struct\n\n if type(data).__name__ == 'Tensor':\n data = data.detach().cpu()\n\n data = np.array(data.numpy())\n data = data.reshape(resolution[1], resolution[0], -1)\n bitmap = Bitmap(data)\n if filename.endswith('.png') or \\\n filename.endswith('.jpg') or \\\n filename.endswith('.jpeg'):\n bitmap = bitmap.convert(Bitmap.PixelFormat.RGB,\n Struct.Type.UInt8, True)\n quality = 0 if filename.endswith('png') else -1\n\n if write_async:\n bitmap.write_async(filename, quality=quality)\n else:\n bitmap.write(filename, quality=quality)\n\n\ndef render(scene,\n spp: Union[None, int, Tuple[int, int]] = None,\n unbiased=False,\n optimizer: 'mitsuba.python.autodiff.Optimizer' = None,\n sensor_index=0):\n \"\"\"\n Perform a differentiable of the scene `scene`, returning a floating point\n array containing RGB values and AOVs, if applicable.\n\n Parameter ``spp`` (``None``, ``int``, or a 2-tuple ``(int, int)``):\n Specifies the number of samples per pixel to be used for rendering,\n overriding the value that is specified in the scene. If ``spp=None``,\n the original value takes precedence. If ``spp`` is a 2-tuple\n ``(spp_primal: int, spp_deriv: int)``, the first element specifies the\n number of samples for the *primal* pass, and the second specifies the\n number of samples for the *derivative* pass. See the explanation of the\n ``unbiased`` parameter for further detail on what these mean.\n\n Memory usage is roughly proportional to the ``spp``, value, hence this\n parameter should be reduced if you encounter out-of-memory errors.\n\n Parameter ``unbiased`` (``bool``):\n One potential issue when naively differentiating a rendering algorithm\n is that the same set of Monte Carlo sample is used to generate both the\n primal output (i.e. the image) along with derivative output. 
When the\n rendering algorithm and objective are jointly differentiated, we end up\n with expectations of products that do *not* satisfy the equality\n :math:`\\mathbb{E}[X Y]=\\mathbb{E}[X]\\, \\mathbb{E}[Y]` due to\n correlations between :math:`X` and :math:`Y` that result from this\n sample re-use.\n\n When ``unbiased=True``, the ``render()`` function will generate an\n *unbiased* estimate that de-correlates primal and derivative\n components, which boils down to rendering the image twice and naturally\n comes at some cost in performance :math:`(\\sim 1.6 \\times\\!)`. Often,\n biased gradients are good enough, in which case ``unbiased=False``\n should be specified instead.\n\n The number of samples per pixel per pass can be specified separately\n for both passes by passing a tuple to the ``spp`` parameter.\n\n Note that unbiased mode is only relevant for reverse-mode\n differentiation. It is not needed when visualizing parameter gradients\n in image space using forward-mode differentiation.\n\n Parameter ``optimizer`` (:py:class:`mitsuba.python.autodiff.Optimizer`):\n The optimizer referencing relevant scene parameters must be specified\n when ``unbiased=True``. Otherwise, there is no need to provide this\n parameter.\n\n Parameter ``sensor_index`` (``int``):\n When the scene contains more than one sensor/camera, this parameter\n can be specified to select the desired sensor.\n \"\"\"\n if unbiased:\n if optimizer is None:\n raise Exception('render(): unbiased=True requires that an '\n 'optimizer is specified!')\n if not type(spp) is tuple:\n spp = (spp, spp)\n\n with optimizer.disable_gradients():\n image = _render_helper(scene, spp=spp[0],\n sensor_index=sensor_index)\n image_diff = _render_helper(scene, spp=spp[1],\n sensor_index=sensor_index)\n ek.reattach(image, image_diff)\n else:\n if type(spp) is tuple:\n raise Exception('render(): unbiased=False requires that spp '\n 'is either an integer or None!')\n image = _render_helper(scene, spp=spp, sensor_index=sensor_index)\n\n return image\n\n\nclass Optimizer:\n \"\"\"\n Base class of all gradient-based optimizers (currently SGD and Adam)\n \"\"\"\n def __init__(self, params, lr):\n \"\"\"\n Parameter ``params``:\n dictionary ``(name: variable)`` of differentiable parameters to be\n optimized.\n\n Parameter ``lr``:\n learning rate\n \"\"\"\n self.set_learning_rate(lr)\n self.params = params\n if not params.all_differentiable():\n raise Exception('Optimizer.__init__(): all parameters should '\n 'be differentiable!')\n self.state = {}\n for k, p in self.params.items():\n ek.set_requires_gradient(p)\n self._reset(k)\n\n def set_learning_rate(self, lr):\n \"\"\"Set the learning rate.\"\"\"\n from mitsuba.core import Float\n # Ensure that the JIT compiler does merge 'lr' into the PTX code\n # (this would trigger a recompile every time it is changed)\n self.lr = lr\n self.lr_v = ek.detach(Float(lr, literal=False))\n\n @contextmanager\n def disable_gradients(self):\n \"\"\"Temporarily disable the generation of gradients.\"\"\"\n for _, p in self.params.items():\n ek.set_requires_gradient(p, False)\n try:\n yield\n finally:\n for _, p in self.params.items():\n ek.set_requires_gradient(p, True)\n\n\nclass SGD(Optimizer):\n \"\"\"\n Implements basic stochastic gradient descent with a fixed learning rate\n and, optionally, momentum :cite:`Sutskever2013Importance` (0.9 is a typical\n parameter value for the ``momentum`` parameter).\n\n The momentum-based SGD uses the update equation\n\n .. 
math::\n\n v_{i+1} = \\\\mu \\\\cdot v_i + g_{i+1}\n\n .. math::\n p_{i+1} = p_i + \\\\varepsilon \\\\cdot v_{i+1},\n\n where :math:`v` is the velocity, :math:`p` are the positions,\n :math:`\\\\varepsilon` is the learning rate, and :math:`\\\\mu` is\n the momentum parameter.\n \"\"\"\n\n def __init__(self, params, lr, momentum=0):\n \"\"\"\n Parameter ``lr``:\n learning rate\n\n Parameter ``momentum``:\n momentum factor\n \"\"\"\n assert momentum >= 0 and momentum < 1\n assert lr > 0\n self.momentum = momentum\n super().__init__(params, lr)\n\n def step(self):\n \"\"\" Take a gradient step \"\"\"\n for k, p in self.params.items():\n g_p = ek.gradient(p)\n size = ek.slices(g_p)\n if size == 0:\n continue\n\n if self.momentum != 0:\n if size != ek.slices(self.state[k]):\n # Reset state if data size has changed\n self._reset(k)\n\n self.state[k] = self.momentum * self.state[k] + g_p\n value = ek.detach(p) - self.lr_v * self.state[k]\n else:\n value = ek.detach(p) - self.lr_v * g_p\n\n value = type(p)(value)\n ek.set_requires_gradient(value)\n self.params[k] = value\n self.params.update()\n\n def _reset(self, key):\n \"\"\" Zero-initializes the internal state associated with a parameter \"\"\"\n if self.momentum == 0:\n return\n p = self.params[key]\n size = ek.slices(p)\n self.state[key] = ek.detach(type(p).zero(size))\n\n def __repr__(self):\n return ('SGD[\\n lr = %.2g,\\n momentum = %.2g\\n]') % \\\n (self.lr, self.momentum)\n\n\nclass Adam(Optimizer):\n \"\"\"\n Implements the Adam optimizer presented in the paper *Adam: A Method for\n Stochastic Optimization* by Kingman and Ba, ICLR 2015.\n \"\"\"\n def __init__(self, params, lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8):\n \"\"\"\n Parameter ``lr``:\n learning rate\n\n Parameter ``beta_1``:\n controls the exponential averaging of first\n order gradient moments\n\n Parameter ``beta_2``:\n controls the exponential averaging of second\n order gradient moments\n \"\"\"\n super().__init__(params, lr)\n\n assert 0 <= beta_1 < 1 and 0 <= beta_2 < 1 \\\n and lr > 0 and epsilon > 0\n\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.t = 0\n\n def step(self):\n \"\"\" Take a gradient step \"\"\"\n self.t += 1\n\n from mitsuba.core import Float\n lr_t = ek.detach(Float(self.lr * ek.sqrt(1 - self.beta_2**self.t) /\n (1 - self.beta_1**self.t), literal=False))\n\n for k, p in self.params.items():\n g_p = ek.gradient(p)\n size = ek.slices(g_p)\n\n if size == 0:\n continue\n elif size != ek.slices(self.state[k][0]):\n # Reset state if data size has changed\n self._reset(k)\n\n m_tp, v_tp = self.state[k]\n m_t = self.beta_1 * m_tp + (1 - self.beta_1) * g_p\n v_t = self.beta_2 * v_tp + (1 - self.beta_2) * ek.sqr(g_p)\n self.state[k] = (m_t, v_t)\n\n u = ek.detach(p) - lr_t * m_t / (ek.sqrt(v_t) + self.epsilon)\n u = type(p)(u)\n ek.set_requires_gradient(u)\n self.params[k] = u\n\n def _reset(self, key):\n \"\"\" Zero-initializes the internal state associated with a parameter \"\"\"\n p = self.params[key]\n size = ek.slices(p)\n self.state[key] = (ek.detach(type(p).zero(size)),\n ek.detach(type(p).zero(size)))\n\n def __repr__(self):\n return ('Adam[\\n'\n ' lr = %g,\\n'\n ' betas = (%g, %g),\\n'\n ' eps = %g\\n'\n ']' % (self.lr, self.beta_1, self.beta_2, self.epsilon))\n\n\ndef render_torch(scene, params=None, **kwargs):\n from mitsuba.core import Float\n # Delayed import of PyTorch dependency\n ns = globals()\n if 'render_torch_helper' in ns:\n render_torch = ns['render_torch_helper']\n else:\n import torch\n\n class 
Render(torch.autograd.Function):\n @staticmethod\n def forward(ctx, scene, params, *args):\n try:\n assert len(args) % 2 == 0\n args = dict(zip(args[0::2], args[1::2]))\n\n spp = None\n sensor_index = 0\n unbiased = True\n malloc_trim = False\n\n ctx.inputs = [None, None]\n for k, v in args.items():\n if k == 'spp':\n spp = v\n elif k == 'sensor_index':\n sensor_index = v\n elif k == 'unbiased':\n unbiased = v\n elif k == 'malloc_trim':\n malloc_trim = v\n elif params is not None:\n params[k] = type(params[k])(v)\n ctx.inputs.append(None)\n ctx.inputs.append(params[k] if v.requires_grad\n else None)\n continue\n\n ctx.inputs.append(None)\n ctx.inputs.append(None)\n\n if type(spp) is not tuple:\n spp = (spp, spp)\n\n result = None\n ctx.malloc_trim = malloc_trim\n\n if ctx.malloc_trim:\n torch.cuda.empty_cache()\n\n if params is not None:\n params.update()\n\n if unbiased:\n result = render(scene, spp=spp[0],\n sensor_index=sensor_index).torch()\n\n for v in ctx.inputs:\n if v is not None:\n ek.set_requires_gradient(v)\n\n ctx.output = render(scene, spp=spp[1],\n sensor_index=sensor_index)\n\n if result is None:\n result = ctx.output.torch()\n\n if ctx.malloc_trim:\n ek.cuda_malloc_trim()\n return result\n except Exception as e:\n print(\"render_torch(): critical exception during \"\n \"forward pass: %s\" % str(e))\n raise e\n\n @staticmethod\n def backward(ctx, grad_output):\n try:\n ek.set_gradient(ctx.output, ek.detach(Float(grad_output)))\n Float.backward()\n result = tuple(ek.gradient(i).torch() if i is not None\n else None\n for i in ctx.inputs)\n del ctx.output\n del ctx.inputs\n if ctx.malloc_trim:\n ek.cuda_malloc_trim()\n return result\n except Exception as e:\n print(\"render_torch(): critical exception during \"\n \"backward pass: %s\" % str(e))\n raise e\n\n render_torch = Render.apply\n ns['render_torch_helper'] = render_torch\n\n result = render_torch(scene, params,\n *[num for elem in kwargs.items() for num in elem])\n\n sensor_index = 0 if 'sensor_index' not in kwargs \\\n else kwargs['sensor_index']\n crop_size = scene.sensors()[sensor_index].film().crop_size()\n return result.reshape(crop_size[1], crop_size[0], -1)\n\n\ndel Union\ndel Tuple\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplot", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "torch.cuda.empty_cache" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kvzhao/pytorch-metric-learning
[ "9eb792bcfc1616b599e6ee457514e3cb3a7235dd" ]
[ "src/pytorch_metric_learning/utils/logging_presets.py" ]
[ "import logging\r\nfrom . import common_functions as c_f\r\nimport os\r\nimport torch\r\nfrom collections import defaultdict\r\nimport sqlite3\r\n\r\n# You can write your own hooks for logging.\r\n# But if you'd like something that just works, then use this HookContainer.\r\n# You'll need to install record-keeper and tensorboard.\r\n# pip install record-keeper tensorboard\r\n\r\nclass HookContainer: \r\n\r\n def __init__(self, record_keeper, \r\n record_group_name_prefix=None, \r\n primary_metric=\"mean_average_precision_at_r\", \r\n validation_split_name=\"val\"):\r\n self.record_keeper = record_keeper\r\n self.record_group_name_prefix = record_group_name_prefix\r\n self.saveable_trainer_objects = [\"models\", \"optimizers\", \"lr_schedulers\", \"loss_funcs\", \"mining_funcs\"]\r\n self.primary_metric = primary_metric\r\n self.validation_split_name = validation_split_name\r\n\r\n ############################################\r\n ############################################\r\n ################## HOOKS #################\r\n ############################################\r\n ############################################\r\n\r\n ### Define the end_of_iteration hook. This will be executed at the end of every iteration. ###\r\n def end_of_iteration_hook(self, trainer):\r\n record_these = [[trainer.loss_tracker.losses, {\"input_group_name_for_non_objects\": \"loss_histories\"}],\r\n [trainer.loss_tracker.loss_weights, {\"input_group_name_for_non_objects\": \"loss_weights\"}],\r\n [trainer.loss_funcs, {\"recursive_types\": [torch.nn.Module]}],\r\n [trainer.mining_funcs, {}],\r\n [trainer.models, {}],\r\n [trainer.optimizers, {\"custom_attr_func\": self.optimizer_custom_attr_func}]]\r\n for record, kwargs in record_these:\r\n self.record_keeper.update_records(record, trainer.get_global_iteration(), **kwargs)\r\n\r\n # This hook will be passed into the trainer and will be executed at the end of every epoch.\r\n def end_of_epoch_hook(self, tester, dataset_dict, model_folder, test_interval=1, patience=None, test_collate_fn=None):\r\n if not self.primary_metric in tester.accuracy_calculator.get_curr_metrics():\r\n raise ValueError(\"HookContainer `primary_metric` must be one of: {}\".format(tester.accuracy_calculator.get_curr_metrics()))\r\n if not os.path.exists(model_folder): os.makedirs(model_folder)\r\n def actual_hook(trainer):\r\n continue_training = True\r\n if trainer.epoch % test_interval == 0:\r\n best_epoch = self.save_models_and_eval(trainer, dataset_dict, model_folder, test_interval, tester, test_collate_fn)\r\n continue_training = self.patience_remaining(trainer.epoch, best_epoch, patience)\r\n return continue_training\r\n return actual_hook\r\n\r\n def end_of_testing_hook(self, tester):\r\n for split_name, accuracies in tester.all_accuracies.items():\r\n epoch = accuracies[\"epoch\"]\r\n self.record_keeper.update_records(accuracies, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))\r\n _, _, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, split_name, epoch)\r\n best = {\"best_epoch\":best_epoch, \"best_accuracy\": best_accuracy}\r\n self.record_keeper.update_records(best, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name)) \r\n\r\n for split_name, u in tester.dim_reduced_embeddings.items():\r\n for k, (dim_reduced, labels) in u.items():\r\n tag = '%s/%s'%(self.record_group_name(tester, split_name), k)\r\n self.record_keeper.add_embedding_plot(dim_reduced, labels, tag, epoch)\r\n\r\n\r\n\r\n 
############################################\r\n ############################################\r\n ######### MODEL LOADING AND SAVING #########\r\n ############################################\r\n ############################################\r\n\r\n def load_latest_saved_models(self, trainer, model_folder, device=None, best=False):\r\n if device is None: device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n resume_epoch, model_suffix = c_f.latest_version(model_folder, \"trunk_*.pth\", best=best)\r\n if resume_epoch > 0:\r\n for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:\r\n c_f.load_dict_of_models(obj_dict, model_suffix, model_folder, device, log_if_successful=True)\r\n return resume_epoch + 1\r\n\r\n\r\n def save_models(self, trainer, model_folder, curr_suffix, prev_suffix=None):\r\n for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:\r\n c_f.save_dict_of_models(obj_dict, curr_suffix, model_folder)\r\n if prev_suffix is not None:\r\n c_f.delete_dict_of_models(obj_dict, prev_suffix, model_folder) \r\n\r\n def save_models_and_eval(self, trainer, dataset_dict, model_folder, test_interval, tester, collate_fn):\r\n epoch = trainer.epoch\r\n tester.test(dataset_dict, epoch, trainer.models[\"trunk\"], trainer.models[\"embedder\"], list(dataset_dict.keys()), collate_fn)\r\n prev_best_epoch, _ = self.get_best_epoch_and_accuracy(tester, self.validation_split_name)\r\n is_new_best, curr_accuracy, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, self.validation_split_name, epoch)\r\n self.record_keeper.save_records()\r\n trainer.step_lr_plateau_schedulers(curr_accuracy)\r\n self.save_models(trainer, model_folder, epoch, epoch-test_interval) # save latest model\r\n if is_new_best:\r\n logging.info(\"New best accuracy! 
{}\".format(curr_accuracy))\r\n curr_suffix = \"best%d\"%best_epoch\r\n prev_suffix = \"best%d\"%prev_best_epoch if prev_best_epoch is not None else None\r\n self.save_models(trainer, model_folder, curr_suffix, prev_suffix) # save best model \r\n return best_epoch\r\n\r\n def is_new_best_accuracy(self, tester, split_name, epoch):\r\n curr_accuracy = self.get_curr_primary_metric(tester, split_name)\r\n best_epoch, best_accuracy = self.get_best_epoch_and_accuracy(tester, split_name)\r\n is_new_best = False\r\n if (curr_accuracy > best_accuracy) or (best_epoch is None):\r\n best_epoch, best_accuracy = epoch, curr_accuracy\r\n is_new_best = True\r\n return is_new_best, curr_accuracy, best_epoch, best_accuracy \r\n\r\n\r\n ############################################\r\n ############################################\r\n ##### BEST EPOCH AND ACCURACY TRACKING #####\r\n ############################################\r\n ############################################\r\n\r\n\r\n def get_loss_history(self, loss_names=()):\r\n columns = \"*\" if len(loss_names) == 0 else \", \".join(loss_names)\r\n table_name = \"loss_histories\"\r\n if not self.record_keeper.table_exists(table_name):\r\n return {}\r\n output = self.record_keeper.query(\"SELECT {} FROM {}\".format(columns, table_name), return_dict=True)\r\n output.pop(\"id\", None)\r\n return output\r\n\r\n\r\n def get_accuracy_history(self, tester, split_name, return_all_metrics=False, metrics=()):\r\n table_name = self.record_group_name(tester, split_name)\r\n\r\n if not self.record_keeper.table_exists(table_name):\r\n return {}\r\n\r\n def get_accuracies(keys):\r\n keys = \"*\" if return_all_metrics else \"epoch, %s\"%keys\r\n query = \"SELECT {} FROM {}\".format(keys, table_name)\r\n return self.record_keeper.query(query, return_dict=True)\r\n\r\n keys = metrics if len(metrics) > 0 else [self.primary_metric]\r\n output = self.try_keys(keys, tester, get_accuracies)\r\n output.pop(\"id\", None)\r\n return output\r\n\r\n\r\n def get_curr_primary_metric(self, tester, split_name):\r\n def get_curr(key):\r\n return tester.all_accuracies[split_name][key]\r\n return self.try_primary_metric(tester, get_curr)\r\n\r\n def try_keys(self, input_keys, tester, input_func):\r\n for average in [True, False]:\r\n keys = \", \".join([tester.accuracies_keyname(k, average=average, label_hierarchy_level=tester.label_hierarchy_level) for k in input_keys])\r\n try:\r\n return input_func(keys)\r\n except (KeyError, sqlite3.OperationalError):\r\n pass\r\n raise KeyError \r\n\r\n def try_primary_metric(self, tester, input_func):\r\n return self.try_keys([self.primary_metric], tester, input_func)\r\n\r\n # returns accuracies of a specified epoch\r\n def get_accuracies_of_epoch(self, tester, split_name, epoch, select_all=True):\r\n table_name = self.record_group_name(tester, split_name)\r\n if not self.record_keeper.table_exists(table_name):\r\n return []\r\n def get_accuracies(key):\r\n columns = \"*\" if select_all else \"epoch, %s\"%key\r\n query = \"SELECT %s FROM %s WHERE epoch=?\"%(columns, table_name)\r\n return self.record_keeper.query(query, (epoch, ))\r\n return self.try_primary_metric(tester, get_accuracies)\r\n\r\n # returns accuracies of best epoch and the metric name used to determine best acuracy\r\n def get_accuracies_of_best_epoch(self, tester, split_name, select_all=True, ignore_epoch=(-1,)):\r\n table_name = self.record_group_name(tester, split_name)\r\n if not self.record_keeper.table_exists(table_name):\r\n return [], None \r\n def get_accuracies(key):\r\n 
columns = \"*\" if select_all else \"epoch, %s\"%key\r\n params = \", \".join([\"?\"]*len(ignore_epoch))\r\n query = \"\"\"SELECT {0} FROM {1} WHERE {2}=\r\n (SELECT max({2}) FROM {1} WHERE epoch NOT IN ({3}))\r\n AND epoch NOT IN ({3})\"\"\".format(columns, table_name, key, params)\r\n output = self.record_keeper.query(query, ignore_epoch+ignore_epoch)\r\n return output, key\r\n return self.try_primary_metric(tester, get_accuracies)\r\n\r\n def get_best_epoch_and_accuracy(self, tester, split_name, ignore_epoch=(-1,)):\r\n accuracies, key = self.get_accuracies_of_best_epoch(tester, split_name, select_all=False, ignore_epoch=ignore_epoch)\r\n if len(accuracies) > 0:\r\n return accuracies[0][\"epoch\"], accuracies[0][key]\r\n return None, 0\r\n\r\n def patience_remaining(self, epoch, best_epoch, patience):\r\n if patience is not None and best_epoch is not None:\r\n if epoch - best_epoch > patience:\r\n logging.info(\"Validation accuracy has plateaued. Exiting.\")\r\n return False\r\n return True\r\n\r\n def run_tester_separately(self, tester, dataset_dict, epoch, trunk, embedder, splits_to_eval=None, collate_fn=None, skip_eval_if_already_done=True):\r\n if skip_eval_if_already_done:\r\n splits_to_eval = self.get_splits_to_eval(tester, dataset_dict, epoch, splits_to_eval)\r\n if len(splits_to_eval) == 0:\r\n logging.info(\"Already evaluated\")\r\n return False\r\n tester.test(dataset_dict, epoch, trunk, embedder, splits_to_eval, collate_fn)\r\n return True\r\n\r\n def get_splits_to_eval(self, tester, dataset_dict, epoch, input_splits_to_eval):\r\n input_splits_to_eval = list(dataset_dict.keys()) if input_splits_to_eval is None else input_splits_to_eval\r\n splits_to_eval = []\r\n for split in input_splits_to_eval:\r\n if len(self.get_accuracies_of_epoch(tester, split, epoch)) == 0:\r\n splits_to_eval.append(split)\r\n return splits_to_eval\r\n\r\n def base_record_group_name(self, tester):\r\n base_record_group_name = \"%s_\"%self.record_group_name_prefix if self.record_group_name_prefix else ''\r\n base_record_group_name += tester.description_suffixes(\"accuracies\")\r\n return base_record_group_name\r\n\r\n def record_group_name(self, tester, split_name):\r\n base_record_group_name = self.base_record_group_name(tester)\r\n return \"%s_%s\"%(base_record_group_name, split_name.upper())\r\n\r\n def optimizer_custom_attr_func(self, optimizer):\r\n return {\"lr\": optimizer.param_groups[0][\"lr\"]}\r\n\r\n\r\n\r\nclass EmptyContainer:\r\n def end_of_epoch_hook(self, *args):\r\n return None\r\n end_of_iteration_hook = None\r\n end_of_testing_hook = None\r\n\r\n\r\n\r\ndef get_record_keeper(csv_folder, tensorboard_folder, global_db_path=None, experiment_name=None, is_new_experiment=True, save_figures=False, save_lists=False):\r\n try:\r\n import record_keeper as record_keeper_package\r\n from torch.utils.tensorboard import SummaryWriter\r\n record_writer = record_keeper_package.RecordWriter(folder = csv_folder, \r\n global_db_path = global_db_path, \r\n experiment_name = experiment_name, \r\n is_new_experiment = is_new_experiment, \r\n save_lists = save_lists)\r\n tensorboard_writer = SummaryWriter(log_dir=tensorboard_folder)\r\n record_keeper = record_keeper_package.RecordKeeper(tensorboard_writer = tensorboard_writer, \r\n record_writer = record_writer, \r\n attributes_to_search_for = c_f.list_of_recordable_attributes_list_names(),\r\n save_figures=save_figures)\r\n return record_keeper, record_writer, tensorboard_writer\r\n\r\n except ModuleNotFoundError as e:\r\n logging.warn(e)\r\n 
logging.warn(\"There won't be any logging or model saving.\")\r\n logging.warn(\"To fix this, pip install record-keeper tensorboard\")\r\n return None, None, None\r\n\r\n\r\ndef get_hook_container(record_keeper, **kwargs):\r\n if record_keeper:\r\n return HookContainer(record_keeper, **kwargs)\r\n else:\r\n logging.warn(\"No record_keeper, so no preset hooks are being returned.\")\r\n return EmptyContainer()\r\n" ]
[ [ "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]