repo_name (string, lengths 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
macdaliot/exist | [
"65244f79c602c5a00c3ea6a7eef512ce9c21e60a"
] | [
"scripts/insert2db/reputation/plugins/dshield_medium.py"
] | [
"import sys\nimport os\nimport configparser\nimport requests\nimport pandas as pd\nimport hashlib\nfrom io import StringIO\nfrom datetime import datetime, timezone\n\n## Django Setup\nimport django\nimport pymysql\npymysql.install_as_MySQLdb()\nconffile = os.path.join(os.path.dirname(__file__), \"../../conf/insert2db.conf\")\nconf = configparser.SafeConfigParser()\nconf.read(conffile)\nsys.path.append(conf.get('exist', 'syspath'))\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intelligence.settings')\ndjango.setup()\nfrom apps.reputation.models import blacklist\nimport django.utils.timezone as tzone\nfrom django.db import IntegrityError\n\n## Logger Setup\nfrom logging import getLogger, DEBUG, NullHandler\nlogger = getLogger(__name__)\nlogger.addHandler(NullHandler())\nlogger.setLevel(DEBUG)\nlogger.propagate = True\n\nDataDir = os.path.join(os.path.dirname(__file__), '../data/')\n\nclass Tracker():\n def __init__(self):\n self.name = 'Dshield_Medium'\n self.ID = 222\n self.URL = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt'\n self.DataFilePath = DataDir + 'dshield/suspiciousdomains_Medium.txt'\n self.header = [\n 'domain',\n ]\n\n def cmpFiles(self, oldfile, newtext):\n diffline = ''\n if not os.path.exists(oldfile):\n f = open(oldfile, 'w')\n f.close()\n oldsets = set(open(oldfile).readlines())\n newsets = set(newtext.replace('\\r\\n','\\n').splitlines(True))\n results = newsets.difference(oldsets)\n for result in results:\n diffline += result\n return diffline[:-1]\n\n def delComment(self, s):\n result = ''\n for line in s.splitlines(True):\n if not line.startswith('#') \\\n and line != \"Site\\n\":\n result += line\n return result\n\n def makeDataframe(self):\n df = pd.DataFrame()\n newline = ''\n try:\n res = requests.get(self.URL)\n if res.status_code != 200:\n return df\n newline = self.cmpFiles(self.DataFilePath, res.text)\n newline = self.delComment(newline)\n except Exception as e:\n logger.error(e)\n if not newline == '':\n open(self.DataFilePath, 'w').write(res.text)\n df = pd.read_csv(StringIO(newline), names=self.header)\n return df\n\n def parse(self):\n logger.info(\"start parsing: %s\", self.name)\n\n df = self.makeDataframe()\n queries = []\n if not df.empty:\n for i, v in df.iterrows():\n line = str(self.ID) + \",\"\n line += str(v.values)\n md5 = hashlib.md5(line.encode('utf-8')).hexdigest()\n try:\n query = blacklist(\n id = md5,\n domain = v.domain,\n datetime = tzone.now(),\n source = self.ID,\n referrer = 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt',\n )\n except Exception as e:\n logger.error(\"%s: %s\", e, line)\n queries.append(query)\n else:\n logger.info(\"no update\")\n\n logger.info(\"done parsing: %s, %s queries were parsed\", self.name, len(queries))\n return queries\n\n"
] | [
[
"pandas.DataFrame"
]
] |
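The plugin above only parses feed lines that are new relative to the locally cached copy, then strips comment lines before loading them with pandas. Below is a minimal sketch of that diff-then-parse step; `old_feed` and `new_feed` are hypothetical stand-ins for the cached file and the freshly downloaded feed text, and the sketch omits the plugin's extra "Site" header check.

```python
# Minimal sketch of the diff-then-parse step used in dshield_medium.py above.
from io import StringIO
import pandas as pd

old_feed = "# header comment\nexample.com\n"
new_feed = "# header comment\nexample.com\nbad.example.net\n"

old_lines = set(old_feed.splitlines(True))
new_lines = [
    line for line in new_feed.replace("\r\n", "\n").splitlines(True)
    if line not in old_lines and not line.startswith("#")
]

# one row per newly observed domain, matching the plugin's header=['domain']
df = pd.read_csv(StringIO("".join(new_lines)), names=["domain"])
print(df)
```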
derekdylu/mgt2001 | [
"b228d5e75e75a2f3f170e35db1bea999b765bec8"
] | [
"mgt2001/hyp/non.py"
] | [
"from matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport math\nimport scipy.stats as stats\n\n\ndef inter_p_value(p_value):\n # interpretation\n if p_value >= 0 and p_value < 0.01:\n inter_p = 'Overwhelming Evidence'\n elif p_value >= 0.01 and p_value < 0.05:\n inter_p = 'Strong Evidence'\n elif p_value >= 0.05 and p_value < 0.1:\n inter_p = 'Weak Evidence'\n elif p_value >= .1:\n inter_p = 'No Evidence'\n return inter_p\n\n\ndef grank(data):\n if type(data) == np.ndarray or type(data) == list:\n alldata = data.copy()\n data = data.copy()\n else:\n alldata = data.values.copy()\n data = data.values.copy()\n alldata.sort()\n tmp_df = pd.DataFrame({'value': alldata})\n tmp_df['rank'] = tmp_df.index + 1\n value_to_rank = tmp_df.groupby('value').mean().reset_index()\n samp = pd.DataFrame({'value': data})\n samp = pd.merge(samp, value_to_rank, how='left')\n return samp['rank']\n\n\ndef ranksum_z_test(df=None, to_compute='', alternative=None, precision=4, alpha=0.05):\n \"\"\"\n df can only have two columns and df.shape[0] > 10\n alternative has three options: 'two-sided', 'less', 'greater'\n \"\"\"\n # sort all data points by values\n tmp_values = df.values.reshape(-1)\n tmp_values = tmp_values[~np.isnan(tmp_values)]\n tmp_values.sort()\n\n # assign ranks\n updated_df = pd.DataFrame({'value': tmp_values})\n updated_df['rank'] = updated_df.index + 1\n\n # average rank for identical value\n updated_df = updated_df.groupby('value').mean().reset_index()\n # display(updated_df)\n\n # Compute Sum of Ranks\n samp1 = pd.DataFrame({'value': df[to_compute].dropna().values})\n samp1 = pd.merge(samp1, updated_df)\n T = samp1['rank'].sum()\n\n # compute mean and standard deviation\n n1 = df.iloc[:, 0].dropna().shape[0]\n n2 = df.iloc[:, 1].dropna().shape[0]\n\n E_T = n1*(n1+n2+1)/2\n\n sigmaT = (n1*n2*(n1+n2+1)/12) ** 0.5\n z = (T-E_T)/sigmaT\n # compute p-value\n # right (greater)\n p_value = 1 - stats.norm.cdf(z)\n\n if alternative == 'greater':\n pass\n elif alternative == 'less':\n p_value = stats.norm.cdf(z)\n elif alternative == 'two-sided':\n # two-tail\n if p_value > 0.5:\n p_value = stats.norm.cdf(z)\n p_value *= 2\n flag = False\n if p_value < alpha:\n flag = True\n\n result = f'''======= z-test =======\nT (sum of ranks) = {T}\n(n1, n2) = ({n1}, {n2})\nmu_t = {E_T}\nsigma_t = {sigmaT}\nz statistic value (observed) = {z:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 ({alternative}) → {flag}\n'''\n print(result)\n result_dict = {'T': T, 'ET': E_T,\n 'sigmaT': sigmaT, 'z': z, 'p-value': p_value}\n return updated_df, result_dict\n\n\ndef sign_binom_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):\n n = diff.size - np.sum(diff == 0)\n\n if sign == '+':\n sign_count = np.sum(diff > 0)\n else:\n sign_count = np.sum(diff < 0)\n\n if alternative == 'greater' or alternative == 'less':\n # 如果超過一半就要切換\n if sign_count > n / 2:\n p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5)\n else:\n p_value = stats.binom.cdf(sign_count, n=n, p=0.5)\n elif alternative == 'two-sided':\n p_value = stats.binom.cdf(sign_count, n=n, p=0.5)\n if p_value > 0.5:\n p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5)\n\n p_value *= 2\n\n flag = False\n if p_value < alpha:\n flag = True\n\n result = f'''======= Sign Test - Binomial Distribution =======\n(For small sample size (<= 10))\n\nTargeted Sign: {sign}\nn = {n}\nSign counts = {sign_count}\n\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 
({alternative}) → {flag}\n '''\n print(result)\n return sign_count, p_value\n\n\ndef sign_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):\n diff = diff[~(diff == 0)]\n n = len(diff)\n\n if sign == '+':\n T = np.sum(diff > 0)\n else:\n T = np.sum(diff < 0)\n z_stat = (T - 0.5 * n) / (.5 * (n ** 0.5))\n # right tail\n if alternative == 'greater':\n p_value = 1 - stats.norm.cdf(z_stat)\n elif alternative == 'less':\n p_value = stats.norm.cdf(z_stat)\n elif alternative == 'two-sided':\n p_value = 1 - stats.norm.cdf(z_stat)\n if p_value > 0.5:\n p_value = stats.norm.cdf(z_stat)\n p_value *= 2\n flag = False\n if p_value < alpha:\n flag = True\n result = f'''======= Sign Test - z Statistic =======\n(For large sample size (> 10))\n\nTargeted Sign: {sign}\nn = {n}\nSign counts = {T}\n\nz statistic = {z_stat:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 ({alternative}) → {flag}\n '''\n print(result)\n\n return T, p_value\n\n\ndef wilcoxon_signed_ranksum_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):\n\n diff = diff[~(diff == 0)]\n n = len(diff)\n\n diff_abs = np.sort(np.abs(diff).to_numpy())\n\n updated_diff = pd.DataFrame({'diff_abs': diff_abs})\n updated_diff['rank'] = updated_diff.index + 1\n updated_diff = updated_diff.groupby('diff_abs').mean().reset_index()\n\n new_df = pd.DataFrame({'diff': diff, 'diff_abs': np.abs(diff)})\n new_df = pd.merge(new_df, updated_diff)\n\n if sign == '+':\n T = np.sum(new_df['rank'][new_df['diff'] > 0])\n else:\n T = np.sum(new_df['rank'][new_df['diff'] < 0])\n\n E_T = n * (n + 1) / 4\n sigma_T = (n * (n + 1) * (2 * n + 1) / 24) ** 0.5\n\n z_stat = (T - E_T) / sigma_T\n\n if alternative == 'greater':\n # right tail test\n p_value = 1 - stats.norm.cdf(z_stat)\n elif alternative == 'less':\n # left tail test\n p_value = stats.norm.cdf(z_stat)\n elif alternative == 'two-sided':\n # two-tailed test\n p_value = 1 - stats.norm.cdf(z_stat)\n if p_value > 0.5:\n p_value = stats.norm.cdf(z_stat)\n p_value *= 2\n\n flag = False\n if p_value < alpha:\n flag = True\n\n result = f'''======= Wilcoxon Signed Rank Sum Test - z Statistic =======\n(For large sample size (> 30))\n\nTargeted Sign: {sign}\nn = {n}\nSum of rank (T statistic) = {T}\n\nmu_t = {E_T}\nsigma_t = {sigma_T}\n\nz statistic value (observed) = {z_stat:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 ({alternative}) → {flag}\n '''\n print(result)\n\n result_dict = {'n': n, 'T': T, 'E_T': E_T,\n 'sigma_T': sigma_T, 'z_stat': z_stat, 'p_value': p_value}\n\n return new_df, result_dict\n\n\ndef kruskal_chi2_test(data=None, alpha=0.05, precision=4):\n \"\"\"\n col = 要比較的 target\n row = data for each target\n \"\"\"\n if type(data) == pd.DataFrame:\n data = data.copy().to_numpy()\n alldata = np.concatenate(data.copy())\n else:\n alldata = np.concatenate(data.copy())\n\n k = data.shape[1]\n alldata.sort()\n\n tmp_df = pd.DataFrame(({'value': alldata}))\n tmp_df['rank'] = tmp_df.index + 1 # rank\n value_to_rank = tmp_df.groupby('value').mean().reset_index()\n T = []\n sample_rank_df = []\n for i in range(k):\n\n samp = pd.DataFrame(\n {'value': data[:, i][~np.isnan(data[:, i])]})\n\n samp = pd.merge(samp, value_to_rank)\n sample_rank_df.append(samp)\n T.append(samp['rank'].sum())\n\n n = [len(data[:, i][~np.isnan(data[:, i])]) for i in range(k)]\n\n # print(T)\n # print(n)\n\n rule_of_five_str = \"\"\n if (np.sum(np.array(n) < 5) > 0):\n rule_of_five_str += \"!(At least one sample size is 
less than 5)\"\n else:\n rule_of_five_str += \"(All sample size >= 5)\"\n\n N = np.sum(n)\n\n t_over_n = 0\n\n for i in range(k):\n t_over_n += T[i] ** 2 / n[i]\n\n H = 12 / N / (N + 1) * t_over_n - 3 * (N + 1)\n p_value = 1 - stats.chi2.cdf(H, k - 1)\n chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)\n\n result_dict = {'H': H, 'p-value': p_value,\n 'T': T, 'sample_rank_df': sample_rank_df}\n flag = p_value < alpha\n\n result = f'''======= Kruskal-Wallis Test with Chi-squared Test =======\n{rule_of_five_str}\n\nH statistic value (observed) = {H:.{precision}f}\nchi2 critical value = {chi2_stat:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 (Not all {k} population locations are the same) → {flag}\n '''\n print(result)\n return result_dict\n\n\ndef friedman_chi2_test(data=None, alpha=0.05, precision=4):\n \"\"\"\n col = 要比較的 target\n row = blocked data for each target\n \"\"\"\n if type(data) == np.ndarray:\n data = pd.DataFrame(data)\n\n new_df = data.apply(grank, axis=1)\n b, k = new_df.shape\n\n rule_of_five_str = \"\"\n if (b < 5 and k < 5):\n rule_of_five_str += f\"!(Number of blocks = {b} < 5 and number of populations = {k} < 5)\"\n else:\n rule_of_five_str += f\"(Number of blocks = {b} >= 5 or number of populations {k} >= 5)\"\n\n T = new_df.sum().to_numpy()\n\n F_r = 12 / b / k / (k + 1) * np.sum(T ** 2) - 3 * b * (k + 1)\n p_value = 1 - stats.chi2.cdf(F_r, k - 1)\n chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)\n\n result_dict = {'F_r': F_r, 'p-value': p_value,\n 'T': T, 'sample_ranked_df': new_df}\n flag = p_value < alpha\n\n result = f'''======= Friedman Test with Chi-squared Test =======\n{rule_of_five_str}\n\nF_r statistic value (observed) = {F_r:.{precision}f}\nchi2 critical value = {chi2_stat:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 (Not all {k} population locations are the same) → {flag}\n '''\n print(result)\n return result_dict\n\n\ndef pearson_test(data=None, a=None, b=None, alpha=0.05, precision=4):\n \"\"\"\n a, b 還不能傳入東西\n Make sure that data is in the form of [a, b]\n \"\"\"\n cov_mat = np.cov(data.values, rowvar=False)\n cor_mat = np.corrcoef(data.values, rowvar=False)\n cov = cov_mat[0][1]\n cor = cor_mat[0][1]\n\n n = data.shape[0]\n d_of_f = n - 2\n t_c = stats.t.ppf(1 - alpha / 2, df=d_of_f)\n t_stat = cor * (((n - 2) / (1 - cor ** 2)) ** 0.5)\n\n flag = abs(t_stat) > t_c\n result_dict = {'cov': cov, 't_stat': t_stat, 'cor': cor, 't_c': t_c}\n results = f\"\"\"======= Pearson Correlation Coefficient =======\nCovariance: {cov:.{precision}f}\nCoefficient of Correlation: {cor:.{precision}f}\n\nt (Critical Value) = {t_c:.{precision}f}\nt (Observed Value) = {t_stat:.{precision}f}\n\nReject H_0 (There are linear relationship between two variables) → {flag}\n\"\"\"\n\n print(results)\n\n return result_dict\n\n\ndef spearman_test(a=None, b=None, alpha=0.05, precision=4):\n spearman_restult_cor, spearman_restult_p_value = stats.spearmanr(a, b)\n # print(f'Correlation = {cor:.4f}, p-value={p_value:.4f}')\n n = len(a)\n\n rule_of_30_str = ''\n\n results = f\"\"\"======= Spearman Rank Correlation Coefficient =======\n[scipy.stats.spearmanr]\nCoefficient of Correlation: {spearman_restult_cor:.{precision}f}\np-value={spearman_restult_p_value:.{precision}f} ({inter_p_value(spearman_restult_p_value)})\n\"\"\"\n\n if (n < 30):\n rule_of_30_str += f\"!(n = {n} < 30)\"\n flag = spearman_restult_p_value < alpha\n results += f\"\"\"\nReject H_0 (There are relationship between two variables) → {flag}\n 
\"\"\"\n result_dict = {'spearman_result': [\n spearman_restult_cor, spearman_restult_p_value]}\n else:\n rule_of_30_str += f\"(n = {n} >= 30)\"\n flag = spearman_restult_p_value < alpha\n results += f\"\"\"\nReject H_0 (There are relationship between two variables) → {flag}\n \"\"\"\n z_stat = spearman_restult_cor * ((n - 1) ** 0.5)\n z_cv = stats.norm.ppf(1 - alpha/2)\n p_value = stats.norm.sf(z_stat) * 2\n if p_value > 1:\n p_value = stats.norm.cdf(z_stat) * 2\n flag = p_value < alpha\n results += f\"\"\"\n[z test statistic]\n{rule_of_30_str}\n\nr_s: {spearman_restult_cor:.{precision}f} (using spearmanr's result)\nz stat (observed value) = {z_stat:.{precision}f}\nz (critical value) = {z_cv:.{precision}f}\np-value = {p_value:.{precision}f} ({inter_p_value(p_value)})\nReject H_0 (There are relationship between two variables) → {flag}\n \"\"\"\n\n result_dict = {'spearman_result': [\n spearman_restult_cor, spearman_restult_p_value], 'z_stat': z_stat, 'z_cv': z_cv, 'p-value': p_value}\n\n print(results)\n\n return result_dict\n"
] | [
[
"numpy.sum",
"scipy.stats.norm.sf",
"scipy.stats.t.ppf",
"scipy.stats.chi2.ppf",
"scipy.stats.norm.ppf",
"pandas.DataFrame",
"scipy.stats.binom.cdf",
"numpy.abs",
"scipy.stats.norm.cdf",
"pandas.merge",
"numpy.isnan",
"scipy.stats.chi2.cdf",
"numpy.array",
"scipy.stats.spearmanr",
"numpy.cov",
"numpy.corrcoef"
]
] |
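For reference, `ranksum_z_test` above implements the usual normal approximation to the Wilcoxon rank-sum test: with T the sum of ranks of the column being tested and sample sizes n1, n2,

$$E(T)=\frac{n_1(n_1+n_2+1)}{2},\qquad \sigma_T=\sqrt{\frac{n_1 n_2 (n_1+n_2+1)}{12}},\qquad z=\frac{T-E(T)}{\sigma_T}.$$

The Kruskal-Wallis and Friedman statistics in the same file follow the analogous textbook forms, $H=\frac{12}{N(N+1)}\sum_k T_k^2/n_k-3(N+1)$ and $F_r=\frac{12}{bk(k+1)}\sum_k T_k^2-3b(k+1)$, which `kruskal_chi2_test` and `friedman_chi2_test` compare against a $\chi^2_{k-1}$ critical value.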
cosmoscope/qt-client | [
"c6cb59267c8be9149a95fb853a4f181d9092c86b"
] | [
"qt_client/components/plot_data_model.py"
] | [
"import numpy as np\nfrom PyQt5.QtCore import (QAbstractTableModel, QModelIndex, QObject, Qt,\n QVariant, pyqtProperty, pyqtSignal, pyqtSlot)\n\nfrom ..hub import Hub, Message\n\n\nclass PlotDataModel(QAbstractTableModel):\n # DataRole = Qt.UserRole + 1\n\n def __init__(self, *args, **kwargs):\n super(PlotDataModel, self).__init__(*args, **kwargs)\n\n self._data = list(zip(np.arange(100), np.random.sample(100)))\n\n # The data model needs to listen for add data events\n self._hub = Hub()\n # self._hub.subscribe(AddDataMessage, self.add_data, self)\n # self._hub.subscribe(AddPlotDataMessage, self.add_data, self)\n\n # def roleNames(self):\n # return {\n # self.DataRole: b'data'\n # }\n\n def rowCount(self, parent=None, *args, **kwargs):\n return len(self._data)\n\n def columnCount(self, parent=None, *args, **kwargs):\n return 2\n\n def data(self, index, role=None):\n return self._data[index.row()][index.column()]\n # if role == self.DataRole:\n # return self._data[index.row()]\n if role == Qt.DisplayRole:\n return self._data[index.row()][index.column()]\n elif role == Qt.EditRole:\n return self._data[index.row()][index.column()]\n\n return QVariant()\n"
] | [
[
"numpy.arange",
"numpy.random.sample"
]
] |
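Note that `data()` in the model above returns the cell value unconditionally on its first line, so the `Qt.DisplayRole` / `Qt.EditRole` branches after it never run. A hedged sketch of the conventional role-aware version of that method, reusing the same `_data` layout (this is the usual `QAbstractTableModel` pattern, not the project's own code):

```python
# Sketch of a role-aware data() for PlotDataModel; follows the standard
# QAbstractTableModel convention rather than the snippet's early return.
from PyQt5.QtCore import Qt, QVariant

def data(self, index, role=Qt.DisplayRole):
    if not index.isValid():
        return QVariant()
    if role in (Qt.DisplayRole, Qt.EditRole):
        return self._data[index.row()][index.column()]
    return QVariant()
```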
xXEminenTXx/ImageClassifier | [
"e0e63e12108b523270ea7d615afcbfc696b07996"
] | [
"predict_functions.py"
] | [
"# python imports\nimport numpy as np\nfrom PIL import Image\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom collections import OrderedDict\nfrom sys import exit\n\n# File containing all of the functions used in the predict program\ndef load_checkpoint(filepath):\n\n checkpoint = torch.load(filepath)\n \n if checkpoint[\"arch\"] == 'VGG':\n model = models.vgg16(pretrained=True)\n \n elif checkpoint[\"arch\"] == 'Densenet':\n model = models.densenet121(pretrained=True)\n \n else:\n print(\"Unsupported arch used in checkpoint\")\n exit(1)\n\n for param in model.parameters():\n param.requires_grad = False\n\n model.class_to_idx = checkpoint['class_to_idx']\n\n # Load classifier from checkpoint\n classifier = checkpoint['classifier']\n\n model.classifier = classifier\n\n model.load_state_dict(checkpoint['model_state_dict'])\n\n return model\n\ndef process_image(image_path):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n\n # Process a PIL image for use in a PyTorch model\n\n pil_image = Image.open(image_path)\n\n # Resize\n if pil_image.size[0] > pil_image.size[1]:\n pil_image.thumbnail((5000, 256))\n else:\n pil_image.thumbnail((256, 5000))\n\n # Crop \n left_margin = (pil_image.width-224)/2\n bottom_margin = (pil_image.height-224)/2\n right_margin = left_margin + 224\n top_margin = bottom_margin + 224\n\n pil_image = pil_image.crop((left_margin, bottom_margin, right_margin, top_margin))\n\n # Normalize\n np_image = np.array(pil_image)/255\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n np_image = (np_image - mean) / std\n\n # PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array\n # Color channel needs to be first; retain the order of the other two dimensions.\n np_image = np_image.transpose((2, 0, 1))\n\n return np_image\n\ndef predict(image_path, model, topk, gpu):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n\n image = process_image(image_path)\n\n if gpu:\n model.to('cuda')\n image = torch.from_numpy(image).type(torch.cuda.FloatTensor)\n else:\n model.to('cpu')\n image = torch.from_numpy(image).type(torch.FloatTensor)\n\n # Returns a new tensor with a dimension of size one inserted at the specified position.\n image = image.unsqueeze(0)\n\n output = model.forward(image)\n\n probabilities = torch.exp(output)\n\n # Probabilities and the indices of those probabilities corresponding to the classes\n top_probabilities, top_indices = probabilities.topk(topk)\n\n # Convert to lists\n top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0] \n top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0] \n\n # Convert topk_indices to the actual class labels using class_to_idx\n # Invert the dictionary so you get a mapping from index to class.\n\n idx_to_class = {value: key for key, value in model.class_to_idx.items()}\n #print(idx_to_class)\n\n top_classes = [idx_to_class[index] for index in top_indices]\n\n return top_probabilities, top_classes\n"
] | [
[
"numpy.array",
"torch.from_numpy",
"torch.load",
"torch.exp"
]
] |
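`process_image` above resizes the shorter image side to 256 px, center-crops to 224 x 224, rescales pixel values to [0, 1], and then applies the standard ImageNet channel normalization

$$x'_c=\frac{x_c-\mu_c}{\sigma_c},\qquad \mu=(0.485,\,0.456,\,0.406),\qquad \sigma=(0.229,\,0.224,\,0.225),$$

before transposing the array from H x W x C to the channel-first C x H x W layout that PyTorch expects.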
Gordonbuck/ml-oov-we | [
"ce28cd8b556a16125ba36cd41781a3e60bb26422"
] | [
"src/train.py"
] | [
"import higher\nfrom leap import Leap\nimport numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nimport gc\n\n\ndef train(model, source_corpus, char2idx, args, device):\n model = model.to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init)\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.patience,\n threshold=args.threshold)\n best_valid_cosine = 1\n\n for epoch in np.arange(args.n_epochs):\n valid_cosine = []\n valid_ce = []\n\n model.train()\n for batch in np.arange(args.n_batch):\n train_contexts, train_targets, train_vocabs, train_inds = source_corpus.get_batch(args.batch_size,\n args.n_shot,\n char2idx, device,\n fixed=args.fixed_shot,\n return_inds=True)\n optimizer.zero_grad()\n\n if args.lang_model:\n pred_emb, pred_ind = model.forward(train_contexts, train_vocabs, lang_model=args.lang_model)\n loss = nn.functional.cross_entropy(pred_ind, train_inds)\n loss += -nn.functional.cosine_similarity(pred_emb, train_targets).mean()\n else:\n pred_emb = model.forward(train_contexts, train_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, train_targets).mean()\n\n loss.backward()\n optimizer.step()\n\n model.eval()\n with torch.no_grad():\n for batch in np.arange(args.n_batch):\n valid_contexts, valid_targets, valid_vocabs, valid_inds = source_corpus.get_batch(args.batch_size,\n args.n_shot,\n char2idx, device,\n use_valid=True,\n fixed=args.fixed_shot,\n return_inds=True)\n if args.lang_model:\n pred_emb, pred_ind = model.forward(valid_contexts, valid_vocabs, lang_model=args.lang_model)\n loss = nn.functional.cross_entropy(pred_ind, valid_inds).mean()\n valid_ce += [loss.cpu().numpy()]\n else:\n pred_emb = model.forward(valid_contexts, valid_vocabs)\n\n loss = -nn.functional.cosine_similarity(pred_emb, valid_targets).mean()\n valid_cosine += [loss.cpu().numpy()]\n\n avg_valid = np.average(valid_cosine)\n lr_scheduler.step(avg_valid)\n\n if args.lang_model:\n avg_ce = np.average(valid_ce)\n print(f\"Average cosine loss: {avg_valid}; Average cross entropy loss: {avg_ce}\")\n else:\n print(f\"Average cosine loss: {avg_valid}\")\n\n if avg_valid < best_valid_cosine:\n best_valid_cosine = avg_valid\n torch.save(model.state_dict(), os.path.join(args.save_dir, 'model.pt'))\n\n if optimizer.param_groups[0]['lr'] < args.lr_early_stop:\n print('LR early stop')\n break\n\n\ndef maml_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):\n model = model.to(device)\n meta_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_meta_lr_init)\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,\n patience=args.patience, threshold=args.threshold)\n best_score = 3\n\n for meta_epoch in np.arange(args.n_meta_epochs):\n gc.collect()\n source_valid_cosine = []\n target_valid_cosine = []\n\n model.train()\n with torch.backends.cudnn.flags(benchmark=True):\n for meta_batch in np.arange(args.n_meta_batch):\n inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_inner_lr_init)\n meta_optimizer.zero_grad()\n\n with higher.innerloop_ctx(model, inner_optimizer, copy_initial_weights=False) as (fmodel, diffopt):\n for inner_batch in np.arange(args.n_inner_batch):\n source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)\n pred_emb = fmodel.forward(source_train_contexts, source_train_vocabs)\n loss = 
-nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()\n diffopt.step(loss)\n\n target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,\n repeat_ctxs=args.meta_repeat_ctxs)\n pred_emb = fmodel.forward(target_train_contexts, target_train_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()\n loss.backward()\n\n meta_optimizer.step()\n\n model.eval()\n with torch.no_grad():\n for batch in np.arange(args.n_batch):\n source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)\n pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()\n source_valid_cosine += [loss.cpu().numpy()]\n\n target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,\n repeat_ctxs=args.meta_repeat_ctxs)\n pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()\n target_valid_cosine += [loss.cpu().numpy()]\n\n avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)\n score = avg_target_valid\n lr_scheduler.step(score)\n print(f\"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}\")\n\n if score < best_score:\n best_score = score\n torch.save(model.state_dict(), os.path.join(args.save_dir, 'maml_model.pt'))\n\n if meta_optimizer.param_groups[0]['lr'] < args.maml_lr_early_stop:\n print('LR early stop')\n break\n\n\ndef leap_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):\n model = model.to(device)\n leap = Leap(model)\n meta_optimizer = torch.optim.Adam(leap.parameters(), lr=args.leap_meta_lr_init)\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,\n patience=args.patience, threshold=args.threshold)\n best_score = 3\n\n for meta_epoch in np.arange(args.n_meta_epochs):\n source_valid_cosine = []\n target_valid_cosine = []\n\n model.train()\n for meta_batch in np.arange(args.n_meta_batch):\n meta_optimizer.zero_grad()\n\n leap.init_task()\n leap.to(model)\n inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)\n for inner_batch in np.arange(args.n_task_steps):\n inner_optimizer.zero_grad()\n source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)\n pred_emb = model.forward(source_train_contexts, source_train_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()\n loss.backward()\n leap.update(loss, model)\n inner_optimizer.step()\n\n leap.init_task()\n leap.to(model)\n inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)\n for inner_batch in np.arange(args.n_task_steps):\n inner_optimizer.zero_grad()\n target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,\n repeat_ctxs=args.meta_repeat_ctxs)\n pred_emb = 
model.forward(target_train_contexts, target_train_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()\n loss.backward()\n leap.update(loss, model)\n inner_optimizer.step()\n\n leap.normalize()\n meta_optimizer.step()\n\n leap.to(model)\n model.eval()\n with torch.no_grad():\n for batch in np.arange(args.n_batch):\n source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)\n pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()\n source_valid_cosine += [loss.cpu().numpy()]\n\n target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(\n args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,\n repeat_ctxs=args.meta_repeat_ctxs)\n pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)\n loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()\n target_valid_cosine += [loss.cpu().numpy()]\n\n avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)\n score = avg_target_valid\n lr_scheduler.step(score)\n print(f\"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}\")\n\n if score < best_score:\n best_score = score\n torch.save(model.state_dict(), os.path.join(args.save_dir, 'leap_model.pt'))\n\n if meta_optimizer.param_groups[0]['lr'] < args.leap_lr_early_stop:\n print('LR early stop')\n break\n"
] | [
[
"torch.backends.cudnn.flags",
"torch.no_grad",
"numpy.arange",
"torch.nn.functional.cross_entropy",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.average",
"torch.nn.functional.cosine_similarity"
]
] |
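All three loops above minimize a negative mean cosine similarity between predicted and target embeddings; `train` optionally adds a cross-entropy term over word indices when `args.lang_model` is set. Over a batch of size B,

$$\mathcal{L}=-\frac{1}{B}\sum_{i=1}^{B}\cos\!\big(\hat{e}_i,\,e_i\big)\;\big[\;+\;\mathrm{CE}(\hat{y}_i,\,y_i)\,\big].$$

`maml_adapt` backpropagates through the inner Adam updates via `higher.innerloop_ctx`, whereas `leap_adapt` accumulates its meta-gradient through `leap.update` and normalizes it before each meta-optimizer step.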
krisbuote/Reinforcement-Learning-Trader | [
"ae8c3af0856a480c88546c2a7e478a735585e0af"
] | [
"Reinforcement-Learning-Trader/Agent.py"
] | [
"import keras\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense, LSTM, Dropout\nfrom keras.optimizers import Adam\n\nimport numpy as np\nimport random\nfrom collections import deque\n\nclass Agent:\n def __init__(self, state_size, is_eval=False, model_name=\"\"):\n self.state_size = state_size # normalized previous days\n self.action_size = 2 # buy, sell\n self.memory = deque(maxlen=1000)\n self.inventory = []\n self.net_worth = []\n self.model_name = model_name\n self.is_eval = is_eval\n\n self.gamma = 0.95\n self.epsilon = 1.0\n self.epsilon_min = 0.08\n self.epsilon_decay = 0.995\n\n self.model = load_model(\"models/\" + model_name) if is_eval else self._model()\n\n def _model(self):\n model = Sequential()\n model.add(Dense(units=64, input_dim=self.state_size, activation=\"relu\"))\n model.add(Dense(units=32, activation=\"relu\"))\n model.add(Dense(units=8, activation=\"relu\"))\n model.add(Dense(self.action_size, activation=\"linear\"))\n model.compile(loss=\"mse\", optimizer=Adam(lr=0.001))\n\n return model\n\n def act(self, state):\n if not self.is_eval and random.random() <= self.epsilon:\n return random.randrange(self.action_size)\n\n options = self.model.predict(state)\n return np.argmax(options[0])\n\n def expReplay(self, batch_size):\n mini_batch = []\n l = len(self.memory)\n for i in range(l - batch_size + 1, l):\n mini_batch.append(self.memory[i])\n\n for state, action, reward, next_state, done in mini_batch:\n target = reward\n if not done:\n target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])\n\n target_f = self.model.predict(state)\n target_f[0][action] = target\n self.model.fit(state, target_f, epochs=1, verbose=0)\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n"
] | [
[
"numpy.argmax"
]
] |
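`expReplay` above fits the network toward the standard one-step Q-learning target

$$y = r + \gamma\,\max_{a'} Q(s', a'),$$

overwriting only the entry of the predicted Q-vector for the action actually taken, so the MSE loss pushes on that action's value alone; `epsilon` then decays multiplicatively toward `epsilon_min` to shift from exploration to exploitation.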
mussard/share_data_benchmark | [
"c02bfa4017b9008800cabe47d7c7959f82c26060"
] | [
"MRPT/vdz/atoms/V_0/mrpt.py"
] | [
"import json\nfrom pyscf import gto,scf,mcscf, fci, lo, ci, cc\nfrom pyscf.scf import ROHF, UHF,ROKS\nimport numpy as np\nimport pandas as pd\n\n# THIS IS WERE IT STARTS ====================================\n\ndf=json.load(open(\"../../../trail.json\"))\n\nspins={'Sc':1, 'Ti':2, 'V':3, 'Cr':6, 'Mn':5, 'Fe':4, 'Cu':1}\n\nnd={'Sc':(1,0), 'Ti':(2,0), 'V':(3,0), 'Cr':(5,0), 'Mn':(5,0), 'Fe':(5,1), 'Cu':(5,5)}\n\ncas={'Sc':3, 'Ti':4, 'V':5, 'Cr':6, 'Mn':7, 'Fe':8, 'Cu':11}\n\ndatacsv={}\nfor nm in ['atom','charge','method','basis','pseudopotential',\n 'totalenergy','totalenergy-stocherr','totalenergy-syserr']:\n datacsv[nm]=[]\n\nbasis='vdz'\nel='V'\ncharge=0\n\nmol=gto.Mole()\nmol.ecp={}\nmol.basis={}\nmol.ecp[el]=gto.basis.parse_ecp(df[el]['ecp'])\nmol.basis[el]=gto.basis.parse(df[el][basis])\nmol.charge=charge\nif el == 'Cr' or el == 'Cu':\n mol.spin=spins[el]-charge\nelse:\n mol.spin=spins[el]+charge\nmol.build(atom=\"%s 0. 0. 0.\"%el,verbose=4)\n\nm=ROHF(mol)\nm.level_shift=1000.0\ndm=m.from_chk(\"../../../../HF/atoms/\"+el+basis+str(charge)+\".chk\")\nhf=m.kernel(dm)\nm.analyze()\n\nfrom pyscf.shciscf import shci\nmc = shci.SHCISCF(m, 6, cas[el]-charge)\n#mc.fcisolver.conv_tol = 1e-14\nmc.fcisolver.mpiprefix=\"srun -n20\"\nmc.fcisolver.num_thrds=12\nmc.verbose = 4\ncas=mc.kernel()[0]\n \nfrom pyscf.icmpspt import icmpspt\npt=icmpspt.icmpspt(mc,rdmM=500, PTM=1000,\\\n pttype=\"MRLCC\",\\\n third_order=True,\\\n fully_ic=True,\\\n do_dm4=True)\n\ndatacsv['atom'].append(el)\ndatacsv['charge'].append(charge)\ndatacsv['method'].append('MRPT')\ndatacsv['basis'].append(basis)\ndatacsv['pseudopotential'].append('trail')\ndatacsv['totalenergy'].append(cas+pt)\ndatacsv['totalenergy-stocherr'].append(0.0)\ndatacsv['totalenergy-syserr'].append(0.0)\npd.DataFrame(datacsv).to_csv(el+\".csv\",index=False)\n\n"
] | [
[
"pandas.DataFrame"
]
] |
PariseC/osm2rail | [
"dfc373aedba4a82fd144192cb6a855e8a11b0601"
] | [
"osm2rail/plotter.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection,PolyCollection\n\ndef showNetwork(network,savefig=None):\n node_x_coords=[]\n node_y_coords=[]\n link_coords=[]\n poi_coords=[]\n\n for _,node in network.node_dict.items():\n node_x_coords.append(node.x_coord)\n node_y_coords.append(node.y_coord)\n\n for _,link in network.link_dict.items():\n coords = list(link.geometry.coords)\n link_coords.append(np.array(coords))\n\n if len(network.POI_list):\n for poi in network.POI_list:\n coords = list(poi.geometry.exterior.coords)\n poi_coords.append(np.array(coords))\n\n fig, ax = plt.subplots(figsize=(12, 8))\n # plot network nodes\n ax.scatter(node_x_coords, node_y_coords, marker='o', c='red', s=10, zorder=1)\n # plot network links\n ax.add_collection(LineCollection(link_coords, colors='orange', linewidths=1, zorder=2))\n # plot network pois\n if len(poi_coords):\n coll = PolyCollection(poi_coords, alpha=0.7, zorder=0)\n ax.add_collection(coll)\n # set axis\n ax.autoscale_view()\n plt.xlabel('x_coord')\n plt.ylabel('y_coord')\n plt.tight_layout()\n # show fig\n plt.show()\n # save fig\n if savefig:\n try:\n figname = savefig['filename'] if 'filename' in savefig.keys() else 'network.png'\n dpi = savefig['dpi'] if 'dpi' in savefig else 300\n fig.savefig(figname, dpi=dpi, bbox_inches='tight')\n except Exception as e:\n print(e)"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.collections.PolyCollection",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.collections.LineCollection",
"matplotlib.pyplot.xlabel"
]
] |
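`showNetwork` above draws each link geometry as one entry in a `LineCollection` (and each POI polygon in a `PolyCollection`) rather than calling `plot` per feature. A minimal, self-contained sketch of that pattern, with two hypothetical link segments:

```python
# Minimal sketch of the LineCollection pattern used by showNetwork above;
# the two segments are hypothetical link geometries.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

segments = [np.array([[0.0, 0.0], [1.0, 1.0]]),
            np.array([[1.0, 1.0], [2.0, 0.5]])]

fig, ax = plt.subplots(figsize=(6, 4))
ax.add_collection(LineCollection(segments, colors='orange', linewidths=1))
ax.autoscale_view()  # recompute view limits so the added collection is visible
plt.show()
```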
mikee385/fbsrankings | [
"2b50e26a302b53c21cd8f5c965943d6fbf0680a1"
] | [
"src/fbsrankings/domain/service/srs_ranking_service.py"
] | [
"from typing import Dict\nfrom typing import List\n\nimport numpy\n\nfrom fbsrankings.domain.model.affiliation import Subdivision\nfrom fbsrankings.domain.model.game import Game\nfrom fbsrankings.domain.model.game import GameStatus\nfrom fbsrankings.domain.model.ranking import Ranking\nfrom fbsrankings.domain.model.ranking import SeasonData\nfrom fbsrankings.domain.model.ranking import TeamRankingRepository\nfrom fbsrankings.domain.model.ranking import TeamRankingService\nfrom fbsrankings.domain.model.team import TeamID\n\n\nclass TeamData:\n def __init__(self, index: int) -> None:\n self.index = index\n self.game_total = 0\n self.point_margin = 0\n\n def add_game(self, point_margin: int) -> None:\n self.game_total += 1\n self.point_margin += point_margin\n\n\nclass SRSRankingService(TeamRankingService):\n name: str = \"SRS\"\n\n def __init__(self, repository: TeamRankingRepository) -> None:\n self._repository = repository\n\n def calculate_for_season(self, season_data: SeasonData) -> List[Ranking[TeamID]]:\n team_data: Dict[TeamID, TeamData] = {}\n for affiliation in season_data.affiliation_map.values():\n if affiliation.subdivision == Subdivision.FBS:\n team_data[affiliation.team_id] = TeamData(len(team_data))\n\n season_is_complete = True\n games_by_week: Dict[int, List[Game]] = {}\n for game in season_data.game_map.values():\n winning_data = None\n if game.winning_team_id is not None:\n winning_data = team_data.get(game.winning_team_id)\n\n losing_data = None\n if game.losing_team_id is not None:\n losing_data = team_data.get(game.losing_team_id)\n\n if winning_data is not None and losing_data is not None:\n week_games = games_by_week.setdefault(game.week, [])\n week_games.append(game)\n\n elif game.status == GameStatus.SCHEDULED:\n season_is_complete = False\n\n n = len(team_data)\n a = numpy.zeros((n + 1, n))\n b = numpy.zeros(n + 1)\n\n rankings = []\n for week in sorted(games_by_week.keys()):\n for game in games_by_week[week]:\n if (\n game.home_team_score is not None\n and game.away_team_score is not None\n ):\n home_data = team_data[game.home_team_id]\n away_data = team_data[game.away_team_id]\n\n home_margin = self._adjust_margin(\n game.home_team_score - game.away_team_score,\n )\n home_data.add_game(home_margin)\n away_data.add_game(-home_margin)\n\n a[home_data.index, away_data.index] -= 1.0\n a[away_data.index, home_data.index] -= 1.0\n\n for data in team_data.values():\n a[data.index, data.index] = data.game_total\n b[data.index] = data.point_margin\n a[n, data.index] = 1.0\n b[n] = 0.0\n\n x = numpy.linalg.lstsq(a, b, rcond=-1)[0]\n\n result = {id_: x[data.index] for id_, data in team_data.items()}\n ranking_values = TeamRankingService._to_values(season_data, result)\n\n rankings.append(\n self._repository.create(\n SRSRankingService.name,\n season_data.season.id_,\n week,\n ranking_values,\n ),\n )\n\n if season_is_complete:\n rankings.append(\n self._repository.create(\n SRSRankingService.name,\n season_data.season.id_,\n None,\n ranking_values,\n ),\n )\n\n return rankings\n\n @staticmethod\n def _adjust_margin(margin: int) -> int:\n if margin > 24:\n return 24\n if margin < -24:\n return -24\n if 0 < margin < 7:\n return 7\n if 0 > margin > -7:\n return -7\n return margin\n"
] | [
[
"numpy.linalg.lstsq",
"numpy.zeros"
]
] |
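`calculate_for_season` above builds, week by week, the least-squares system behind the Simple Rating System: one row per FBS team

$$g_i\,r_i \;-\; \sum_{j\in \mathrm{opp}(i)} r_j \;=\; m_i,$$

where g_i is team i's games played, the opponent entry is decremented once per meeting, and m_i is the accumulated point margin after `_adjust_margin` caps it at ±24 and pushes small nonzero margins out to ±7; a final row enforcing $\sum_i r_i = 0$ pins the mean rating at zero before `numpy.linalg.lstsq` solves for the ratings.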
jamaps/tracc | [
"0f71b07b6560ed2f5a9a9f6f94a07e487af254c5"
] | [
"build/lib/tracc/tracc.py"
] | [
"import tracc\nimport pandas as pd\nimport numpy as np\n\n\nclass costs:\n\n def __init__(self,\n travelcosts_df,\n columns = None\n ):\n\n \"\"\"\n Inputs data and prunes columns if desired\n \"\"\"\n\n if columns is not None:\n self.data = travelcosts_df[columns]\n\n else:\n self.data = travelcosts_df\n\n\n def intrazonal(self,\n cost_column,\n origin_column,\n destination_column,\n method = \"constant\",\n value = 0,\n polygon_file = None,\n polygon_id = None\n ):\n \"\"\"\n Computes and updates intrazonal travel cost in a travel costs matrix. The output will include a travel cost between any origin or destination location in the matrix to itself.\n\n Parameters\n ----------\n cost_column : column name for travel costs\n\n origin_column : column name for origin IDs\n\n destinationn_column : column name for origin IDs\n\n method : \"constant\" applies a single @value to all intrazonal travel costs. \"radius\" applies a cost which is proportional to the radius of a circle with the same area as its input polygon\n\n value : parameters for the method\n\n polygon_file : file path to an input spatial polygon (e.g. geojson) if needed (it is for method = \"radius\")\n\n polygon_id : ID field for the polygon_file needed for joining to the cost matrix\n \"\"\"\n\n # making sure ID columns are strings for a merge later on\n self.data[origin_column] = self.data[origin_column].astype(str)\n self.data[destination_column] = self.data[destination_column].astype(str)\n\n # getting set of unique locations in the dataset\n locations = list(self.data[origin_column].unique()) + list(self.data[destination_column].unique())\n locations = list(set(locations))\n\n if method == \"constant\":\n\n new_times = [value] * len(locations)\n\n df = pd.DataFrame(\n list(zip(locations, locations, new_times)),\n columns =[origin_column, destination_column, cost_column + \"_i\"])\n\n elif method == \"radius\":\n\n from tracc.spatial import radius\n\n # compute based on the equivilant radius of each polygon\n df = radius(polygon_file,polygon_id)\n df[origin_column] = df[polygon_id]\n df[destination_column] = df[polygon_id]\n del df[polygon_id]\n df[cost_column + \"_i\"] = value * df[\"radius\"]\n del df[\"radius\"]\n\n else:\n raise Exception(\"Method can only be 'constant' or 'radius'\")\n\n df[origin_column] = df[origin_column].astype(str)\n df[destination_column] = df[destination_column].astype(str)\n\n # join in the newly created intrazonal travel times\n self.data = pd.merge(self.data, df, how='outer', left_on=[origin_column, destination_column], right_on = [origin_column, destination_column])\n\n # replace the older intrazonal travel times\n self.data[cost_column] = np.where((self.data[cost_column + \"_i\"] >= 0),self.data[cost_column + \"_i\"],self.data[cost_column])\n\n del self.data[cost_column + \"_i\"]\n\n\n\n\n def fill_missing_costs(\n self,\n cost_column,\n origin_column,\n destination_column,\n spatial_file_path,\n spatial_file_id,\n where = \"origin\",\n weight_type = \"Queen\"\n ):\n \"\"\"\n Completes an OD matrix by filling locations that were missing from the original matrix, based on a neighbourhood spatial weights matrix. 
For example if a origin zone has no travel costs, it presumes its travel costs to destinations are the average of the same costs of its neighbouring zones.\n \"\"\"\n\n from tracc.spatial import area\n\n # get list of zones which are missing from the input costs table\n dfz = area(spatial_file_path, spatial_file_id)\n dfz[spatial_file_id] = dfz[spatial_file_id].astype(str)\n self.data[origin_column] = self.data[origin_column].astype(str)\n li1 = list(self.data[origin_column].unique())\n li2 = list(dfz[spatial_file_id].unique())\n missing = [x for x in li2 if x not in li1]\n del li1,li2\n\n if len(missing) == 0:\n return None\n\n if where == \"origin\":\n\n # get neighbours for each missing zone\n from tracc.spatial import get_neighbours\n neighbours = get_neighbours(spatial_file_path, \"Queen\", spatial_file_id)\n\n new_times = []\n\n # for each zone, compute average travel times to other zones based on neighbours\n for location in missing:\n\n locneigh = neighbours[location]\n\n temp = self.data[self.data[origin_column].isin(locneigh)]\n\n temp = pd.DataFrame(temp.groupby([destination_column], as_index=False)[cost_column].mean())\n\n temp[origin_column] = location\n\n new_times.append(temp)\n\n # combine the outputs, and concat to the input times\n new_times = pd.concat(new_times)\n self.data = pd.concat([self.data, new_times])\n\n elif where == \"destination\":\n\n # get neighbours for each missing zone\n from tracc.spatial import get_neighbours\n neighbours = get_neighbours(spatial_file_path, \"Queen\", spatial_file_id)\n\n new_times = []\n\n # for each zone, compute average travel times from other zones based on neighbours\n for location in missing:\n\n locneigh = neighbours[location]\n\n temp = self.data[self.data[destination_column].isin(locneigh)]\n\n temp = pd.DataFrame(temp.groupby([origin_column], as_index=False)[cost_column].mean())\n\n temp[destination_column] = location\n\n new_times.append(temp)\n\n # combine the outputs, and concat to the input times\n new_times = pd.concat(new_times)\n self.data = pd.concat([self.data, new_times])\n\n else:\n\n raise Exception(\"Input paramater @where should either be 'origin' or 'destination'\")\n\n\n\n\n def generalized_cost(\n self,\n columns,\n coefficients,\n exponents = None,\n prune_output = True,\n output_cost_name = \"GC\"\n ):\n\n \"\"\"\n Computes generalized costs\n \"\"\"\n\n # need to add a column check warning, and make the intercept = 0 if none is provided\n\n # set all exponents as 1 if none are inputted\n if exponents is None:\n exponents = [1] * len(columns)\n\n # compute the generalized cost value\n self.data[output_cost_name] = coefficients[len(coefficients) - 1]\n i = 0\n while i < len(columns):\n self.data[output_cost_name] = self.data[output_cost_name] + coefficients[i] * self.data[columns[i]] ** exponents[i]\n i += 1\n\n # delete initital cost columns if desired\n if prune_output is True:\n for col in list(set(columns)):\n del self.data[col]\n\n\n def impedence_calc(\n self,\n cost_column,\n impedence_func,\n impedence_func_params,\n prune_output = False,\n output_col_name = \"fCij\"\n ):\n\n \"\"\"\n Measures impdence given input of travel cost and selected impedence funciton and parameters\n\n # To Do: add in more impdence function options\n \"\"\"\n\n if impedence_func == \"cumulative\":\n self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.cumulative,args = (impedence_func_params,))\n\n elif impedence_func == \"linear\":\n self.data[output_col_name] = 
self.data[cost_column].apply(tracc.decay.linear,args = (impedence_func_params,))\n\n elif impedence_func == \"exponential\":\n self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.exponential,args = (impedence_func_params,))\n\n else:\n raise Exception(\"Please select an appropriate decay function\")\n\n if prune_output is True:\n del self.data[cost_column]\n\n\n def impedence_combine(self,\n columns,\n how = \"product\",\n output_col_name = \"fCij\",\n prune_output = True\n ):\n\n \"\"\"\n If there are multiple impedences, and we want to combine them into a single impedence value. This is similar to genearlized cost.\n\n For example, if we have an impedence value for transit travel time, and we also want to remove any trips based on a fare criteria, it can be applied in this way.\n \"\"\"\n\n if how == \"product\":\n self.data[output_col_name] = 1\n i = 0\n while i < len(columns):\n self.data[output_col_name] = self.data[output_col_name] * self.data[columns[i]]\n i += 1\n\n elif how == \"sum\":\n self.data[output_col_name] = 0\n i = 0\n while i < len(columns):\n self.data[output_col_name] = self.data[output_col_name] + self.data[columns[i]]\n i += 1\n\n else:\n raise Exception('the input @how must be one of \"product\" or \"sum\"')\n\n\n\n def max_impedence(self,\n columns,\n imp_col_name = \"fCij\"\n ):\n \"\"\"\n Reduces the cost table to only include rows with the maximum impedence value for the set of input columns.\n\n For example, if there 3 transit trips from i to j, each with a different computed generalized_cost resulting from different route choices, this function will return the row with the one resulting in the greatest impedence value (i.e. lowest generalized cost)\n \"\"\"\n\n self.data = self.data.groupby(columns)[imp_col_name].max().reset_index()\n\n\n\nclass supply:\n\n def __init__(self,\n supply_df,\n columns = None\n ):\n \"\"\"\n intitializing can include pruning the dataset to a list of @column names\n \"\"\"\n\n if columns is not None:\n self.data = supply_df[columns]\n\n else:\n self.data = supply_df\n\n\n\n def weight(self,\n columns,\n weights,\n weight_col_name = \"Oj\",\n prune_output = True\n ):\n \"\"\"\n Creating a value based on a weighted linear combination other values. 
Can be used to weight by destinations by their desirability.\n\n Parameters\n ----------------\n columns : columns in which to input into the weights function\n\n weights : linear multipliers, the same length as the weights\n\n weight_col_name : output column name\n\n prune_output : if True, delete all input columns used in the weight function\n \"\"\"\n\n if len(columns) != len(weights):\n raise Exception(\"Please make sure columns and weights are lists of the same length\")\n\n if len(columns) < 2:\n raise Exception(\"Can only weight opportunities if 2 or more are inputted\")\n\n if sum(weights) < 0.999 or sum(weights) > 1.001:\n print(\"WARNING: the inputted weights do not sum to 1.\")\n\n\n self.data[weight_col_name] = 0\n i = 0\n while i < len(columns):\n self.data[weight_col_name] = self.data[weight_col_name] + weights[i] * self.data[columns[i]]\n i += 1\n\n if prune_output is True:\n for col in list(set(columns)):\n del self.data[col]\n\n\n\nclass demand:\n\n def __init__(self,\n demand_df,\n columns = None\n ):\n \"\"\"\n intitializing can include pruning the dataset to a list of @column names\n \"\"\"\n\n if columns is not None:\n self.data = demand_df[columns]\n\n else:\n self.data = demand_df\n\n\n def weight(self,\n columns,\n weights,\n weight_col_name = \"Pi\",\n prune_output = True\n ):\n \"\"\"\n Creating a value based on a weighted linear combination other values. Can be used to weight by population groups by their propensity to travel to certain activity types.\n\n Parameters\n ----------------\n columns : columns in which to input into the weights function\n\n weights : linear multipliers, the same length as the weights\n\n weight_col_name : output column name\n\n prune_output : if True, delete all input columns used in the weight function\n \"\"\"\n\n if len(columns) != len(weights):\n raise Exception(\"Please make sure columns and weights are lists of the same length\")\n\n if len(columns) < 2:\n raise Exception(\"Can only weight opportunities if 2 or more are inputted\")\n\n if sum(weights) < 0.999 or sum(weights) > 1.001:\n print(\"WARNING: the inputted weights do not sum to 1.\")\n\n self.data[weight_col_name] = 0\n i = 0\n while i < len(columns):\n self.data[weight_col_name] = self.data[weight_col_name] + weights[i] * self.data[columns[i]]\n i += 1\n\n if prune_output is True:\n for col in list(set(columns)):\n del self.data[col]\n\n\n\nclass accessibility:\n\n def __init__(self,\n travelcosts_df,\n supply_df,\n demand_df = None,\n travelcosts_ids = [\"origin_id\",\"destination_id\"],\n supply_ids = \"destination_id\",\n demand_ids = None\n ):\n \"\"\"\n Parameters\n ----------\n travelcosts_df : a pandas dataframe containing travel costs from a set of locations (e.g. orignis) to another set of locations (e.g. destinations). Data should be in a long table format:\n\n origin_id | destination_id | travel_cost_1 | travel_cost_2 (optional) | etc (optional)\n\n supply_df : a pandas dataframe containing the number of opportunities (e.g. supply), relational to the destination IDs in travelcosts_df\n\n demand_df : a pandas dataframe containing the number of agents competiting for opportunities (e.g. demand), relational to the origin IDs in travelcosts_df. 
This is optional since several accessibility measures do not account for demand\n\n travelcosts_ids : a two item list of the column names for the origin and destination IDs in the travelcosts_df table\n\n supply_ids : a single variable string for the destination ID in the supply_df table\n\n demand_ids : a single variable string for the origin ID in the demand_df table. This is optional since several accessibility measures do not account for demand\n\n \"\"\"\n\n self.travelcosts_ids = travelcosts_ids\n self.supply_ids = supply_ids\n self.demand_ids = demand_ids\n\n if demand_df is None and supply_df is None:\n raise Exception(\"Please input a supply_df or a demand_df\")\n\n # setting ID columns to strings to aid merging\n travelcosts_df[travelcosts_ids[0]] = travelcosts_df[travelcosts_ids[0]].astype(str)\n travelcosts_df[travelcosts_ids[1]] = travelcosts_df[travelcosts_ids[1]].astype(str)\n\n # join supply data to the travel costs\n if supply_df is not None and demand_df is None:\n supply_df[supply_ids] = supply_df[supply_ids].astype(str)\n self.data = pd.merge(\n travelcosts_df,\n supply_df,\n left_on=travelcosts_ids[1],\n right_on=self.supply_ids,\n how = 'left'\n )\n\n # join demand data as well, if inputted\n elif demand_df is not None and supply_df is None:\n demand_df[demand_ids] = demand_df[demand_ids].astype(str)\n self.data = pd.merge(\n travelcosts_df,\n demand_df,\n left_on=travelcosts_ids[0],\n right_on=self.demand_ids,\n how = 'left'\n )\n\n else:\n supply_df[supply_ids] = supply_df[supply_ids].astype(str)\n demand_df[demand_ids] = demand_df[demand_ids].astype(str)\n self.data = pd.merge(\n travelcosts_df,\n supply_df,\n left_on=travelcosts_ids[1],\n right_on=self.supply_ids,\n how = 'left'\n )\n self.data = pd.merge(\n self.data,\n demand_df,\n left_on=travelcosts_ids[0],\n right_on=self.demand_ids,\n how = 'left'\n )\n\n\n def potential(self, opportunity, impedence, output_col_name = None):\n \"\"\"\n Measures potential accessibility to destinations\n\n Parameters\n ----------\n opportunity : a string indicating the column name for which opportunity we are measuring access to (e.g. jobs, grocery stores, etc.). This column should be in the supply_df dataframe\n\n impedence : column from the travel costs object to weight opportunities by\n\n output_col_name : a string for the column name of the output accessibility measure\n\n\n Output\n ----------\n A pandas dataframe with the first column with the IDs of the origin point (self.travelcosts_ids[0]), and the second column accessibility measures based on the input parameters.\n\n \"\"\"\n\n # set the output name for the accessibility measure\n if output_col_name is None:\n A_col_name = \"A_\" + opportunity + \"_\" + impedence\n else:\n A_col_name = output_col_name\n\n # multiply the opportunity by the impedence\n self.data[A_col_name] = self.data[opportunity] * self.data[impedence]\n\n # sum by the origin locations\n Ai = self.data.groupby(self.travelcosts_ids[0])[[A_col_name]].sum().reset_index()\n\n del self.data[A_col_name]\n\n return Ai\n\n\n\n\n def passive(self, population, impedence, output_col_name = None):\n\n \"\"\"\n Measures passive accessibility to destinations\n\n Parameters\n ----------\n population : a string indicating the column name for which population we are measuring access to (e.g. overall population, employed population, etc.). 
This column should be in the demand_df dataframe\n\n impedence : column from the travel costs object to weight opportunities by\n\n output_col_name : a string for the column name of the output accessibility measure\n\n\n Output\n ----------\n A pandas dataframe with the first column with the IDs of the origin point (self.travelcosts_ids[0]), and the second column accessibility measures based on the input parameters.\n\n \"\"\"\n\n # set the output name for the accessibility measure\n if output_col_name is None:\n A_col_name = \"A_\" + population + \"_\" + impedence\n else:\n A_col_name = output_col_name\n\n # multiply the opportunity by the impedence\n self.data[A_col_name] = self.data[population] * self.data[impedence]\n\n # sum by the origin locations\n Ai = self.data.groupby(self.travelcosts_ids[1])[[A_col_name]].sum().reset_index()\n\n del self.data[A_col_name]\n\n return Ai\n\n\n\n\n def mintravelcost(self, travelcost, opportunity, min_n, output_col_name = None):\n \"\"\"\n Parameters\n ----------\n opportunity : a string indicating the column name for which opportunity we are measuring access to (e.g. jobs, grocery stores, etc.). This column should be in the supply_df dataframe\n\n travelcost : a string indicating the column name for which travel cost shall be used (e.g. travel time, monetary cost, etc.). This column should be in the travelcosts_df dataframe\n\n min_n : an int indicating the number of desired reachable opportunities (e.g. 1 library, 3 grocery stores, 10k jobs, etc.)\n\n output_col_name : a string for the column name of the output accessibility measure\n\n\n\n Output\n ---------\n A pandas dataframe with the first column with the IDs of the origin point (self.travelcosts_ids[0]), and the second column are the accessibility measures based on the input parameters.\n \"\"\"\n\n # set the output name for the accessibility measure\n if output_col_name is None:\n A_col_name = \"A_mintravelcost_\" + str(travelcost) + \"_\" + str(opportunity) + \"_\" + str(min_n)\n else:\n A_col_name = output_col_name\n\n # internal function of returning the min travel time for n opportunities\n def get_min(df, tc, o, n):\n df = df.sort_values(by=[tc], ascending=True)\n df[\"cumsum\"] = df[o].cumsum()\n df = df[df[\"cumsum\"] >= n]\n return df[travelcost].min()\n\n # generating the accessibility measure\n out = pd.DataFrame(self.data.groupby(self.travelcosts_ids[0]).apply(get_min, tc = travelcost, o = opportunity, n = min_n))\n\n # setting the column name of the output\n out.columns = [A_col_name]\n\n return out\n\n\n\nclass summary:\n \"\"\"\n Computing various summary statistics of accessibility, usually with respect to different population groups\n\n Some of these can be used to assess distributions and equity of transport networks.\n \"\"\"\n\n def __init__(\n self,\n accessibility_df,\n summary_vars,\n accessibility_id = \"id\",\n summary_vars_id = \"id\"\n ):\n\n # join the data\n self.data = pd.merge(\n accessibility_df,\n summary_vars,\n left_on=accessibility_id,\n right_on=summary_vars_id,\n how = 'left'\n )\n\n def weighted_mean(self, access_var, group_var):\n\n return tracc.statistics.weighted_mean(self.data, access_var, group_var)\n\n def weighted_var(self, access_var, group_var):\n\n return tracc.statistics.weighted_var(self.data, access_var, group_var)\n\n def weighted_sd(self, access_var, group_var):\n\n return tracc.statistics.weighted_sd(self.data, access_var, group_var)\n\n def weighted_CV(self, access_var, group_var):\n\n return tracc.statistics.weighted_CV(self.data, 
access_var, group_var)\n\n    def weighted_Gini(self, access_var, group_var):\n\n        return tracc.statistics.weighted_Gini(self.data, access_var, group_var)\n\n    def quantiles(self, access_var, group_vars, nbins = 10, result = \"percent\"):\n\n        # assign each observation a bin, based on nbins\n        dfq = pd.DataFrame( tracc.statistics.weighted_qcut(self.data[access_var], self.data[group_vars[0]], nbins))\n\n        # create a specific name for the quantile column\n        q_col_name = 'q' + str(nbins) + \"_\" + (group_vars[0])\n        dfq.columns = [q_col_name]\n        self.data = self.data.join(dfq, how='outer')\n\n        # group by each bin, summarize\n        dfq = self.data.groupby([q_col_name])[group_vars].sum()\n\n        # return as counts or percent\n        if result == \"count\":\n            return dfq\n        elif result == \"percent\":\n            for var in group_vars:\n                dfq[var] = dfq[var] / dfq[var].sum()\n            return dfq\n"
] | [
[
"numpy.where",
"pandas.merge",
"pandas.concat"
]
] |
ShashankBice/pygeotools | [
"5bc74f96cf79f3089572cab7e4f3632ca36b22bc"
] | [
"pygeotools/lib/iolib.py"
] | [
"#! /usr/bin/env python\n\"\"\"\nFunctions for IO, mostly wrapped around GDAL\n\nNote: This was all written before RasterIO existed, which might be a better choice. \n\"\"\"\n\nimport os\nimport subprocess\n\nimport numpy as np\nfrom osgeo import gdal, gdal_array, osr\n\n#Define drivers\nmem_drv = gdal.GetDriverByName('MEM')\ngtif_drv = gdal.GetDriverByName('GTiff')\nvrt_drv = gdal.GetDriverByName(\"VRT\")\n\n#Default GDAL creation options\ngdal_opt = ['COMPRESS=LZW', 'TILED=YES', 'BIGTIFF=IF_SAFER']\n#gdal_opt += ['BLOCKXSIZE=1024', 'BLOCKYSIZE=1024']\n#List that can be used for building commands\ngdal_opt_co = []\n[gdal_opt_co.extend(('-co', i)) for i in gdal_opt]\n\n#Add methods to load ma from OpenCV, PIL, etc.\n#These formats should be directly readable as np arrays\n\n#Note: want to modify to import all bands as separate arrays in ndarray\n#Unless the user requests a single band, or range of bands\n\n#Check for file existence\ndef fn_check(fn):\n \"\"\"Wrapper to check for file existence\n \n Parameters\n ----------\n fn : str\n Input filename string.\n \n Returns\n -------\n bool\n True if file exists, False otherwise.\n \"\"\"\n return os.path.exists(fn)\n\ndef fn_check_full(fn):\n \"\"\"Check for file existence\n\n Avoids race condition, but slower than os.path.exists.\n \n Parameters\n ----------\n fn : str\n Input filename string.\n \n Returns\n -------\n status \n True if file exists, False otherwise.\n \"\"\"\n status = True \n if not os.path.isfile(fn): \n status = False\n else:\n try: \n open(fn) \n except IOError:\n status = False\n return status\n\ndef fn_list_check(fn_list):\n status = True\n for fn in fn_list:\n if not fn_check(fn):\n print('Unable to find: %s' % fn)\n status = False\n return status\n\ndef fn_list_valid(fn_list):\n print('%i input fn' % len(fn_list))\n out_list = []\n for fn in fn_list:\n if not fn_check(fn):\n print('Unable to find: %s' % fn)\n else:\n out_list.append(fn)\n print('%i output fn' % len(out_list))\n return out_list \n\n#Wrapper around gdal.Open\ndef fn_getds(fn):\n \"\"\"Wrapper around gdal.Open()\n \"\"\"\n ds = None\n if fn_check(fn):\n ds = gdal.Open(fn, gdal.GA_ReadOnly)\n else:\n print(\"Unable to find %s\" % fn)\n return ds\n\ndef fn_getma(fn, bnum=1):\n \"\"\"Get masked array from input filename\n\n Parameters\n ----------\n fn : str\n Input filename string\n bnum : int, optional\n Band number\n \n Returns\n -------\n np.ma.array \n Masked array containing raster values\n \"\"\"\n #Add check for filename existence\n ds = fn_getds(fn)\n return ds_getma(ds, bnum=bnum)\n\n#Given input dataset, return a masked array for the input band\ndef ds_getma(ds, bnum=1):\n \"\"\"Get masked array from input GDAL Dataset\n\n Parameters\n ----------\n ds : gdal.Dataset \n Input GDAL Datset\n bnum : int, optional\n Band number\n \n Returns\n -------\n np.ma.array \n Masked array containing raster values\n \"\"\"\n b = ds.GetRasterBand(bnum)\n return b_getma(b)\n\n#Given input band, return a masked array\ndef b_getma(b):\n \"\"\"Get masked array from input GDAL Band\n\n Parameters\n ----------\n b : gdal.Band \n Input GDAL Band \n \n Returns\n -------\n np.ma.array \n Masked array containing raster values\n \"\"\"\n b_ndv = get_ndv_b(b)\n #bma = np.ma.masked_equal(b.ReadAsArray(), b_ndv)\n #This is more appropriate for float, handles precision issues\n bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)\n return bma\n\ndef get_sub_dim(src_ds, scale=None, maxdim=1024):\n \"\"\"Compute dimensions of subsampled dataset \n\n Parameters\n ----------\n ds : 
gdal.Dataset \n Input GDAL Datset\n scale : int, optional\n Scaling factor\n maxdim : int, optional \n Maximum dimension along either axis, in pixels\n \n Returns\n -------\n ns\n Numper of samples in subsampled output\n nl\n Numper of lines in subsampled output\n scale \n Final scaling factor\n \"\"\"\n ns = src_ds.RasterXSize\n nl = src_ds.RasterYSize\n maxdim = float(maxdim)\n if scale is None:\n scale_ns = ns/maxdim\n scale_nl = nl/maxdim\n scale = max(scale_ns, scale_nl)\n #Need to check to make sure scale is positive real \n if scale > 1:\n ns = int(round(ns/scale))\n nl = int(round(nl/scale))\n return ns, nl, scale\n\ndef fn_getma_sub(fn, bnum=1, scale=None, maxdim=1024., return_ds=False): \n ds = gdal.Open(fn)\n return ds_getma_sub(ds, bnum=bnum, scale=scale, maxdim=maxdim, return_ds=return_ds)\n\n#Load a subsampled array\n#Can specify scale factor or max dimension\n#No need to load the entire dataset for stats computation\ndef ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False): \n \"\"\"Load a subsampled array, rather than full resolution\n\n This is useful when working with large rasters\n\n Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method.\n\n Parameters\n ----------\n ds : gdal.Dataset \n Input GDAL Datset\n bnum : int, optional\n Band number\n scale : int, optional\n Scaling factor\n maxdim : int, optional \n Maximum dimension along either axis, in pixels\n \n Returns\n -------\n np.ma.array \n Masked array containing raster values\n \"\"\"\n #print src_ds.GetFileList()[0]\n b = src_ds.GetRasterBand(bnum)\n b_ndv = get_ndv_b(b)\n ns, nl, scale = get_sub_dim(src_ds, scale, maxdim)\n #The buf_size parameters determine the final array dimensions\n b_array = b.ReadAsArray(buf_xsize=ns, buf_ysize=nl)\n bma = np.ma.masked_values(b_array, b_ndv)\n out = bma\n if return_ds:\n dtype = src_ds.GetRasterBand(1).DataType\n src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype)\n gt = np.array(src_ds.GetGeoTransform())\n gt[[1,5]] = gt[[1,5]]*scale\n src_ds_sub.SetGeoTransform(list(gt))\n src_ds_sub.SetProjection(src_ds.GetProjection())\n b = src_ds_sub.GetRasterBand(1)\n b.WriteArray(bma)\n b.SetNoDataValue(b_ndv)\n out = (bma, src_ds_sub)\n return out\n\n#Note: need to consolidate with warplib.writeout (takes ds, not ma)\n#Add option to build overviews when writing GTiff\n#Input proj must be WKT\ndef writeGTiff(a, dst_fn, src_ds=None, bnum=1, ndv=None, gt=None, proj=None, create=False, sparse=False):\n \"\"\"Write input array to disk as GeoTiff\n\n Parameters\n ----------\n a : np.array or np.ma.array\n Input array\n dst_fn : str\n Output filename\n src_ds: GDAL Dataset, optional\n Source Dataset to use for creating copy\n bnum : int, optional \n Output band\n ndv : float, optional \n Output NoData Value\n gt : list, optional\n Output GeoTransform\n proj : str, optional\n Output Projection (OGC WKT or PROJ.4 format)\n create : bool, optional\n Create new dataset\n sparse : bool, optional\n Output should be created with sparse options\n \"\"\"\n #If input is not np.ma, this creates a new ma, which has default filL_value of 1E20\n #Must manually override with ndv\n #Also consumes a lot of memory\n #Should bypass if input is bool\n from pygeotools.lib.malib import checkma \n a = checkma(a, fix=False)\n #Want to preserve fill_value if already specified\n if ndv is not None:\n a.set_fill_value(ndv)\n driver = gtif_drv\n #Currently only support writing singleband rasters\n #if a.ndim > 2:\n # np_nbands = a.shape[2]\n # if src_ds.RasterCount 
np_nbands: \n # for bnum in np_nbands:\n nbands = 1\n np_dt = a.dtype.name\n if src_ds is not None:\n #If this is a fn, get a ds\n #Note: this saves a lot of unnecessary iolib.fn_getds calls\n if isinstance(src_ds, str):\n src_ds = fn_getds(src_ds)\n #if isinstance(src_ds, gdal.Dataset):\n src_dt = gdal.GetDataTypeName(src_ds.GetRasterBand(bnum).DataType)\n src_gt = src_ds.GetGeoTransform()\n #This is WKT\n src_proj = src_ds.GetProjection()\n #src_srs = osr.SpatialReference() \n #src_srs.ImportFromWkt(src_ds.GetProjectionRef())\n\n #Probably a cleaner way to handle this\n if gt is None:\n gt = src_gt\n if proj is None:\n proj = src_proj\n\n #Need to create a new copy of the default options\n opt = list(gdal_opt)\n \n #Note: packbits is better for sparse data\n if sparse:\n opt.remove('COMPRESS=LZW')\n opt.append('COMPRESS=PACKBITS')\n #Not sure if VW can handle sparse tif\n #opt.append('SPARSE_OK=TRUE')\n\n #Use predictor=3 for floating point data\n if 'float' in np_dt.lower() and 'COMPRESS=LZW' in opt: \n opt.append('PREDICTOR=3')\n\n #If input ma is same as src_ds, write out array using CreateCopy from existing dataset\n #if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())): \n #Should compare srs.IsSame(src_srs)\n if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())) and (src_gt == gt) and (src_proj == proj):\n #Note: third option is strict flag, set to false\n dst_ds = driver.CreateCopy(dst_fn, src_ds, 0, options=opt)\n #Otherwise, use Create\n else:\n a_dtype = a.dtype\n gdal_dtype = np2gdal_dtype(a_dtype)\n if a_dtype.name == 'bool':\n #Set ndv to 0\n a.fill_value = False\n opt.remove('COMPRESS=LZW')\n opt.append('COMPRESS=DEFLATE')\n #opt.append('NBITS=1')\n #Create(fn, nx, ny, nbands, dtype, opt)\n dst_ds = driver.Create(dst_fn, a.shape[1], a.shape[0], nbands, gdal_dtype, options=opt)\n #Note: Need GeoMA here to make this work, or accept gt as argument\n #Could also do ds creation in calling script\n if gt is not None:\n dst_ds.SetGeoTransform(gt)\n if proj is not None:\n dst_ds.SetProjection(proj)\n \n dst_ds.GetRasterBand(bnum).WriteArray(a.filled())\n dst_ds.GetRasterBand(bnum).SetNoDataValue(float(a.fill_value))\n dst_ds = None\n\ndef writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):\n \"\"\"\n Write out a vrt to accompany a csv of points\n \"\"\"\n out_vrt = os.path.splitext(out_csv)[0]+'.vrt'\n out_csv = os.path.split(out_csv)[-1]\n f = open(out_vrt, 'w')\n f.write('<OGRVRTDataSource>\\n')\n f.write(' <OGRVRTLayer name=\"%s\">\\n' % os.path.splitext(out_csv)[0])\n f.write(' <SrcDataSource>%s</SrcDataSource>\\n' % out_csv)\n f.write(' <GeometryType>wkbPoint</GeometryType>\\n')\n f.write(' <LayerSRS>%s</LayerSRS>\\n' % srs)\n f.write(' <GeometryField encoding=\"PointFromColumns\" x=\"%s\" y=\"%s\"/>\\n' % (x, y))\n f.write(' </OGRVRTLayer>\\n')\n f.write('</OGRVRTDataSource>\\n')\n f.close()\n\n#Move to geolib?\n#Look up equivalent GDAL data type\ndef np2gdal_dtype(d):\n \"\"\"\n Get GDAL RasterBand datatype that corresponds with NumPy datatype\n Input should be numpy array or numpy dtype\n \"\"\"\n dt_dict = gdal_array.codes \n if isinstance(d, (np.ndarray, np.generic)):\n d = d.dtype\n #This creates dtype from another built-in type\n #d = np.dtype(d)\n if isinstance(d, np.dtype):\n if d.name == 'int8':\n gdal_dt = 1\n elif d.name == 'bool':\n #Write out as Byte\n 
gdal_dt = 1 \n else:\n gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)]\n else:\n print(\"Input must be NumPy array or NumPy dtype\")\n gdal_dt = None\n return gdal_dt\n\ndef gdal2np_dtype(b):\n \"\"\"\n Get NumPy datatype that corresponds with GDAL RasterBand datatype\n Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype\n \"\"\"\n dt_dict = gdal_array.codes\n if isinstance(b, str):\n b = gdal.Open(b)\n if isinstance(b, gdal.Dataset):\n b = b.GetRasterBand(1)\n if isinstance(b, gdal.Band):\n b = b.DataType\n if isinstance(b, int):\n np_dtype = dt_dict[b]\n else:\n np_dtype = None\n print(\"Input must be GDAL Dataset or RasterBand object\")\n return np_dtype\n\n#Replace nodata value in GDAL band\ndef replace_ndv(b, new_ndv):\n b_ndv = get_ndv_b(b) \n bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)\n bma.set_fill_value(new_ndv)\n b.WriteArray(bma.filled())\n b.SetNoDataValue(new_ndv)\n return b\n\ndef set_ndv(dst_fn, ndv):\n dst_ds = gdal.Open(dst_fn, gdal.GA_Update)\n for n in range(1, dst_ds.RasterCount+1):\n b = dst_ds.GetRasterBand(1)\n b.SetNoDataValue(ndv)\n dst_ds = None\n\n#Should overload these functions to handle fn, ds, or b\n#Perhaps abstract, as many functions will need this functionality\ndef get_ndv_fn(fn):\n ds = gdal.Open(fn, gdal.GA_ReadOnly)\n return get_ndv_ds(ds)\n\n#Want to modify to handle multi-band images and return list of ndv\ndef get_ndv_ds(ds, bnum=1):\n b = ds.GetRasterBand(bnum)\n return get_ndv_b(b)\n\n#Return nodata value for GDAL band\ndef get_ndv_b(b):\n \"\"\"Get NoData value for GDAL band.\n\n If NoDataValue is not set in the band, \n extract upper left and lower right pixel values.\n Otherwise assume NoDataValue is 0.\n \n Parameters\n ----------\n b : GDALRasterBand object \n This is the input band.\n \n Returns\n -------\n b_ndv : float \n NoData value \n \"\"\"\n\n b_ndv = b.GetNoDataValue()\n if b_ndv is None:\n #Check ul pixel for ndv\n ns = b.XSize\n nl = b.YSize\n ul = float(b.ReadAsArray(0, 0, 1, 1))\n #ur = float(b.ReadAsArray(ns-1, 0, 1, 1))\n lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1))\n #ll = float(b.ReadAsArray(0, nl-1, 1, 1))\n #Probably better to use 3/4 corner criterion\n #if ul == ur == lr == ll:\n if np.isnan(ul) or ul == lr:\n b_ndv = ul\n else:\n #Assume ndv is 0\n b_ndv = 0\n elif np.isnan(b_ndv):\n b_dt = gdal.GetDataTypeName(b.DataType)\n if 'Float' in b_dt:\n b_ndv = np.nan\n else:\n b_ndv = 0\n return b_ndv\n\n#Write out a recarray as a csv\ndef write_recarray(outfn, ra):\n with open(outfn,'w') as f:\n f.write(','.join([str(item) for item in ra.dtype.names])+'\\n')\n for row in ra:\n f.write(','.join([str(item) for item in row])+'\\n')\n \n#Check to make sure image doesn't contain errors\ndef image_check(fn):\n ds = gdal.Open(fn)\n status = True \n for i in range(ds.RasterCount):\n ds.GetRasterBand(i+1).Checksum()\n if gdal.GetLastErrorType() != 0:\n status = False \n return status\n\n#Return number of CPUs\n#Logical is \"virtual\" cpu count with hyperthreading\n#Set to False for physical cpu count\ndef cpu_count(logical=True):\n \"\"\"Return system CPU count\n \"\"\"\n if logical:\n from multiprocessing import cpu_count\n ncpu=cpu_count()\n else:\n import psutil\n ncpu=psutil.cpu_count(logical=False)\n return ncpu\n\ndef setstripe(dir, threads=cpu_count()):\n #import socket\n #if 'nasa' in socket.getfqdn():\n #Better to use 'df -T' to determine filesystem of directory\n #Can do this with psutil Python lib, but need to also find mount point of file\n if dir is not None:\n if 'lustre' in 
str(subprocess.check_output(['df','-T'])):\n if os.path.exists(dir): \n if threads is None:\n threads = cpu_count()\n cmd = ['lfs', 'setstripe', dir, '-c', str(threads)]\n print(' '.join(cmd))\n subprocess.call(cmd)\n\n#This is a shared directory for files like LULC, used by multiple tools \n#Default location is $HOME/data\n#Can specify in ~/.bashrc or ~/.profile\n#export DATADIR=$HOME/data\ndef get_datadir():\n default_datadir = os.path.join(os.path.expanduser('~'), 'data')\n datadir = os.environ.get('DATADIR', default_datadir)\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n return datadir\n\n#Function to get files using urllib\n#This works with ftp\ndef getfile(url, outdir=None):\n \"\"\"Function to fetch files using urllib\n\n Works with ftp\n\n \"\"\"\n fn = os.path.split(url)[-1]\n if outdir is not None:\n fn = os.path.join(outdir, fn)\n if not os.path.exists(fn):\n #Find appropriate urlretrieve for Python 2 and 3\n try:\n from urllib.request import urlretrieve\n except ImportError:\n from urllib import urlretrieve \n print(\"Retrieving: %s\" % url)\n #Add progress bar\n urlretrieve(url, fn)\n return fn\n\n#Function to get files using requests\n#Works with https authentication\ndef getfile2(url, auth=None, outdir=None):\n \"\"\"Function to fetch files using requests\n\n Works with https authentication\n\n \"\"\"\n import requests\n print(\"Retrieving: %s\" % url)\n fn = os.path.split(url)[-1]\n if outdir is not None:\n fn = os.path.join(outdir, fn)\n if auth is not None:\n r = requests.get(url, stream=True, auth=auth)\n else:\n r = requests.get(url, stream=True)\n chunk_size = 1000000\n with open(fn, 'wb') as fd:\n for chunk in r.iter_content(chunk_size):\n fd.write(chunk)\n\n#Get necessary credentials to access MODSCAG products - hopefully this will soon be archived with NSIDC \ndef get_auth():\n \"\"\"Get authorization token for https\n \"\"\"\n import getpass\n from requests.auth import HTTPDigestAuth\n #This binds raw_input to input for Python 2\n input_func = input\n try:\n input_func = raw_input\n except NameError:\n pass\n uname = input_func(\"MODSCAG Username:\")\n pw = getpass.getpass(\"MODSCAG Password:\")\n auth = HTTPDigestAuth(uname, pw)\n #wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw\n return auth\n\ndef readcsv(fn):\n \"\"\"\n Wrapper to read arbitrary csv, check for header\n\n Needs some work to be more robust, quickly added for demcoreg sampling\n \"\"\"\n import csv\n #Check first line for header\n with open(fn, 'r') as f:\n reader = csv.DictReader(f)\n hdr = reader.fieldnames\n\n #Assume there is a header on first line, check \n skiprows = 1\n if np.all(f.isdigit() for f in hdr):\n hdr = None\n skiprows = 0\n\n #Check header for lat/lon/z or x/y/z tags\n\n #Should probably do genfromtxt here if header exists and dtype of cols is variable\n pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None)\n return pts\n"
] | [
[
"numpy.ma.masked_values",
"numpy.loadtxt",
"numpy.isnan"
]
] |
chenxiaoyu523/RPNet-Pytorch | [
"7beceb9f39e66eba5283536b478f86523fcc96c7"
] | [
"data/utils.py"
] | [
"import os\nfrom PIL import Image\nimport numpy as np\n\n\ndef get_files(folder, name_filter=None, extension_filter=None):\n \"\"\"Helper function that returns the list of files in a specified folder\n with a specified extension.\n\n Keyword arguments:\n - folder (``string``): The path to a folder.\n - name_filter (```string``, optional): The returned files must contain\n this substring in their filename. Default: None; files are not filtered.\n - extension_filter (``string``, optional): The desired file extension.\n Default: None; files are not filtered\n\n \"\"\"\n if not os.path.isdir(folder):\n raise RuntimeError(\"\\\"{0}\\\" is not a folder.\".format(folder))\n\n # Filename filter: if not specified don't filter (condition always true);\n # otherwise, use a lambda expression to filter out files that do not\n # contain \"name_filter\"\n if name_filter is None:\n # This looks hackish...there is probably a better way\n name_cond = lambda filename: True\n else:\n name_cond = lambda filename: name_filter in filename\n\n # Extension filter: if not specified don't filter (condition always true);\n # otherwise, use a lambda expression to filter out files whose extension\n # is not \"extension_filter\"\n if extension_filter is None:\n # This looks hackish...there is probably a better way\n ext_cond = lambda filename: True\n else:\n ext_cond = lambda filename: filename.endswith(extension_filter)\n\n filtered_files = []\n\n # Explore the directory tree to get files that contain \"name_filter\" and\n # with extension \"extension_filter\"\n for path, _, files in os.walk(folder):\n files.sort()\n for file in files:\n if name_cond(file) and ext_cond(file):\n full_path = os.path.join(path, file)\n filtered_files.append(full_path)\n\n return filtered_files\n\n\ndef pil_loader(data_path, label_path):\n \"\"\"Loads a sample and label image given their path as PIL images.\n\n Keyword arguments:\n - data_path (``string``): The filepath to the image.\n - label_path (``string``): The filepath to the ground-truth image.\n\n Returns the image and the label as PIL images.\n\n \"\"\"\n data = Image.open(data_path)\n label = Image.open(label_path)\n\n return data, label\n\n\ndef remap(image, old_values, new_values):\n assert isinstance(image, Image.Image) or isinstance(\n image, np.ndarray), \"image must be of type PIL.Image or numpy.ndarray\"\n assert type(new_values) is tuple, \"new_values must be of type tuple\"\n assert type(old_values) is tuple, \"old_values must be of type tuple\"\n assert len(new_values) == len(\n old_values), \"new_values and old_values must have the same length\"\n\n # If image is a PIL.Image convert it to a numpy array\n if isinstance(image, Image.Image):\n image = np.array(image)\n\n # Replace old values by the new ones\n tmp = np.zeros_like(image)\n for old, new in zip(old_values, new_values):\n # Since tmp is already initialized as zeros we can skip new values\n # equal to 0\n if new != 0:\n tmp[image == old] = new\n\n return Image.fromarray(tmp)\n\n\ndef enet_weighing(dataloader, num_classes, c=1.02):\n \"\"\"Computes class weights as described in the ENet paper:\n\n w_class = 1 / (ln(c + p_class)),\n\n where c is usually 1.02 and p_class is the propensity score of that\n class:\n\n propensity_score = freq_class / total_pixels.\n\n References: https://arxiv.org/abs/1606.02147\n\n Keyword arguments:\n - dataloader (``data.Dataloader``): A data loader to iterate over the\n dataset.\n - num_classes (``int``): The number of classes.\n - c (``int``, optional): AN additional 
hyper-parameter which restricts\n the interval of values for the weights. Default: 1.02.\n\n \"\"\"\n class_count = 0\n total = 0\n for _, label in dataloader:\n label = label.cpu().numpy()\n\n # Flatten label\n flat_label = label.flatten()\n\n # Sum up the number of pixels of each class and the total pixel\n # counts for each label\n class_count += np.bincount(flat_label, minlength=num_classes)\n total += flat_label.size\n\n # Compute propensity score and then the weights for each class\n propensity_score = class_count / total\n class_weights = 1 / (np.log(c + propensity_score))\n\n return class_weights\n\n\ndef median_freq_balancing(dataloader, num_classes):\n \"\"\"Computes class weights using median frequency balancing as described\n in https://arxiv.org/abs/1411.4734:\n\n w_class = median_freq / freq_class,\n\n where freq_class is the number of pixels of a given class divided by\n the total number of pixels in images where that class is present, and\n median_freq is the median of freq_class.\n\n Keyword arguments:\n - dataloader (``data.Dataloader``): A data loader to iterate over the\n dataset.\n whose weights are going to be computed.\n - num_classes (``int``): The number of classes\n\n \"\"\"\n class_count = 0\n total = 0\n for _, label in dataloader:\n label = label.cpu().numpy()\n\n # Flatten label\n flat_label = label.flatten()\n\n # Sum up the class frequencies\n bincount = np.bincount(flat_label, minlength=num_classes)\n\n # Create of mask of classes that exist in the label\n mask = bincount > 0\n # Multiply the mask by the pixel count. The resulting array has\n # one element for each class. The value is either 0 (if the class\n # does not exist in the label) or equal to the pixel count (if\n # the class exists in the label)\n total += mask * flat_label.size\n\n # Sum up the number of pixels found for each class\n class_count += bincount\n\n # Compute the frequency and its median\n freq = class_count / total\n med = np.median(freq)\n\n return med / freq\n"
] | [
[
"numpy.zeros_like",
"numpy.bincount",
"numpy.median",
"numpy.log",
"numpy.array"
]
] |
rpuntaie/tensorflow_examples | [
"1958f7f0de9d96859dc3961a1695e1543fec9fd3",
"1958f7f0de9d96859dc3961a1695e1543fec9fd3"
] | [
"mask.py",
"course_v2/_09nlp.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"\nChain models.\n\nMasking.\n\nShow output of layer.\n\"\"\"\n\nimport numpy as np\nfrom tensorflow.keras import Input\nfrom tensorflow.keras.layers import Masking, Dense\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.models import Sequential, Model\n\nX_train = np.random.rand(4,3,2)\nDense_unit = 1\ndense_reg = 0.01\nmdl = Sequential()\nmdl.add(Input(shape=(X_train.shape[1],X_train.shape[2]),name='input_feature'))\nmdl.add(Masking(mask_value=0,name='masking'))\nmdl.add(Dense(Dense_unit,kernel_regularizer=l2(dense_reg),activation='relu',name='output_feature'))\nmdl.summary()\n#this is the same as chaining models\nmdl2mask = Model(inputs=mdl.input,outputs=mdl.get_layer(\"masking\").output)\nmdl2mask.compile()\nmdl.compile()\nmaskoutput = mdl2mask.predict(X_train)\nmdloutput = mdl.predict(X_train)\nprint(maskoutput) # print output after/of masking\nprint(mdloutput) # print output of mdl\nprint(maskoutput.shape) #(4, 3, 2): masking has the shape of the layer before (input here)\nprint(mdloutput.shape) #(4, 3, 1): shape of the output of dense\n\n",
"#!/usr/bin/env python3\n\n# Tokenizing text and creating sequences for sentences\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c01_nlp_turn_words_into_tokens.ipynb\n\n# This colab shows you how to tokenize text and create sequences for sentences as\n# the first stage of preparing text for use with TensorFlow models.\n\n## Import the Tokenizer\n\n# Import the Tokenizer\nimport io\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional\n\nimport tensorflow_datasets as tfds\n\nfrom savefit import *\n\n## Write some sentences\n# Feel free to change and add sentences as you like\n\nsentences = [\n 'My favorite food is ice cream',\n 'do you like ice cream too?',\n 'My dog likes ice cream!',\n \"your favorite flavor of icecream is chocolate\",\n \"chocolate isn't good for dogs\",\n \"your dog, your cat, and your parrot prefer broccoli\"\n]\n\n## Tokenize the words\n# The first step to preparing text to be used in a machine learning model is to\n# tokenize the text, in other words, to generate numbers for the words.\n\n# Optionally set the max number of words to tokenize.\n# The out of vocabulary (OOV) token represents words that are not in the index.\n# Call fit_on_text() on the tokenizer to generate unique numbers for each word\ntokenizer = Tokenizer(num_words = 100, oov_token=\"<OOV>\")\ntokenizer.fit_on_texts(sentences)\n\n\n## View the word index\n# After you tokenize the text, the tokenizer has a word index that contains\n# key-value pairs for all the words and their numbers.\n# The word is the key, and the number is the value.\n# Notice that the OOV token is the first entry.\n\n# Examine the word index\nword_index = tokenizer.word_index\nprint(word_index)\n\n# Get the number for a given word\nprint(word_index['favorite'])\n\n# Create sequences for the sentences\n\n# After you tokenize the words, the word index contains a unique number for each\n# word. However, the numbers in the word index are not ordered. Words in a\n# sentence have an order. So after tokenizing the words, the next step is to\n# generate sequences for the sentences.\n\nsequences = tokenizer.texts_to_sequences(sentences)\nprint (sequences)\n\n# Sequence sentences that contain words that are not in the word index\n\n# Let's take a look at what happens if the sentence being sequenced contains\n# words that are not in the word index.\n# The Out of Vocabluary (OOV) token is the first entry in the word index. You\n# will see it shows up in the sequences in place of any word that is not in the\n# word index.\n\nsentences2 = [\"I like hot chocolate\", \"My dogs and my hedgehog like kibble but my squirrel prefers grapes and my chickens like ice cream, preferably vanilla\"]\n\nsequences2 = tokenizer.texts_to_sequences(sentences2)\nprint(sequences2)\n\n\n# Preparing text to use with TensorFlow models\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c02_nlp_padding.ipynb\n\n# The high level steps to prepare text to be used in a machine learning model are:\n\n# 1. Tokenize the words to get numerical values for them\n# 2. Create numerical sequences of the sentences\n# 3. 
Adjust the sequences to all be the same length.\n\n## Make the sequences all the same length\n\n# Later, when you feed the sequences into a neural network to train a model, the\n# sequences all need to be uniform in size. Currently the sequences have varied\n# lengths, so the next step is to make them all be the same size, either by\n# padding them with zeros and/or truncating them.\n# \n# Use f.keras.preprocessing.sequence.pad_sequences to add zeros to the sequences\n# to make them all be the same length. By default, the padding goes at the start\n# of the sequences, but you can specify to pad at the end.\n# \n# You can optionally specify the maximum length to pad the sequences to.\n# Sequences that are longer than the specified max length will be truncated. By\n# default, sequences are truncated from the beginning of the sequence, but you\n# can specify to truncate from the end.\n# \n# If you don't provide the max length, then the sequences are padded to match the\n# length of the longest sentence.\n# \n# For all the options when padding and truncating sequences, see\n# https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences\n\n\npadded = pad_sequences(sequences)\nprint(\"\\nWord Index = \" , word_index)\nprint(\"\\nSequences = \" , sequences)\nprint(\"\\nPadded Sequences:\")\nprint(padded)\n\n\n# Specify a max length for the padded sequences\npadded = pad_sequences(sequences, maxlen=15)\nprint(padded)\n\n# Put the padding at the end of the sequences\npadded = pad_sequences(sequences, maxlen=15, padding=\"post\")\nprint(padded)\n\n# Limit the length of the sequences, you will see some sequences get truncated\npadded = pad_sequences(sequences, maxlen=3)\nprint(padded)\n\n## What happens if some of the sentences contain words that are not in the word index?\n\n# Here's where the \"out of vocabulary\" token is used. Try generating sequences\n# for some sentences that have words that are not in the word index.\n\n# Try turning sentences that contain words that \n# aren't in the word index into sequences.\n# Add your own sentences to the test_data\ntest_data = [\n \"my best friend's favorite ice cream flavor is strawberry\",\n \"my dog's best friend is a manatee\"\n]\nprint (test_data)\n\n# Remind ourselves which number corresponds to the\n# out of vocabulary token in the word index\nprint(\"<OOV> has the number\", word_index['<OOV>'], \"in the word index.\")\n\n# Convert the test sentences to sequences\ntest_seq = tokenizer.texts_to_sequences(test_data)\nprint(\"\\nTest Sequence = \", test_seq)\n\n# Pad the new sequences\npadded = pad_sequences(test_seq, maxlen=10)\nprint(\"\\nPadded Test Sequence: \")\n\n# Notice that \"1\" appears in the sequence wherever there's a word \n# that's not in the word index\nprint(padded)\n\n\n# Tokenize and sequence a bigger corpus of text\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c03_nlp_prepare_larger_text_corpus.ipynb\n\n# So far, you have written some test sentences and generated a word index and\n# then created sequences for the sentences. \n\n# Now you will tokenize and sequence a larger body of text, specifically reviews\n# from Amazon and Yelp. \n\n## About the dataset\n\n# You will use a dataset containing Amazon and Yelp reviews of products and\n# restaurants. This dataset was originally extracted from\n# [Kaggle](https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set).\n\n# The dataset includes reviews, and each review is labelled as 0 (bad) or 1\n# (good). 
However, in this exercise, you will only work with the reviews, not the\n# labels, to practice tokenizing and sequencing the text. \n\n### Example good reviews:\n\n# * This is hands down the best phone I've ever had.\n# * Four stars for the food & the guy in the blue shirt for his great vibe & still letting us in to eat !\n\n### Example bad reviews: \n\n# * A lady at the table next to us found a live green caterpillar In her salad\n# * If you plan to use this in a car forget about it.\n\n### See more reviews\n# Feel free to [download the\n # dataset](https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P)\n# from a drive folder belonging to Udacity and open it on your local machine to\n# see more reviews.\n\n# Get the corpus of text\n\n# The combined dataset of reviews has been saved in a Google drive belonging to\n# Udacity. You can download it from there.\n\npath = tf.keras.utils.get_file('reviews.csv', 'https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P')\nprint (path)\n\n\n# Each row in the csv file is a separate review.\n# The csv file has 2 columns:\n# \n# * **text** (the review)\n# * **sentiment** (0 or 1 indicating a bad or good review)\n\n# Read the csv file\ndataset = pd.read_csv(path)\n\n# Review the first few entries in the dataset\ndataset.head()\n\n# Get the reviews from the csv file\n\n# Get the reviews from the text column\nreviews = dataset['text'].tolist()\n\n# Tokenize the text\n# Create the tokenizer, specify the OOV token, tokenize the text, then inspect the word index.\n\ntokenizer = Tokenizer(oov_token=\"<OOV>\")\ntokenizer.fit_on_texts(reviews)\n\nword_index = tokenizer.word_index\nprint(len(word_index))\nprint(word_index)\n\n\n# Generate sequences for the reviews\n# Generate a sequence for each review. Set the max length to match the longest\n# review. Add the padding zeros at the end of the review for reviews that are not\n# as long as the longest one.\n\nsequences = tokenizer.texts_to_sequences(reviews)\npadded_sequences = pad_sequences(sequences, padding='post')\n\n# What is the shape of the vector containing the padded sequences?\n# The shape shows the number of sequences and the length of each one.\nprint(padded_sequences.shape)\n\n# What is the first review?\nprint (reviews[0])\n\n# Show the sequence for the first review\nprint(padded_sequences[0])\n\n# Try printing the review and padded sequence for other elements.\n\n\n# Word Embeddings and Sentiment\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c04_nlp_embeddings_and_sentiment.ipynb\n\n# In this colab, you'll work with word embeddings and train a basic neural\n# network to predict text sentiment. At the end, you'll be able to visualize how\n# the network sees the related sentiment of each word in the dataset.\n\n## Get the dataset\n\n# We're going to use a dataset containing Amazon and Yelp reviews, with their\n# related sentiment (1 for positive, 0 for negative). 
This dataset was originally\n# extracted from\n# [here](https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set).\n\n# !wget --no-check-certificate -O sentiment.csv https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P\n\ndataset = pd.read_csv('sentiment.csv')\n\nsentences = dataset['text'].tolist()\nlabels = dataset['sentiment'].tolist()\n\n# Separate out the sentences and labels into training and test sets\ntraining_size = int(len(sentences) * 0.8)\n\ntraining_sentences = sentences[0:training_size]\ntesting_sentences = sentences[training_size:]\ntraining_labels = labels[0:training_size]\ntesting_labels = labels[training_size:]\n\n# Make labels into numpy arrays for use with the network later\ntraining_labels_final = np.array(training_labels)\ntesting_labels_final = np.array(testing_labels)\n\n## Tokenize the dataset\n\n# Tokenize the dataset, including padding and OOV\n\nvocab_size = 1000\nembedding_dim = 16\nmax_length = 100\ntrunc_type='post'\npadding_type='post'\noov_tok = \"<OOV>\"\n\n\ntokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\nword_index = tokenizer.word_index\nsequences = tokenizer.texts_to_sequences(training_sentences)\npadded = pad_sequences(sequences,maxlen=max_length, padding=padding_type, \n truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences,maxlen=max_length, \n padding=padding_type, truncating=trunc_type)\n\n## Review a Sequence\n\n# Let's quickly take a look at one of the padded sequences to ensure everything\n# above worked appropriately.\n\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])\n\nprint(decode_review(padded[1]))\nprint(training_sentences[1])\n\n## Train a Basic Sentiment Model with Embeddings\n\n# Build a basic sentiment network\n# Note the embedding layer is first, \n# and the output is only 1 node as it is either 0 or 1 (negative or positive)\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(6, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\nmodel.summary()\n\nnum_epochs = 10\nmodel,history = savefit(model, padded, training_labels_final, epochs=num_epochs,\n validation_data=(testing_padded, testing_labels_final), verbose=0)\n\n## Get files for visualizing the network\n\n# The code below will download two files for visualizing how your network \"sees\"\n# the sentiment related to each word. 
Head to http://projector.tensorflow.org/\n# and load these files, then click the \"Sphereize\" checkbox.\n\n# First get the weights of the embedding layer\ne = model.layers[0]\nweights = e.get_weights()[0]\nprint(weights.shape) # shape: (vocab_size, embedding_dim)\n\n\n# Write out the embedding vectors and metadata\nout_v = io.open('vecs.tsv', 'w', encoding='utf-8')\nout_m = io.open('meta.tsv', 'w', encoding='utf-8')\nfor word_num in range(1, vocab_size):\n word = reverse_word_index[word_num]\n embeddings = weights[word_num]\n out_m.write(word + \"\\n\")\n out_v.write('\\t'.join([str(x) for x in embeddings]) + \"\\n\")\nout_v.close()\nout_m.close()\n\n# Download the files\ntry:\n from google.colab import files\nexcept ImportError:\n pass\nelse:\n files.download('vecs.tsv')\n files.download('meta.tsv')\n\n## Predicting Sentiment in New Reviews\n\n# Now that you've trained and visualized your network, take a look below at how\n# we can predict sentiment in new reviews the network has never seen before.\n\n# Use the model to predict a review \nfake_reviews = ['I love this phone', 'I hate spaghetti', \n 'Everything was cold',\n 'Everything was hot exactly as I wanted', \n 'Everything was green', \n 'the host seated us immediately',\n 'they gave us free chocolate cake', \n 'not sure about the wilted flowers on the table',\n 'only works when I stand on tippy toes', \n 'does not work when I stand on my head']\n\nprint(fake_reviews) \n\n# Create the sequences\npadding_type='post'\nsample_sequences = tokenizer.texts_to_sequences(fake_reviews)\nfakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length) \n\nprint('\\nHOT OFF THE PRESS! HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\\n') \n\nclasses = model.predict(fakes_padded)\n\n# The closer the class is to 1, the more positive the review is deemed to be\nfor x in range(len(fake_reviews)):\n print(fake_reviews[x])\n print(classes[x])\n print('\\n')\n\n# Try adding reviews of your own\n# Add some negative words (such as \"not\") to the good reviews and see what happens\n# For example:\n# they gave us free chocolate cake and did not charge us\n\n\n# Tweaking the Model\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l09c05_nlp_tweaking_the_model.ipynb\n\n# In this colab, you'll investigate how various tweaks to data processing and the\n# model itself can impact results. At the end, you'll once again be able to\n# visualize how the network sees the related sentiment of each word in the\n# dataset.\n\nsentences = dataset['text'].tolist()\nlabels = dataset['sentiment'].tolist()\n\n# Separate out the sentences and labels into training and test sets\ntraining_size = int(len(sentences) * 0.8)\n\ntraining_sentences = sentences[0:training_size]\ntesting_sentences = sentences[training_size:]\ntraining_labels = labels[0:training_size]\ntesting_labels = labels[training_size:]\n\n# Make labels into numpy arrays for use with the network later\ntraining_labels_final = np.array(training_labels)\ntesting_labels_final = np.array(testing_labels)\n\n## Tokenize the dataset (with tweaks!)\n\n# Now, we'll tokenize the dataset, but we can make some changes to this from\n# before. 
Previously, we used: \n\nvocab_size = 1000\nembedding_dim = 16\nmax_length = 100\ntrunc_type='post'\npadding_type='post'\n\n# How might changing the `vocab_size`, `embedding_dim` or `max_length` affect how\n# the model performs?\n\nvocab_size = 500\nembedding_dim = 16\nmax_length = 50\ntrunc_type='post'\npadding_type='post'\noov_tok = \"<OOV>\"\n\ntokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\nword_index = tokenizer.word_index\ntraining_sequences = tokenizer.texts_to_sequences(training_sentences)\ntraining_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\n## Train a Sentiment Model (with tweaks!)\n\n# We'll use a slightly different model here, using `GlobalAveragePooling1D`\n# instead of `Flatten()`.\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.GlobalAveragePooling1D(),\n tf.keras.layers.Dense(6, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\nmodel.summary()\n\nnum_epochs = 30\nmodel,history = savefit(model, training_padded, training_labels_final, epochs=num_epochs,\n validation_data=(testing_padded, testing_labels_final),verbose=0)\n\n## Visualize the training graph\n\n# You can use the code below to visualize the training and validation accuracy\n# while you try out different tweaks to the hyperparameters and model.\n\ndef plot_graphs(history, string):\n if not history:\n return\n plt.plot(history.history[string])\n plt.plot(history.history['val_'+string])\n plt.xlabel(\"Epochs\")\n plt.ylabel(string)\n plt.legend([string, 'val_'+string])\n plt.show()\n\nplot_graphs(history, \"accuracy\")\nplot_graphs(history, \"loss\")\n\n## Get files for visualizing the network\n\n# The code below will download two files for visualizing how your network \"sees\"\n# the sentiment related to each word. 
Head to http://projector.tensorflow.org/\n# and load these files, then click the checkbox to \"sphereize\" the data.\n\n# Note: You may run into errors with the projection if your `vocab_size` earlier\n# was larger than the actual number of words in the vocabulary, in which case\n# you'll need to decrease this variable and re-train in order to visualize.\n\n# First get the weights of the embedding layer\ne = model.layers[0]\nweights = e.get_weights()[0]\nprint(weights.shape) # shape: (vocab_size, embedding_dim)\n\nimport io\n\n# Create the reverse word index\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\n# Write out the embedding vectors and metadata\nout_v = io.open('vecs.tsv', 'w', encoding='utf-8')\nout_m = io.open('meta.tsv', 'w', encoding='utf-8')\nfor word_num in range(1, vocab_size):\n word = reverse_word_index[word_num]\n embeddings = weights[word_num]\n out_m.write(word + \"\\n\")\n out_v.write('\\t'.join([str(x) for x in embeddings]) + \"\\n\")\nout_v.close()\nout_m.close()\n\n# Download the files\ntry:\n from google.colab import files\nexcept ImportError:\n pass\nelse:\n files.download('vecs.tsv')\n files.download('meta.tsv')\n\n## Predicting Sentiment in New Reviews\n\n# Using LSTMs, CNNs, GRUs with a larger dataset\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l10c02_nlp_multiple_models_for_predicting_sentiment.ipynb\n\n# In this colab, you use different kinds of layers to see how they affect the\n# model.\n# You will use the glue/sst2 dataset, which is available through tensorflow_datasets. \n# The General Language Understanding Evaluation (GLUE) benchmark\n# (https://gluebenchmark.com/) is a collection of resources for training,\n# evaluating, and analyzing natural language understanding systems.\n# These resources include the Stanford Sentiment Treebank (SST) dataset that\n# consists of sentences from movie reviews and human annotations of their\n# sentiment. 
This colab uses version 2 of the SST dataset.\n# The splits are:\n# \n# * train\t67,349\n# * validation\t872\n# \n# and the column headings are:\n# \n# * sentence\n# * label\n\n# For more information about the dataset, see\n# [https://www.tensorflow.org/datasets/catalog/glue#gluesst2](https://www.tensorflow.org/datasets/catalog/glue#gluesst2)\n\n# Get the dataset.\n# It has 70000 items, so might take a while to download\ndataset, info = tfds.load('glue/sst2', with_info=True)\nprint(info.features)\nprint(info.features[\"label\"].num_classes)\nprint(info.features[\"label\"].names)\n\n# Get the training and validation datasets\ndataset_train, dataset_validation = dataset['train'], dataset['validation']\ndataset_train\n\n# Print some of the entries\nfor example in dataset_train.take(2):\n review, label = example[\"sentence\"], example[\"label\"]\n print(\"Review:\", review)\n print(\"Label: %d \\n\" % label.numpy())\n\n# Get the sentences and the labels\n# for both the training and the validation sets\ntraining_reviews = []\ntraining_labels = []\n\nvalidation_reviews = []\nvalidation_labels = []\n\n# The dataset has 67,000 training entries, but that's a lot to process here!\n\n# If you want to take the entire dataset: WARNING: takes longer!!\n# for item in dataset_train.take(-1):\n\n# Take 10,000 reviews\nfor item in dataset_train.take(10000):\n review, label = item[\"sentence\"], item[\"label\"]\n training_reviews.append(str(review.numpy()))\n training_labels.append(label.numpy())\n\nprint (\"\\nNumber of training reviews is: \", len(training_reviews))\n\n# print some of the reviews and labels\nfor i in range(0, 2):\n print (training_reviews[i])\n print (training_labels[i])\n\n# Get the validation data\n# there's only about 800 items, so take them all\nfor item in dataset_validation.take(-1): \n review, label = item[\"sentence\"], item[\"label\"]\n validation_reviews.append(str(review.numpy()))\n validation_labels.append(label.numpy())\n\nprint (\"\\nNumber of validation reviews is: \", len(validation_reviews))\n\n# Print some of the validation reviews and labels\nfor i in range(0, 2):\n print (validation_reviews[i])\n print (validation_labels[i])\n\n\n# Tokenize the words and sequence the sentences\n\n\n# There's a total of 21224 words in the reviews\n# but many of them are irrelevant like with, it, of, on.\n# If we take a subset of the training data, then the vocab\n# will be smaller.\n\n# A reasonable review might have about 50 words or so,\n# so we can set max_length to 50 (but feel free to change it as you like)\n\nvocab_size = 4000\nembedding_dim = 16\nmax_length = 50\ntrunc_type='post'\npad_type='post'\noov_tok = \"<OOV>\"\n\ntokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_reviews)\nword_index = tokenizer.word_index\n\n\n# Pad the sequences\n\n# Pad the sequences so that they are all the same length\ntraining_sequences = tokenizer.texts_to_sequences(training_reviews)\ntraining_padded = pad_sequences(training_sequences,maxlen=max_length, \n truncating=trunc_type, padding=pad_type)\n\nvalidation_sequences = tokenizer.texts_to_sequences(validation_reviews)\nvalidation_padded = pad_sequences(validation_sequences,maxlen=max_length)\n\ntraining_labels_final = np.array(training_labels)\nvalidation_labels_final = np.array(validation_labels)\n\n# Create the model using an Embedding\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.GlobalAveragePooling1D(), \n 
tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\nmodel.summary()\n\n# Train the model\n\nnum_epochs = 20\nmodel,history = savefit(model, training_padded, training_labels_final, epochs=num_epochs, \n validation_data=(validation_padded, validation_labels_final),verbose=0)\n\n\n# Plot the accurracy and loss\n\nplot_graphs(history, \"accuracy\")\nplot_graphs(history, \"loss\")\n\n# Write a function to predict the sentiment of reviews\n\n# Write some new reviews \n\nreview1 = \"\"\"I loved this movie\"\"\"\n\nreview2 = \"\"\"that was the worst movie I've ever seen\"\"\"\n\nreview3 = \"\"\"too much violence even for a Bond film\"\"\"\n\nreview4 = \"\"\"a captivating recounting of a cherished myth\"\"\"\n\nnew_reviews = [review1, review2, review3, review4]\n\n\n# Define a function to prepare the new reviews for use with a model\n# and then use the model to predict the sentiment of the new reviews \n\ndef predict_review(model, reviews):\n # Create the sequences\n padding_type='post'\n sample_sequences = tokenizer.texts_to_sequences(reviews)\n reviews_padded = pad_sequences(sample_sequences, padding=padding_type, \n maxlen=max_length) \n classes = model.predict(reviews_padded)\n for x in range(len(reviews_padded)):\n print(reviews[x])\n print(classes[x])\n print('\\n')\n\npredict_review(model, new_reviews)\n\n\n\n# Define a function to train and show the results of models with different layers\n\ndef fit_model_and_show_results (model, reviews):\n model.summary()\n model, history = savefit(model, training_padded, training_labels_final, epochs=num_epochs, \n validation_data=(validation_padded, validation_labels_final),verbose=0)\n plot_graphs(history, \"accuracy\")\n plot_graphs(history, \"loss\")\n predict_review(model, reviews)\n\n# Use a CNN\n\nnum_epochs = 30\n\nmodel_cnn = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Conv1D(16, 5, activation='relu'),\n tf.keras.layers.GlobalMaxPooling1D(),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\n# Default learning rate for the Adam optimizer is 0.001\n# Let's slow down the learning rate by 10.\nlearning_rate = 0.0001\nmodel_cnn.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate), \n metrics=['accuracy'])\n\nfit_model_and_show_results(model_cnn, new_reviews)\n\n# Use a GRU\n\nnum_epochs = 30\n\nmodel_gru = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32)),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nlearning_rate = 0.00003 # slower than the default learning rate\nmodel_gru.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate),\n metrics=['accuracy'])\n\nfit_model_and_show_results(model_gru, new_reviews)\n\n# Add a bidirectional LSTM\n\nnum_epochs = 30\n\nmodel_bidi_lstm = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim)), \n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nlearning_rate = 0.00003\nmodel_bidi_lstm.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate),\n metrics=['accuracy'])\nfit_model_and_show_results(model_bidi_lstm, new_reviews)\n\n# Use multiple bidirectional LSTMs\n\nnum_epochs = 30\n\nmodel_multiple_bidi_lstm = 
tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim, \n return_sequences=True)),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim)),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nlearning_rate = 0.0003\nmodel_multiple_bidi_lstm.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate),\n metrics=['accuracy'])\nfit_model_and_show_results(model_multiple_bidi_lstm, new_reviews)\n\n# Try some more reviews\n\n# Write some new reviews \n\nreview1 = \"\"\"I loved this movie\"\"\"\n\nreview2 = \"\"\"that was the worst movie I've ever seen\"\"\"\n\nreview3 = \"\"\"too much violence even for a Bond film\"\"\"\n\nreview4 = \"\"\"a captivating recounting of a cherished myth\"\"\"\n\nreview5 = \"\"\"I saw this movie yesterday and I was feeling low to start with,\n but it was such a wonderful movie that it lifted my spirits and brightened \n my day, you can\\'t go wrong with a movie with Whoopi Goldberg in it.\"\"\"\n\nreview6 = \"\"\"I don\\'t understand why it received an oscar recommendation\n for best movie, it was long and boring\"\"\"\n\nreview7 = \"\"\"the scenery was magnificent, the CGI of the dogs was so realistic I\n thought they were played by real dogs even though they talked!\"\"\"\n\nreview8 = \"\"\"The ending was so sad and yet so uplifting at the same time. \n I'm looking for an excuse to see it again\"\"\"\n\nreview9 = \"\"\"I had expected so much more from a movie made by the director \n who made my most favorite movie ever, I was very disappointed in the tedious \n story\"\"\"\n\nreview10 = \"I wish I could watch this movie every day for the rest of my life\"\n\nmore_reviews = [review1, review2, review3, review4, review5, review6, review7, \n review8, review9, review10]\n\n\nprint(\"============================\\n\",\"Embeddings only:\\n\", \"============================\")\npredict_review(model, more_reviews)\n\nprint(\"============================\\n\",\"With CNN\\n\", \"============================\")\npredict_review(model_cnn, more_reviews)\n\nprint(\"===========================\\n\",\"With bidirectional GRU\\n\", \"============================\")\npredict_review(model_gru, more_reviews)\n\nprint(\"===========================\\n\", \"With a single bidirectional LSTM:\\n\", \"===========================\")\npredict_review(model_bidi_lstm, more_reviews)\n\nprint(\"===========================\\n\", \"With multiple bidirectional LSTM:\\n\", \"==========================\")\npredict_review(model_multiple_bidi_lstm, more_reviews)\n\n\n# Constructing a Text Generation Model\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l10c03_nlp_constructing_text_generation_model.ipynb\n\n# Using most of the techniques you've already learned, it's now possible to\n# generate new text by predicting the next word that follows a given seed word.\n# To practice this method, we'll use the [Kaggle Song Lyrics\n# Dataset](https://www.kaggle.com/mousehead/songlyrics).\n\n## Import TensorFlow and related functions\n\n## Get the Dataset\n\n# As noted above, we'll utilize the [Song Lyrics\n# dataset](https://www.kaggle.com/mousehead/songlyrics) on Kaggle.\n\n# !wget --no-check-certificate https://drive.google.com/uc?id=1LiJFZd41ofrWoBtW-pMYsfz1w8Ny0Bj8 -O songdata.csv\n\n## **First 10 Songs**\n\n# Let's first look at just 10 songs from the dataset, and see how things perform.\n\n### Preprocessing\n\n# Let's perform some basic 
preprocessing to get rid of punctuation and make\n# everything lowercase. We'll then split the lyrics up by line and tokenize the\n# lyrics.\n\ndef tokenize_corpus(corpus, num_words=-1):\n # Fit a Tokenizer on the corpus\n if num_words > -1:\n tokenizer = Tokenizer(num_words=num_words)\n else:\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(corpus)\n return tokenizer\n\nimport string\n\ndef create_lyrics_corpus(dataset, field):\n # Remove all other punctuation\n dataset[field] = dataset[field].str.replace('[{}]'.format(string.punctuation), '')\n # Make it lowercase\n dataset[field] = dataset[field].str.lower()\n # Make it one long string to split by line\n lyrics = dataset[field].str.cat()\n corpus = lyrics.split('\\n')\n # Remove any trailing whitespace\n for l in range(len(corpus)):\n corpus[l] = corpus[l].rstrip()\n # Remove any empty lines\n corpus = [l for l in corpus if l != '']\n\n return corpus\n\n# Read the dataset from csv - just first 10 songs for now\ndataset = pd.read_csv('songdata.csv', dtype=str)[:10]\n# Create the corpus using the 'text' column containing lyrics\ncorpus = create_lyrics_corpus(dataset, 'text')\n# Tokenize the corpus\ntokenizer = tokenize_corpus(corpus)\n\ntotal_words = len(tokenizer.word_index) + 1\n\nprint(tokenizer.word_index)\nprint(total_words)\n\n### Create Sequences and Labels\n\n# After preprocessing, we next need to create sequences and labels. Creating the\n# sequences themselves is similar to before with `texts_to_sequences`, but also\n# including the use of\n# [N-Grams](https://towardsdatascience.com/introduction-to-language-models-n-gram-e323081503d9);\n# creating the labels will now utilize those sequences as well as utilize one-hot\n# encoding over all potential output words.\n\nsequences = []\nfor line in corpus:\n\ttoken_list = tokenizer.texts_to_sequences([line])[0]\n\tfor i in range(1, len(token_list)):\n\t\tn_gram_sequence = token_list[:i+1]\n\t\tsequences.append(n_gram_sequence)\n\n# Pad sequences for equal input length \nmax_sequence_len = max([len(seq) for seq in sequences])\nsequences = np.array(pad_sequences(sequences, maxlen=max_sequence_len, padding='pre'))\n\n# Split sequences between the \"input\" sequence and \"output\" predicted word\ninput_sequences, labels = sequences[:,:-1], sequences[:,-1]\n# One-hot encode the labels\none_hot_labels = tf.keras.utils.to_categorical(labels, num_classes=total_words)\n\n# Check out how some of our data is being stored\n# The Tokenizer has just a single index per word\nprint(tokenizer.word_index['know'])\nprint(tokenizer.word_index['feeling'])\n# Input sequences will have multiple indexes\nprint(input_sequences[5])\nprint(input_sequences[6])\n# And the one hot labels will be as long as the full spread of tokenized words\nprint(one_hot_labels[5])\nprint(one_hot_labels[6])\n\n### Train a Text Generation Model\n\n# Building an RNN to train our text generation model will be very similar to the\n# sentiment models you've built previously. The only real change necessary is to\n# make sure to use Categorical instead of Binary Cross Entropy as the loss\n# function - we could use Binary before since the sentiment was only 0 or 1, but\n# now there are hundreds of categories.\n\n# From there, we should also consider using *more* epochs than before, as text\n# generation can take a little longer to converge than sentiment analysis, *and*\n# we aren't working with all that much data yet. 
I'll set it at 200 epochs here\n# since we're only use part of the dataset, and training will tail off quite a\n# bit over that many epochs.\n\n\nmodel = Sequential()\nmodel.add(Embedding(total_words, 64, input_length=max_sequence_len-1))\nmodel.add(Bidirectional(LSTM(20)))\nmodel.add(Dense(total_words, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel,history = savefit(model, input_sequences, one_hot_labels, epochs=200, verbose=0)\n\n### View the Training Graph\n\nimport matplotlib.pyplot as plt\n\ndef plot_graphs(history, string):\n if not history:\n return\n plt.plot(history.history[string])\n plt.xlabel(\"Epochs\")\n plt.ylabel(string)\n plt.show()\n\nplot_graphs(history, 'accuracy')\n\n### Generate new lyrics!\n\n# It's finally time to generate some new lyrics from the trained model, and see\n# what we get. To do so, we'll provide some \"seed text\", or an input sequence for\n# the model to start with. We'll also decide just how long of an output sequence\n# we want - this could essentially be infinite, as the input plus the previous\n# output will be continuously fed in for a new output word (at least up to our\n # max sequence length).\n\nseed_text = \"im feeling chills\"\nnext_words = 100\n\nfor _ in range(next_words):\n\ttoken_list = tokenizer.texts_to_sequences([seed_text])[0]\n\ttoken_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\n\tpredicted = np.argmax(model.predict(token_list), axis=-1)\n\toutput_word = \"\"\n\tfor word, index in tokenizer.word_index.items():\n\t\tif index == predicted:\n\t\t\toutput_word = word\n\t\t\tbreak\n\tseed_text += \" \" + output_word\nprint(seed_text)\n\n\n# Optimizing the Text Generation Model\n# courses/udacity_intro_to_tensorflow_for_deep_learning/l10c04_nlp_optimizing_the_text_generation_model.ipynb\n\n## 250 Songs\n\n# Now we've seen a model trained on just a small sample of songs, and how this\n# often leads to repetition as you get further along in trying to generate new\n# text. Let's switch to using the 250 songs instead, and see if our output\n# improves. 
This will actually be nearly 10K lines of lyrics, which should be\n# sufficient.\n\n# Note that we won't use the full dataset here as it will take up quite a bit of\n# RAM and processing time, but you're welcome to try doing so on your own later.\n# If interested, you'll likely want to use only some of the more common words for\n# the Tokenizer, which will help shrink processing time and memory needed \n# (or else you'd have an output array hundreds of thousands of words long).\n\n### Preprocessing\n\ndef tokenize_corpus(corpus, num_words=-1):\n # Fit a Tokenizer on the corpus\n if num_words > -1:\n tokenizer = Tokenizer(num_words=num_words)\n else:\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(corpus)\n return tokenizer\n\ndef create_lyrics_corpus(dataset, field):\n # Remove all other punctuation\n dataset[field] = dataset[field].str.replace('[{}]'.format(string.punctuation), '')\n # Make it lowercase\n dataset[field] = dataset[field].str.lower()\n # Make it one long string to split by line\n lyrics = dataset[field].str.cat()\n corpus = lyrics.split('\\n')\n # Remove any trailing whitespace\n for l in range(len(corpus)):\n corpus[l] = corpus[l].rstrip()\n # Remove any empty lines\n corpus = [l for l in corpus if l != '']\n\n return corpus\n\ndef tokenize_corpus(corpus, num_words=-1):\n # Fit a Tokenizer on the corpus\n if num_words > -1:\n tokenizer = Tokenizer(num_words=num_words)\n else:\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(corpus)\n return tokenizer\n\n# Read the dataset from csv - this time with 250 songs\ndataset = pd.read_csv('songdata.csv', dtype=str)[:250]\n# Create the corpus using the 'text' column containing lyrics\ncorpus = create_lyrics_corpus(dataset, 'text')\n# Tokenize the corpus\ntokenizer = tokenize_corpus(corpus, num_words=2000)\ntotal_words = tokenizer.num_words\n\n# There should be a lot more words now\nprint(total_words)\n\n### Create Sequences and Labels\n\nsequences = []\nfor line in corpus:\n\ttoken_list = tokenizer.texts_to_sequences([line])[0]\n\tfor i in range(1, len(token_list)):\n\t\tn_gram_sequence = token_list[:i+1]\n\t\tsequences.append(n_gram_sequence)\n\n# Pad sequences for equal input length \nmax_sequence_len = max([len(seq) for seq in sequences])\nsequences = np.array(pad_sequences(sequences, maxlen=max_sequence_len, padding='pre'))\n\n# Split sequences between the \"input\" sequence and \"output\" predicted word\ninput_sequences, labels = sequences[:,:-1], sequences[:,-1]\n# One-hot encode the labels\none_hot_labels = tf.keras.utils.to_categorical(labels, num_classes=total_words)\n\n### Train a (Better) Text Generation Model\n\n# With more data, we'll cut off after 100 epochs to avoid keeping you here all\n# day. 
You'll also want to change your runtime type to GPU if you haven't already\n# (you'll need to re-run the above cells if you change runtimes).\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional\n\nmodel = Sequential()\nmodel.add(Embedding(total_words, 64, input_length=max_sequence_len-1))\nmodel.add(Bidirectional(LSTM(20)))\nmodel.add(Dense(total_words, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel,history = savefit(model, input_sequences, one_hot_labels, epochs=100, verbose=0)\n\n### View the Training Graph\n\nplot_graphs(history, 'accuracy')\n\n### Generate better lyrics!\n\n# This time around, we should be able to get a more interesting output with less\n# repetition.\n\nseed_text = \"im feeling chills\"\nnext_words = 100\n \nfor _ in range(next_words):\n\ttoken_list = tokenizer.texts_to_sequences([seed_text])[0]\n\ttoken_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\n\tpredicted = np.argmax(model.predict(token_list), axis=-1)\n\toutput_word = \"\"\n\tfor word, index in tokenizer.word_index.items():\n\t\tif index == predicted:\n\t\t\toutput_word = word\n\t\t\tbreak\n\tseed_text += \" \" + output_word\nprint(seed_text)\n\n### Varying the Possible Outputs\n\n# In running the above, you may notice that the same seed text will generate\n# similar outputs. This is because the code is currently always choosing the top\n# predicted class as the next word. What if you wanted more variance in the\n# output? \n\n# Switching from `model.predict_classes` to `model.predict_proba` will get us all\n# of the class probabilities. We can combine this with `np.random.choice` to\n# select a given predicted output based on a probability, thereby giving a bit\n# more randomness to our outputs.\n\n# Test the method with just the first word after the seed text\nseed_text = \"im feeling chills\"\nnext_words = 100\n \ntoken_list = tokenizer.texts_to_sequences([seed_text])[0]\ntoken_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\npredicted_probs = model.predict(token_list)[0]\npredicted = np.random.choice([x for x in range(len(predicted_probs))], \n p=predicted_probs)\n# Running this cell multiple times should get you some variance in output\nprint(predicted)\n\n# Use this process for the full output generation\nseed_text = \"im feeling chills\"\nnext_words = 100\n \nfor _ in range(next_words):\n token_list = tokenizer.texts_to_sequences([seed_text])[0]\n token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\n predicted_probs = model.predict(token_list)[0]\n predicted = np.random.choice([x for x in range(len(predicted_probs))],\n p=predicted_probs)\n output_word = \"\"\n for word, index in tokenizer.word_index.items():\n if index == predicted:\n output_word = word\n break\n seed_text += \" \" + output_word\nprint(seed_text)\n"
] | [
[
"tensorflow.keras.models.Sequential",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.Masking",
"numpy.random.rand",
"tensorflow.keras.Input"
],
[
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.keras.layers.GlobalMaxPooling1D",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.layers.Dense",
"matplotlib.use",
"pandas.read_csv",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.utils.to_categorical",
"matplotlib.pyplot.legend",
"tensorflow.keras.utils.get_file",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.LSTM",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
pyomeca/BiorbdOptim | [
"f07094668788d3e1b5e8cd1c65fbf0c7dc7cc978"
] | [
"bioptim/limits/penalty_option.py"
] | [
"from typing import Any, Union, Callable\n\nimport biorbd_casadi as biorbd\nfrom casadi import horzcat, vertcat, Function, MX, SX\nimport numpy as np\n\nfrom .penalty_node import PenaltyNodeList\nfrom ..misc.enums import Node, PlotType, ControlType, ConstraintType, IntegralApproximation\nfrom ..misc.mapping import Mapping, BiMapping\nfrom ..misc.options import OptionGeneric\n\n\nclass PenaltyOption(OptionGeneric):\n \"\"\"\n A placeholder for a penalty\n\n Attributes\n ----------\n node: Node\n The node within a phase on which the penalty is acting on\n quadratic: bool\n If the penalty is quadratic\n rows: Union[list, tuple, range, np.ndarray]\n The index of the rows in the penalty to keep\n cols: Union[list, tuple, range, np.ndarray]\n The index of the columns in the penalty to keep\n expand: bool\n If the penalty should be expanded or not\n target: np.array(target)\n A target to track for the penalty\n target_plot_name: str\n The plot name of the target\n target_to_plot: np.ndarray\n The subset of the target to plot\n plot_target: bool\n If the target should be plotted\n custom_function: Callable\n A user defined function to call to get the penalty\n node_idx: Union[list, tuple, Node]\n The index in nlp to apply the penalty to\n dt: float\n The delta time\n function: Function\n The casadi function of the penalty\n weighted_function: Function\n The casadi function of the penalty weighted\n derivative: bool\n If the minimization is applied on the numerical derivative of the state [f(t+1) - f(t)]\n explicit_derivative: bool\n If the minimization is applied to derivative of the penalty [f(t, t+1)]\n integration_rule: IntegralApproximation\n The integration rule to use for the penalty\n transition: bool\n If the penalty is a transition\n phase_pre_idx: int\n The index of the nlp of pre when penalty is transition\n phase_post_idx: int\n The index of the nlp of post when penalty is transition\n constraint_type: ConstraintType\n If the penalty is from the user or from bioptim (implicit or internal)\n multi_thread: bool\n If the penalty is multithreaded\n\n Methods\n -------\n set_penalty(self, penalty: Union[MX, SX], all_pn: PenaltyNodeList)\n Prepare the dimension and index of the penalty (including the target)\n _set_dim_idx(self, dim: Union[list, tuple, range, np.ndarray], n_rows: int)\n Checks if the variable index is consistent with the requested variable.\n _check_target_dimensions(self, all_pn: PenaltyNodeList, n_time_expected: int)\n Checks if the variable index is consistent with the requested variable.\n If the function returns, all is okay\n _set_penalty_function(self, all_pn: Union[PenaltyNodeList, list, tuple], fcn: Union[MX, SX])\n Finalize the preparation of the penalty (setting function and weighted_function)\n add_target_to_plot(self, all_pn: PenaltyNodeList, combine_to: str)\n Interface to the plot so it can be properly added to the proper plot\n _finish_add_target_to_plot(self, all_pn: PenaltyNodeList)\n Internal interface to add (after having check the target dimensions) the target to the plot if needed\n add_or_replace_to_penalty_pool(self, ocp, nlp)\n Doing some configuration on the penalty and add it to the list of penalty\n _add_penalty_to_pool(self, all_pn: PenaltyNodeList)\n Return the penalty pool for the specified penalty (abstract)\n clear_penalty(self, ocp, nlp)\n Resets a penalty. 
A negative penalty index creates a new empty penalty (abstract)\n _get_penalty_node_list(self, ocp, nlp) -> PenaltyNodeList\n Get the actual node (time, X and U) specified in the penalty\n \"\"\"\n\n def __init__(\n self,\n penalty: Any,\n phase: int = 0,\n node: Union[Node, list, tuple] = Node.DEFAULT,\n target: Union[int, float, np.array, list[int], list[float], list[np.array]] = None,\n quadratic: bool = None,\n weight: float = 1,\n derivative: bool = False,\n explicit_derivative: bool = False,\n integrate: bool = False,\n integration_rule: IntegralApproximation = IntegralApproximation.DEFAULT,\n index: list = None,\n rows: Union[list, tuple, range, np.ndarray] = None,\n cols: Union[list, tuple, range, np.ndarray] = None,\n states_mapping: BiMapping = None,\n custom_function: Callable = None,\n constraint_type: ConstraintType = ConstraintType.USER,\n multi_thread: bool = None,\n expand: bool = False,\n **params: Any,\n ):\n \"\"\"\n Parameters\n ----------\n penalty: PenaltyType\n The actual penalty\n phase: int\n The phase the penalty is acting on\n node: Union[Node, list, tuple]\n The node within a phase on which the penalty is acting on\n target: Union[int, float, np.array, list[int], list[float], list[np.array]]\n A target to track for the penalty\n quadratic: bool\n If the penalty is quadratic\n weight: float\n The weighting applied to this specific penalty\n derivative: bool\n If the function should be evaluated at X and X+1\n explicit_derivative: bool\n If the function should be evaluated at [X, X+1]\n integrate: bool\n If the function should be integrated\n integration_rule: IntegralApproximation\n The rule to use for the integration\n index: int\n The component index the penalty is acting on\n custom_function: Callable\n A user defined function to call to get the penalty\n constraint_type: ConstraintType\n If the penalty is from the user or from bioptim (implicit or internal)\n **params: dict\n Generic parameters for the penalty\n \"\"\"\n\n super(PenaltyOption, self).__init__(phase=phase, type=penalty, **params)\n self.node: Union[Node, list, tuple] = node\n self.quadratic = quadratic\n self.integration_rule = integration_rule\n\n if index is not None and rows is not None:\n raise ValueError(\"rows and index cannot be defined simultaneously since they are the same variable\")\n self.rows = rows if rows is not None else index\n self.cols = cols\n self.expand = expand\n\n self.target = None\n if target is not None:\n target = np.array(target)\n if isinstance(target, int) or isinstance(target, float) or isinstance(target, np.ndarray):\n target = [target]\n self.target = []\n for t in target:\n self.target.append(np.array(t))\n if len(self.target[-1].shape) == 0:\n self.target[-1] = self.target[-1][np.newaxis]\n if len(self.target[-1].shape) == 1:\n self.target[-1] = self.target[-1][:, np.newaxis]\n if len(self.target) == 1 and (\n self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL\n ):\n if self.node == Node.ALL or self.node == Node.DEFAULT:\n self.target = [self.target[0][:, :-1], self.target[0][:, 1:]]\n else:\n raise NotImplementedError(\n f\"A list of 2 elements is required with {self.node} and TRAPEZOIDAL Integration\"\n f\"except for Node.NODE_ALL and Node.NODE_DEFAULT\"\n \"which can be automatically generated\"\n )\n\n self.target_plot_name = None\n self.target_to_plot = None\n # todo: not implemented yet for trapezoidal integration\n self.plot_target = (\n False\n if (\n self.integration_rule == 
IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL\n )\n else True\n )\n\n self.states_mapping = states_mapping\n\n self.custom_function = custom_function\n\n self.node_idx = []\n self.dt = 0\n self.weight = weight\n self.function: Union[Function, None] = None\n self.function_non_threaded: Union[Function, None] = None\n self.weighted_function: Union[Function, None] = None\n self.weighted_function_non_threaded: Union[Function, None] = None\n self.derivative = derivative\n self.explicit_derivative = explicit_derivative\n self.integrate = integrate\n self.transition = False\n self.multinode_constraint = False\n self.phase_pre_idx = None\n self.phase_post_idx = None\n if self.derivative and self.explicit_derivative:\n raise ValueError(\"derivative and explicit_derivative cannot be both True\")\n self.constraint_type = constraint_type\n\n self.multi_thread = multi_thread\n\n def set_penalty(self, penalty: Union[MX, SX], all_pn: PenaltyNodeList):\n \"\"\"\n Prepare the dimension and index of the penalty (including the target)\n\n Parameters\n ----------\n penalty: Union[MX, SX],\n The actual penalty function\n all_pn: PenaltyNodeList\n The penalty node elements\n \"\"\"\n\n self.rows = self._set_dim_idx(self.rows, penalty.rows())\n self.cols = self._set_dim_idx(self.cols, penalty.columns())\n if self.target is not None:\n self._check_target_dimensions(all_pn, len(all_pn.t))\n if self.plot_target:\n self._finish_add_target_to_plot(all_pn)\n self._set_penalty_function(all_pn, penalty)\n self._add_penalty_to_pool(all_pn)\n\n def _set_dim_idx(self, dim: Union[list, tuple, range, np.ndarray], n_rows: int):\n \"\"\"\n Checks if the variable index is consistent with the requested variable.\n\n Parameters\n ----------\n dim: Union[list, tuple, range]\n The dimension to set\n n_rows: int\n The expected row shape\n\n Returns\n -------\n The formatted indices\n \"\"\"\n\n if dim is None:\n dim = range(n_rows)\n else:\n if isinstance(dim, int):\n dim = [dim]\n if max(dim) > n_rows:\n raise RuntimeError(f\"{self.name} index cannot be higher than nx ({n_rows})\")\n dim = np.array(dim)\n if not np.issubdtype(dim.dtype, np.integer):\n raise RuntimeError(f\"{self.name} index must be a list of integer\")\n return dim\n\n def _check_target_dimensions(self, all_pn: PenaltyNodeList, n_time_expected: int):\n \"\"\"\n Checks if the variable index is consistent with the requested variable.\n If the function returns, all is okay\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The penalty node elements\n n_time_expected: Union[list, tuple]\n The expected shape (n_rows, ns) of the data to track\n \"\"\"\n\n if self.integration_rule == IntegralApproximation.RECTANGLE:\n n_dim = len(self.target[0].shape)\n if n_dim != 2 and n_dim != 3:\n raise RuntimeError(\n f\"target cannot be a vector (it can be a matrix with time dimension equals to 1 though)\"\n )\n if self.target[0].shape[-1] == 1:\n self.target = np.repeat(self.target, n_time_expected, axis=-1)\n\n shape = (\n (len(self.rows), n_time_expected) if n_dim == 2 else (len(self.rows), len(self.cols), n_time_expected)\n )\n if self.target[0].shape != shape:\n raise RuntimeError(\n f\"target {self.target[0].shape} does not correspond to expected size {shape} for penalty {self.name}\"\n )\n\n # If the target is on controls and control is constant, there will be one value missing\n if all_pn is not None:\n if (\n all_pn.nlp.control_type == ControlType.CONSTANT\n and all_pn.nlp.ns in all_pn.t\n and 
self.target[0].shape[-1] == all_pn.nlp.ns\n ):\n if all_pn.t[-1] != all_pn.nlp.ns:\n raise NotImplementedError(\"Modifying target for END not being last is not implemented yet\")\n self.target[0] = np.concatenate(\n (self.target[0], np.nan * np.zeros((self.target[0].shape[0], 1))), axis=1\n )\n elif (\n self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n ):\n\n target_dim = len(self.target)\n if target_dim != 2:\n raise RuntimeError(f\"targets with trapezoidal integration rule need to get a list of two elements.\")\n\n for target in self.target:\n n_dim = len(target.shape)\n if n_dim != 2 and n_dim != 3:\n raise RuntimeError(\n f\"target cannot be a vector (it can be a matrix with time dimension equals to 1 though)\"\n )\n if target.shape[-1] == 1:\n target = np.repeat(target, n_time_expected, axis=-1)\n\n shape = (\n (len(self.rows), n_time_expected - 1)\n if n_dim == 2\n else (len(self.rows), len(self.cols), n_time_expected - 1)\n )\n\n for target in self.target:\n if target.shape != shape:\n raise RuntimeError(\n f\"target {target.shape} does not correspond to expected size {shape} for penalty {self.name}\"\n )\n\n # If the target is on controls and control is constant, there will be one value missing\n if all_pn is not None:\n if (\n all_pn.nlp.control_type == ControlType.CONSTANT\n and all_pn.nlp.ns in all_pn.t\n and self.target[0].shape[-1] == all_pn.nlp.ns - 1\n and self.target[1].shape[-1] == all_pn.nlp.ns - 1\n ):\n if all_pn.t[-1] != all_pn.nlp.ns:\n raise NotImplementedError(\"Modifying target for END not being last is not implemented yet\")\n self.target = np.concatenate((self.target, np.nan * np.zeros((self.target.shape[0], 1))), axis=1)\n\n def _set_penalty_function(self, all_pn: Union[PenaltyNodeList, list, tuple], fcn: Union[MX, SX]):\n \"\"\"\n Finalize the preparation of the penalty (setting function and weighted_function)\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The nodes\n fcn: Union[MX, SX]\n The value of the penalty function\n \"\"\"\n\n # Sanity checks\n if self.transition and self.explicit_derivative:\n raise ValueError(\"transition and explicit_derivative cannot be true simultaneously\")\n if self.transition and self.derivative:\n raise ValueError(\"transition and derivative cannot be true simultaneously\")\n if self.derivative and self.explicit_derivative:\n raise ValueError(\"derivative and explicit_derivative cannot be true simultaneously\")\n\n def get_u(nlp, u: Union[MX, SX], dt: Union[MX, SX]):\n \"\"\"\n Get the control at a given time\n\n Parameters\n ----------\n nlp: NonlinearProgram\n The nonlinear program\n u: Union[MX, SX]\n The control matrix\n dt: Union[MX, SX]\n The time a which control should be computed\n\n Returns\n -------\n The control at a given time\n \"\"\"\n\n if nlp.control_type == ControlType.CONSTANT:\n return u\n elif nlp.control_type == ControlType.LINEAR_CONTINUOUS:\n return u[:, 0] + (u[:, 1] - u[:, 0]) * dt\n else:\n raise RuntimeError(f\"{nlp.control_type} ControlType not implemented yet\")\n\n return u\n\n if self.multinode_constraint or self.transition:\n ocp = all_pn[0].ocp\n nlp = all_pn[0].nlp\n nlp_post = all_pn[1].nlp\n name = self.name.replace(\"->\", \"_\").replace(\" \", \"_\").replace(\",\", \"_\")\n states_pre = nlp.states.cx_end\n states_post = nlp_post.states.cx\n controls_pre = nlp.controls.cx_end\n controls_post = nlp_post.controls.cx\n state_cx = vertcat(states_pre, states_post)\n control_cx = vertcat(controls_pre, 
controls_post)\n\n else:\n ocp = all_pn.ocp\n nlp = all_pn.nlp\n name = self.name\n if self.integrate:\n state_cx = horzcat(*([all_pn.nlp.states.cx] + all_pn.nlp.states.cx_intermediates_list))\n control_cx = all_pn.nlp.controls.cx\n else:\n state_cx = all_pn.nlp.states.cx\n control_cx = all_pn.nlp.controls.cx\n if self.explicit_derivative:\n if self.derivative:\n raise RuntimeError(\"derivative and explicit_derivative cannot be simultaneously true\")\n state_cx = horzcat(state_cx, all_pn.nlp.states.cx_end)\n control_cx = horzcat(control_cx, all_pn.nlp.controls.cx_end)\n\n param_cx = nlp.cx(nlp.parameters.cx)\n\n # Do not use nlp.add_casadi_func because all functions must be registered\n sub_fcn = fcn[self.rows, self.cols]\n self.function = biorbd.to_casadi_func(name, sub_fcn, state_cx, control_cx, param_cx, expand=self.expand)\n self.function_non_threaded = self.function\n\n if self.derivative:\n state_cx = horzcat(all_pn.nlp.states.cx_end, all_pn.nlp.states.cx)\n control_cx = horzcat(all_pn.nlp.controls.cx_end, all_pn.nlp.controls.cx)\n self.function = biorbd.to_casadi_func(\n f\"{name}\",\n self.function(all_pn.nlp.states.cx_end, all_pn.nlp.controls.cx_end, param_cx)\n - self.function(all_pn.nlp.states.cx, all_pn.nlp.controls.cx, param_cx),\n state_cx,\n control_cx,\n param_cx,\n )\n\n dt_cx = nlp.cx.sym(\"dt\", 1, 1)\n is_trapezoidal = (\n self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL\n )\n target_shape = tuple(\n [\n len(self.rows),\n len(self.cols) + 1 if is_trapezoidal else len(self.cols),\n ]\n )\n target_cx = nlp.cx.sym(\"target\", target_shape)\n weight_cx = nlp.cx.sym(\"weight\", 1, 1)\n exponent = 2 if self.quadratic and self.weight else 1\n\n if is_trapezoidal:\n # Hypothesis: the function is continuous on states\n # it neglects the discontinuities at the beginning of the optimization\n state_cx = (\n horzcat(all_pn.nlp.states.cx, all_pn.nlp.states.cx_end)\n if self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n else all_pn.nlp.states.cx\n )\n # to handle piecewise constant in controls we have to compute the value for the end of the interval\n # which only relies on the value of the control at the beginning of the interval\n control_cx = (\n horzcat(all_pn.nlp.controls.cx)\n if nlp.control_type == ControlType.CONSTANT\n else horzcat(all_pn.nlp.controls.cx, all_pn.nlp.controls.cx_end)\n )\n control_cx_end = get_u(nlp, control_cx, dt_cx)\n state_cx_end = (\n all_pn.nlp.states.cx_end\n if self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n else nlp.dynamics[0](x0=state_cx, p=control_cx_end, params=nlp.parameters.cx)[\"xf\"]\n )\n self.modified_function = biorbd.to_casadi_func(\n f\"{name}\",\n (\n (self.function(all_pn.nlp.states.cx, all_pn.nlp.controls.cx, param_cx) - target_cx[:, 0])\n ** exponent\n + (self.function(state_cx_end, control_cx_end, param_cx) - target_cx[:, 1]) ** exponent\n )\n / 2,\n state_cx,\n control_cx,\n param_cx,\n target_cx,\n dt_cx,\n )\n modified_fcn = self.modified_function(state_cx, control_cx, param_cx, target_cx, dt_cx)\n else:\n modified_fcn = (self.function(state_cx, control_cx, param_cx) - target_cx) ** exponent\n\n modified_fcn = weight_cx * modified_fcn * dt_cx if self.weight else modified_fcn * dt_cx\n\n # Do not use nlp.add_casadi_func because all of them must be registered\n self.weighted_function = Function(\n name, [state_cx, control_cx, param_cx, weight_cx, target_cx, dt_cx], [modified_fcn]\n )\n self.weighted_function_non_threaded = 
self.weighted_function\n\n if ocp.n_threads > 1 and self.multi_thread and len(self.node_idx) > 1:\n self.function = self.function.map(len(self.node_idx), \"thread\", ocp.n_threads)\n self.weighted_function = self.weighted_function.map(len(self.node_idx), \"thread\", ocp.n_threads)\n else:\n self.multi_thread = False # Override the multi_threading, since only one node is optimized\n\n if self.expand:\n self.function = self.function.expand()\n self.weighted_function = self.weighted_function.expand()\n\n def add_target_to_plot(self, all_pn: PenaltyNodeList, combine_to: str):\n \"\"\"\n Interface to the plot so it can be properly added to the proper plot\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The penalty node elements\n combine_to: str\n The name of the underlying plot to combine the tracking data to\n \"\"\"\n\n if self.target is None or combine_to is None:\n return\n\n self.target_plot_name = combine_to\n # if the target is n x ns, we need to add a dimension (n x ns + 1) to make it compatible with the plot\n if self.target[0].shape[1] == all_pn.nlp.ns:\n self.target_to_plot = np.concatenate(\n (self.target[0], np.nan * np.ndarray((self.target[0].shape[0], 1))), axis=1\n )\n else:\n self.target_to_plot = self.target[0]\n\n def _finish_add_target_to_plot(self, all_pn: PenaltyNodeList):\n \"\"\"\n Internal interface to add (after having check the target dimensions) the target to the plot if needed\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The penalty node elements\n\n \"\"\"\n\n def plot_function(t, x, u, p):\n if isinstance(t, (list, tuple)):\n return self.target_to_plot[:, [self.node_idx.index(_t) for _t in t]]\n else:\n return self.target_to_plot[:, self.node_idx.index(t)]\n\n if self.target_to_plot is not None:\n if self.target_to_plot.shape[1] > 1:\n plot_type = PlotType.STEP\n else:\n plot_type = PlotType.POINT\n\n all_pn.ocp.add_plot(\n self.target_plot_name,\n plot_function,\n color=\"tab:red\",\n plot_type=plot_type,\n phase=all_pn.nlp.phase_idx,\n axes_idx=Mapping(self.rows),\n node_idx=self.node_idx,\n )\n\n def add_or_replace_to_penalty_pool(self, ocp, nlp):\n \"\"\"\n Doing some configuration on the penalty and add it to the list of penalty\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n nlp: NonLinearProgram\n A reference to the current phase of the ocp\n \"\"\"\n if not self.name:\n if self.type.name == \"CUSTOM\":\n self.name = self.custom_function.__name__\n else:\n self.name = self.type.name\n\n penalty_type = self.type.get_type()\n if self.node == Node.TRANSITION:\n all_pn = []\n\n # Make sure the penalty behave like a PhaseTransition, even though it may be an Objective or Constraint\n self.node = Node.END\n self.node_idx = [0]\n self.transition = True\n self.dt = 1\n self.phase_pre_idx = nlp.phase_idx\n self.phase_post_idx = (nlp.phase_idx + 1) % ocp.n_phases\n if not self.states_mapping:\n self.states_mapping = BiMapping(range(nlp.states.shape), range(nlp.states.shape))\n\n all_pn.append(self._get_penalty_node_list(ocp, nlp))\n all_pn[0].u = [nlp.U[-1]] # Make an exception to the fact that U is not available for the last node\n\n nlp = ocp.nlp[(nlp.phase_idx + 1) % ocp.n_phases]\n self.node = Node.START\n all_pn.append(self._get_penalty_node_list(ocp, nlp))\n\n self.node = Node.TRANSITION\n\n penalty_type.validate_penalty_time_index(self, all_pn[0])\n penalty_type.validate_penalty_time_index(self, all_pn[1])\n self.clear_penalty(ocp, all_pn[0].nlp)\n\n elif isinstance(self.node, tuple) and 
self.multinode_constraint:\n all_pn = []\n self.node_list = self.node\n # Make sure the penalty behave like a MultinodeConstraint, even though it may be an Objective or Constraint\n # self.transition = True\n self.dt = 1\n # self.phase_pre_idx\n # self.phase_post_idx = (nlp.phase_idx + 1) % ocp.n_phases\n if not self.states_mapping:\n self.states_mapping = BiMapping(range(nlp.states.shape), range(nlp.states.shape))\n self.node = self.node_list[0]\n nlp = ocp.nlp[self.phase_first_idx]\n all_pn.append(self._get_penalty_node_list(ocp, nlp))\n if self.node == Node.END:\n all_pn[0].u = [nlp.U[-1]]\n # Make an exception to the fact that U is not available for the last node\n\n self.node = self.node_list[1]\n nlp = ocp.nlp[self.phase_second_idx]\n all_pn.append(self._get_penalty_node_list(ocp, nlp))\n if self.node == Node.END:\n all_pn[1].u = [nlp.U[-1]]\n # Make an exception to the fact that U is not available for the last node\n\n # reset the node list\n self.node = self.node_list\n\n penalty_type.validate_penalty_time_index(self, all_pn[0])\n penalty_type.validate_penalty_time_index(self, all_pn[1])\n self.node_idx = [all_pn[0].t[0], all_pn[1].t[0]]\n self.clear_penalty(ocp, all_pn[0].nlp)\n else:\n all_pn = self._get_penalty_node_list(ocp, nlp)\n penalty_type.validate_penalty_time_index(self, all_pn)\n self.clear_penalty(all_pn.ocp, all_pn.nlp)\n self.dt = penalty_type.get_dt(all_pn.nlp)\n self.node_idx = (\n all_pn.t[:-1]\n if (\n self.integration_rule == IntegralApproximation.TRAPEZOIDAL\n or self.integration_rule == IntegralApproximation.TRUE_TRAPEZOIDAL\n )\n and self.target is not None\n else all_pn.t\n )\n\n penalty_function = self.type.value[0](self, all_pn, **self.params)\n self.set_penalty(penalty_function, all_pn)\n\n def _add_penalty_to_pool(self, all_pn: PenaltyNodeList):\n \"\"\"\n Return the penalty pool for the specified penalty (abstract)\n\n Parameters\n ----------\n all_pn: PenaltyNodeList\n The penalty node elements\n \"\"\"\n\n raise RuntimeError(\"get_dt cannot be called from an abstract class\")\n\n def clear_penalty(self, ocp, nlp):\n \"\"\"\n Resets a penalty. 
A negative penalty index creates a new empty penalty (abstract)\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n nlp: NonLinearProgram\n A reference to the current phase of the ocp\n \"\"\"\n\n raise RuntimeError(\"_reset_penalty cannot be called from an abstract class\")\n\n def _get_penalty_node_list(self, ocp, nlp) -> PenaltyNodeList:\n \"\"\"\n Get the actual node (time, X and U) specified in the penalty\n\n Parameters\n ----------\n ocp: OptimalControlProgram\n A reference to the ocp\n nlp: NonLinearProgram\n A reference to the current phase of the ocp\n\n Returns\n -------\n The actual node (time, X and U) specified in the penalty\n \"\"\"\n\n if not isinstance(self.node, (list, tuple)):\n self.node = (self.node,)\n\n t = []\n for node in self.node:\n if isinstance(node, int):\n if node < 0 or node > nlp.ns:\n raise RuntimeError(f\"Invalid node, {node} must be between 0 and {nlp.ns}\")\n t.append(node)\n elif node == Node.START:\n t.append(0)\n elif node == Node.MID:\n if nlp.ns % 2 == 1:\n raise (ValueError(\"Number of shooting points must be even to use MID\"))\n t.append(nlp.ns // 2)\n elif node == Node.INTERMEDIATES:\n t.extend(list(i for i in range(1, nlp.ns - 1)))\n elif node == Node.PENULTIMATE:\n if nlp.ns < 2:\n raise (ValueError(\"Number of shooting points must be greater than 1\"))\n t.append(nlp.ns - 1)\n elif node == Node.END:\n t.append(nlp.ns)\n elif node == Node.ALL_SHOOTING:\n t.extend(range(nlp.ns))\n elif node == Node.ALL:\n t.extend(range(nlp.ns + 1))\n else:\n raise RuntimeError(\" is not a valid node\")\n\n x = [nlp.X[idx] for idx in t]\n u = [nlp.U[idx] for idx in t if idx != nlp.ns]\n return PenaltyNodeList(ocp, nlp, t, x, u, nlp.parameters.cx)\n"
] | [
[
"numpy.zeros",
"numpy.issubdtype",
"numpy.repeat",
"numpy.ndarray",
"numpy.array"
]
] |
nerdslab/SwapVAE | [
"f43e59c93d0b9f7f1de51a63e25b17b7be1da2d9"
] | [
"vae_kits/classification.py"
] | [
"import torch\r\nfrom torch.utils.data import DataLoader, Dataset\r\nfrom tqdm import tqdm\r\n\r\nclass Simple_Trans(Dataset):\r\n def __init__(self, data, transform=None):\r\n # [reps, labels]\r\n self.reps = data[0]\r\n self.labels = data[1]\r\n # print(self.reps.shape, self.labels.shape) # torch.Size([60000, 64]) torch.Size([60000])\r\n\r\n def __len__(self):\r\n return self.labels.shape[0]\r\n\r\n def __getitem__(self, idx):\r\n return self.reps[idx, :], self.labels[idx]\r\n\r\n\r\nclass linear_clf(object):\r\n def __init__(self, net, classifier, optimizer, train_dataloader, test_dataloader, device = \"cpu\", batch_size=1024,\r\n num_epochs = 10, disable_tqdm = False, writer=None, writer_tag = \"\", pair=False):\r\n self.net = net\r\n #self.net.eval()\r\n\r\n self.classifier = classifier\r\n self.optimizer = optimizer\r\n self.writer = writer\r\n self.tag = writer_tag\r\n\r\n self.disable_tqdm = disable_tqdm\r\n self.device = device\r\n self.batch_size = batch_size\r\n self.num_epochs = num_epochs\r\n\r\n self.data_train = Simple_Trans(self.compute_representations(train_dataloader))\r\n self.data_test = Simple_Trans(self.compute_representations(test_dataloader))\r\n\r\n self.best_number = 0\r\n self.train_linear_layer()\r\n\r\n self.train_acc = self.compute_accuracy(DataLoader(self.data_train, batch_size=batch_size))\r\n self.test_acc = self.compute_accuracy(DataLoader(self.data_test, batch_size=batch_size))\r\n #self.net.train()\r\n\r\n def compute_representations(self, dataloader):\r\n \"\"\" store the representations\r\n :param net: ResNet or smth\r\n :param dataloader: train_loader and test_loader\r\n \"\"\"\r\n #self.net.eval()\r\n reps, labels = [], []\r\n\r\n for i, (x, label) in enumerate(dataloader):\r\n # load data\r\n x = x.to(self.device)\r\n labels.append(label)\r\n\r\n # forward\r\n with torch.no_grad():\r\n representation = self.net(x)\r\n reps.append(representation.detach().cpu())\r\n\r\n if i % 100 == 0:\r\n reps = [torch.cat(reps, dim=0)]\r\n labels = [torch.cat(labels, dim=0)]\r\n\r\n reps = torch.cat(reps, dim=0)\r\n labels = torch.cat(labels, dim=0)\r\n #self.net.train()\r\n return [reps, labels]\r\n\r\n def compute_accuracy(self, dataloader):\r\n #self.net.eval()\r\n self.classifier.eval()\r\n right = []\r\n total = []\r\n for x, label in dataloader:\r\n x, label = x.to(self.device), label.to(self.device)\r\n # feed to network and classifier\r\n with torch.no_grad():\r\n pred_logits = self.classifier(x)\r\n # compute accuracy\r\n _, pred_class = torch.max(pred_logits, 1)\r\n right.append((pred_class == label).sum().item())\r\n total.append(label.size(0))\r\n self.classifier.train()\r\n #self.net.train()\r\n return sum(right) / sum(total)\r\n\r\n def train_linear_layer(self):\r\n #self.net.eval()\r\n class_criterion = torch.nn.CrossEntropyLoss()\r\n progress_bar = tqdm(range(self.num_epochs), disable=self.disable_tqdm, position=0, leave=True)\r\n for epoch in progress_bar:\r\n for x, label in DataLoader(self.data_train, batch_size=self.batch_size):\r\n self.classifier.train()\r\n x, label = x.to(self.device), label.to(self.device)\r\n pred_class = self.classifier(x)\r\n loss = class_criterion(pred_class, label)\r\n\r\n # backward\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n curr_number = self.compute_accuracy(DataLoader(self.data_test, batch_size=self.batch_size))\r\n if curr_number >= self.best_number:\r\n self.best_number = curr_number\r\n\r\n if self.writer is not None:\r\n 
self.writer.log_metrics({'CLFtraining/val-tag{}'.format(self.tag): curr_number}, step = epoch)\r\n\r\n progress_bar.set_description('Linear_CLF Epoch: [{}/{}] Acc@1:{:.3f}% BestAcc@1:{:.3f}%'\r\n .format(epoch, self.num_epochs, curr_number, self.best_number))\r\n #self.net.train()\r\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.cat"
]
] |
opennlp/Large-Scale-Text-Classification | [
"a803c8d89357e5ec897031a41dda807d91f00431"
] | [
"interpretation/instance_explanation.py"
] | [
"from factory import vectorizer_factory\nfrom sklearn.base import TransformerMixin\nfrom sklearn.pipeline import make_pipeline\nfrom lime.lime_text import LimeTextExplainer\n\n\nclass VectorTransformer(TransformerMixin):\n def __init__(self, vectorizer_name):\n self.vectorizer_name = vectorizer_name\n\n def fit(self,X, y=None):\n pass\n\n def transform(self, sentence_list, y=None):\n return vectorizer_factory.get_vectorized_text(sentence_list,self.vectorizer_name)\n\n\ndef get_pipeline_for_classification(feature_transformer, trained_model):\n return make_pipeline(feature_transformer, trained_model)\n\n\ndef get_explanation_for_instance(text_string,classifier_function, class_list, max_num_features_to_show=10, file_to_save='explain.html'):\n explainer = LimeTextExplainer(class_names=class_list,random_state=42)\n explained_instance = explainer.explain_instance(text_string, classifier_function.predict_proba,\n num_features=max_num_features_to_show, top_labels=len(class_list))\n explained_instance.save_to_file(file_to_save)\n return explained_instance.as_list()\n"
] | [
[
"sklearn.pipeline.make_pipeline"
]
] |
lefevre-fraser/openmeta-mms | [
"08f3115e76498df1f8d70641d71f5c52cab4ce5f",
"08f3115e76498df1f8d70641d71f5c52cab4ce5f"
] | [
"bin/Python27/Lib/site-packages/numpy/core/tests/test_regression.py",
"bin/Python27/Lib/site-packages/scipy/linalg/tests/test_decomp.py"
] | [
"from __future__ import division, absolute_import, print_function\r\n\r\nimport copy\r\nimport pickle\r\nimport sys\r\nimport platform\r\nimport gc\r\nimport warnings\r\nimport tempfile\r\nfrom os import path\r\nfrom io import BytesIO\r\nfrom itertools import chain\r\n\r\nimport numpy as np\r\nfrom numpy.testing import (\r\n run_module_suite, TestCase, assert_, assert_equal,\r\n assert_almost_equal, assert_array_equal, assert_array_almost_equal,\r\n assert_raises, assert_warns, dec\r\n )\r\nfrom numpy.testing.utils import _assert_valid_refcount\r\nfrom numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu\r\n\r\nrlevel = 1\r\n\r\nclass TestRegression(TestCase):\r\n def test_invalid_round(self,level=rlevel):\r\n # Ticket #3\r\n v = 4.7599999999999998\r\n assert_array_equal(np.array([v]), np.array(v))\r\n\r\n def test_mem_empty(self,level=rlevel):\r\n # Ticket #7\r\n np.empty((1,), dtype=[('x', np.int64)])\r\n\r\n def test_pickle_transposed(self,level=rlevel):\r\n # Ticket #16\r\n a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))\r\n f = BytesIO()\r\n pickle.dump(a, f)\r\n f.seek(0)\r\n b = pickle.load(f)\r\n f.close()\r\n assert_array_equal(a, b)\r\n\r\n def test_typeNA(self,level=rlevel):\r\n # Ticket #31\r\n assert_equal(np.typeNA[np.int64], 'Int64')\r\n assert_equal(np.typeNA[np.uint64], 'UInt64')\r\n\r\n def test_dtype_names(self,level=rlevel):\r\n # Ticket #35\r\n # Should succeed\r\n np.dtype([(('name', 'label'), np.int32, 3)])\r\n\r\n def test_reduce(self,level=rlevel):\r\n # Ticket #40\r\n assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)\r\n\r\n def test_zeros_order(self,level=rlevel):\r\n # Ticket #43\r\n np.zeros([3], int, 'C')\r\n np.zeros([3], order='C')\r\n np.zeros([3], int, order='C')\r\n\r\n def test_asarray_with_order(self,level=rlevel):\r\n # Check that nothing is done when order='F' and array C/F-contiguous\r\n a = np.ones(2)\r\n assert_(a is np.asarray(a, order='F'))\r\n\r\n def test_ravel_with_order(self,level=rlevel):\r\n # Check that ravel works when order='F' and array C/F-contiguous\r\n a = np.ones(2)\r\n assert_(not a.ravel('F').flags.owndata)\r\n\r\n def test_sort_bigendian(self,level=rlevel):\r\n # Ticket #47\r\n a = np.linspace(0, 10, 11)\r\n c = a.astype(np.dtype('<f8'))\r\n c.sort()\r\n assert_array_almost_equal(c, a)\r\n\r\n def test_negative_nd_indexing(self,level=rlevel):\r\n # Ticket #49\r\n c = np.arange(125).reshape((5, 5, 5))\r\n origidx = np.array([-1, 0, 1])\r\n idx = np.array(origidx)\r\n c[idx]\r\n assert_array_equal(idx, origidx)\r\n\r\n def test_char_dump(self,level=rlevel):\r\n # Ticket #50\r\n f = BytesIO()\r\n ca = np.char.array(np.arange(1000, 1010), itemsize=4)\r\n ca.dump(f)\r\n f.seek(0)\r\n ca = np.load(f)\r\n f.close()\r\n\r\n def test_noncontiguous_fill(self,level=rlevel):\r\n # Ticket #58.\r\n a = np.zeros((5, 3))\r\n b = a[:, :2,]\r\n\r\n def rs():\r\n b.shape = (10,)\r\n\r\n self.assertRaises(AttributeError, rs)\r\n\r\n def test_bool(self,level=rlevel):\r\n # Ticket #60\r\n np.bool_(1) # Should succeed\r\n\r\n def test_indexing1(self,level=rlevel):\r\n # Ticket #64\r\n descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]\r\n buffer = ((([6j, 4j],),),)\r\n h = np.array(buffer, dtype=descr)\r\n h['x']['y']['z']\r\n\r\n def test_indexing2(self,level=rlevel):\r\n # Ticket #65\r\n descr = [('x', 'i4', (2,))]\r\n buffer = ([3, 2],)\r\n h = np.array(buffer, dtype=descr)\r\n h['x']\r\n\r\n def test_round(self,level=rlevel):\r\n # Ticket #67\r\n x = np.array([1+2j])\r\n assert_almost_equal(x**(-1), [1/(1+2j)])\r\n\r\n 
def test_scalar_compare(self,level=rlevel):\r\n # Trac Ticket #72\r\n # https://github.com/numpy/numpy/issues/565\r\n a = np.array(['test', 'auto'])\r\n assert_array_equal(a == 'auto', np.array([False, True]))\r\n self.assertTrue(a[1] == 'auto')\r\n self.assertTrue(a[0] != 'auto')\r\n b = np.linspace(0, 10, 11)\r\n # This should return true for now, but will eventually raise an error:\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\r\n self.assertTrue(b != 'auto')\r\n self.assertTrue(b[0] != 'auto')\r\n\r\n def test_unicode_swapping(self,level=rlevel):\r\n # Ticket #79\r\n ulen = 1\r\n ucs_value = sixu('\\U0010FFFF')\r\n ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)\r\n ua.newbyteorder() # Should succeed.\r\n\r\n def test_object_array_fill(self,level=rlevel):\r\n # Ticket #86\r\n x = np.zeros(1, 'O')\r\n x.fill([])\r\n\r\n def test_mem_dtype_align(self,level=rlevel):\r\n # Ticket #93\r\n self.assertRaises(TypeError, np.dtype,\r\n {'names':['a'],'formats':['foo']}, align=1)\r\n\r\n @dec.knownfailureif((sys.version_info[0] >= 3) or\r\n (sys.platform == \"win32\" and\r\n platform.architecture()[0] == \"64bit\"),\r\n \"numpy.intp('0xff', 16) not supported on Py3, \"\r\n \"as it does not inherit from Python int\")\r\n def test_intp(self,level=rlevel):\r\n # Ticket #99\r\n i_width = np.int_(0).nbytes*2 - 1\r\n np.intp('0x' + 'f'*i_width, 16)\r\n self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)\r\n self.assertRaises(ValueError, np.intp, '0x1', 32)\r\n assert_equal(255, np.intp('0xFF', 16))\r\n assert_equal(1024, np.intp(1024))\r\n\r\n def test_endian_bool_indexing(self,level=rlevel):\r\n # Ticket #105\r\n a = np.arange(10., dtype='>f8')\r\n b = np.arange(10., dtype='<f8')\r\n xa = np.where((a > 2) & (a < 6))\r\n xb = np.where((b > 2) & (b < 6))\r\n ya = ((a > 2) & (a < 6))\r\n yb = ((b > 2) & (b < 6))\r\n assert_array_almost_equal(xa, ya.nonzero())\r\n assert_array_almost_equal(xb, yb.nonzero())\r\n assert_(np.all(a[ya] > 0.5))\r\n assert_(np.all(b[yb] > 0.5))\r\n\r\n def test_endian_where(self,level=rlevel):\r\n # GitHub issue #369\r\n net = np.zeros(3, dtype='>f4')\r\n net[1] = 0.00458849\r\n net[2] = 0.605202\r\n max_net = net.max()\r\n test = np.where(net <= 0., max_net, net)\r\n correct = np.array([ 0.60520202, 0.00458849, 0.60520202])\r\n assert_array_almost_equal(test, correct)\r\n\r\n def test_endian_recarray(self,level=rlevel):\r\n # Ticket #2185\r\n dt = np.dtype([\r\n ('head', '>u4'),\r\n ('data', '>u4', 2),\r\n ])\r\n buf = np.recarray(1, dtype=dt)\r\n buf[0]['head'] = 1\r\n buf[0]['data'][:] = [1, 1]\r\n\r\n h = buf[0]['head']\r\n d = buf[0]['data'][0]\r\n buf[0]['head'] = h\r\n buf[0]['data'][0] = d\r\n assert_(buf[0]['head'] == 1)\r\n\r\n def test_mem_dot(self,level=rlevel):\r\n # Ticket #106\r\n x = np.random.randn(0, 1)\r\n y = np.random.randn(10, 1)\r\n # Dummy array to detect bad memory access:\r\n _z = np.ones(10)\r\n _dummy = np.empty((0, 10))\r\n z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)\r\n np.dot(x, np.transpose(y), out=z)\r\n assert_equal(_z, np.ones(10))\r\n # Do the same for the built-in dot:\r\n np.core.multiarray.dot(x, np.transpose(y), out=z)\r\n assert_equal(_z, np.ones(10))\r\n\r\n def test_arange_endian(self,level=rlevel):\r\n # Ticket #111\r\n ref = np.arange(10)\r\n x = np.arange(10, dtype='<f8')\r\n assert_array_equal(ref, x)\r\n x = np.arange(10, dtype='>f8')\r\n assert_array_equal(ref, x)\r\n\r\n def test_argmax(self,level=rlevel):\r\n # 
Ticket #119\r\n a = np.random.normal(0, 1, (4, 5, 6, 7, 8))\r\n for i in range(a.ndim):\r\n a.argmax(i) # Should succeed\r\n\r\n def test_mem_divmod(self,level=rlevel):\r\n # Ticket #126\r\n for i in range(10):\r\n divmod(np.array([i])[0], 10)\r\n\r\n def test_hstack_invalid_dims(self,level=rlevel):\r\n # Ticket #128\r\n x = np.arange(9).reshape((3, 3))\r\n y = np.array([0, 0, 0])\r\n self.assertRaises(ValueError, np.hstack, (x, y))\r\n\r\n def test_squeeze_type(self,level=rlevel):\r\n # Ticket #133\r\n a = np.array([3])\r\n b = np.array(3)\r\n assert_(type(a.squeeze()) is np.ndarray)\r\n assert_(type(b.squeeze()) is np.ndarray)\r\n\r\n def test_add_identity(self,level=rlevel):\r\n # Ticket #143\r\n assert_equal(0, np.add.identity)\r\n\r\n def test_numpy_float_python_long_addition(self):\r\n # Check that numpy float and python longs can be added correctly.\r\n a = np.float_(23.) + 2**135\r\n assert_equal(a, 23. + 2**135)\r\n\r\n def test_binary_repr_0(self,level=rlevel):\r\n # Ticket #151\r\n assert_equal('0', np.binary_repr(0))\r\n\r\n def test_rec_iterate(self,level=rlevel):\r\n # Ticket #160\r\n descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])\r\n x = np.rec.array([(1, 1.1, '1.0'),\r\n (2, 2.2, '2.0')], dtype=descr)\r\n x[0].tolist()\r\n [i for i in x[0]]\r\n\r\n def test_unicode_string_comparison(self,level=rlevel):\r\n # Ticket #190\r\n a = np.array('hello', np.unicode_)\r\n b = np.array('world')\r\n a == b\r\n\r\n def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel):\r\n # Fix in r2836\r\n # Create non-contiguous Fortran ordered array\r\n x = np.array(np.random.rand(3, 3), order='F')[:, :2]\r\n assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))\r\n\r\n def test_flat_assignment(self,level=rlevel):\r\n # Correct behaviour of ticket #194\r\n x = np.empty((3, 1))\r\n x.flat = np.arange(3)\r\n assert_array_almost_equal(x, [[0], [1], [2]])\r\n x.flat = np.arange(3, dtype=float)\r\n assert_array_almost_equal(x, [[0], [1], [2]])\r\n\r\n def test_broadcast_flat_assignment(self,level=rlevel):\r\n # Ticket #194\r\n x = np.empty((3, 1))\r\n\r\n def bfa():\r\n x[:] = np.arange(3)\r\n\r\n def bfb():\r\n x[:] = np.arange(3, dtype=float)\r\n\r\n self.assertRaises(ValueError, bfa)\r\n self.assertRaises(ValueError, bfb)\r\n\r\n def test_nonarray_assignment(self):\r\n # See also Issue gh-2870, test for non-array assignment\r\n # and equivalent unsafe casted array assignment\r\n a = np.arange(10)\r\n b = np.ones(10, dtype=bool)\r\n r = np.arange(10)\r\n\r\n def assign(a, b, c):\r\n a[b] = c\r\n\r\n assert_raises(ValueError, assign, a, b, np.nan)\r\n a[b] = np.array(np.nan) # but not this.\r\n assert_raises(ValueError, assign, a, r, np.nan)\r\n a[r] = np.array(np.nan)\r\n\r\n def test_unpickle_dtype_with_object(self,level=rlevel):\r\n # Implemented in r2840\r\n dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])\r\n f = BytesIO()\r\n pickle.dump(dt, f)\r\n f.seek(0)\r\n dt_ = pickle.load(f)\r\n f.close()\r\n assert_equal(dt, dt_)\r\n\r\n def test_mem_array_creation_invalid_specification(self,level=rlevel):\r\n # Ticket #196\r\n dt = np.dtype([('x', int), ('y', np.object_)])\r\n # Wrong way\r\n self.assertRaises(ValueError, np.array, [1, 'object'], dt)\r\n # Correct way\r\n np.array([(1, 'object')], dt)\r\n\r\n def test_recarray_single_element(self,level=rlevel):\r\n # Ticket #202\r\n a = np.array([1, 2, 3], dtype=np.int32)\r\n b = a.copy()\r\n r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])\r\n assert_array_equal(a, b)\r\n assert_equal(a, 
r[0][0])\r\n\r\n def test_zero_sized_array_indexing(self,level=rlevel):\r\n # Ticket #205\r\n tmp = np.array([])\r\n\r\n def index_tmp():\r\n tmp[np.array(10)]\r\n\r\n self.assertRaises(IndexError, index_tmp)\r\n\r\n def test_chararray_rstrip(self,level=rlevel):\r\n # Ticket #222\r\n x = np.chararray((1,), 5)\r\n x[0] = asbytes('a ')\r\n x = x.rstrip()\r\n assert_equal(x[0], asbytes('a'))\r\n\r\n def test_object_array_shape(self,level=rlevel):\r\n # Ticket #239\r\n assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))\r\n assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))\r\n assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))\r\n assert_equal(np.array([], dtype=object).shape, (0,))\r\n assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))\r\n assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))\r\n\r\n def test_mem_around(self,level=rlevel):\r\n # Ticket #243\r\n x = np.zeros((1,))\r\n y = [0]\r\n decimal = 6\r\n np.around(abs(x-y), decimal) <= 10.0**(-decimal)\r\n\r\n def test_character_array_strip(self,level=rlevel):\r\n # Ticket #246\r\n x = np.char.array((\"x\", \"x \", \"x \"))\r\n for c in x:\r\n assert_equal(c, \"x\")\r\n\r\n def test_lexsort(self,level=rlevel):\r\n # Lexsort memory error\r\n v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\r\n assert_equal(np.lexsort(v), 0)\r\n\r\n def test_lexsort_invalid_sequence(self):\r\n # Issue gh-4123\r\n class BuggySequence(object):\r\n def __len__(self):\r\n return 4\r\n\r\n def __getitem__(self, key):\r\n raise KeyError\r\n\r\n assert_raises(KeyError, np.lexsort, BuggySequence())\r\n\r\n def test_pickle_py2_bytes_encoding(self):\r\n # Check that arrays and scalars pickled on Py2 are\r\n # unpickleable on Py3 using encoding='bytes'\r\n\r\n test_data = [\r\n # (original, py2_pickle)\r\n (np.unicode_('\\u6f2c'),\r\n asbytes(\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\r\n \"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\n\"\r\n \"I0\\ntp6\\nbS',o\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\")),\r\n\r\n (np.array([9e123], dtype=np.float64),\r\n asbytes(\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\n\"\r\n \"p1\\n(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\n\"\r\n \"p7\\n(S'f8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'<'\\np11\\nNNNI-1\\nI-1\\n\"\r\n \"I0\\ntp12\\nbI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np13\\ntp14\\nb.\")),\r\n\r\n (np.array([(9e123,)], dtype=[('name', float)]),\r\n asbytes(\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n\"\r\n \"(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n\"\r\n \"(S'V8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nN(S'name'\\np12\\ntp13\\n\"\r\n \"(dp14\\ng12\\n(g7\\n(S'f8'\\np15\\nI0\\nI1\\ntp16\\nRp17\\n(I3\\nS'<'\\np18\\nNNNI-1\\n\"\r\n \"I-1\\nI0\\ntp19\\nbI0\\ntp20\\nsI8\\nI1\\nI0\\ntp21\\n\"\r\n \"bI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np22\\ntp23\\nb.\")),\r\n ]\r\n\r\n if sys.version_info[:2] >= (3, 4):\r\n # encoding='bytes' was added in Py3.4\r\n for original, data in test_data:\r\n result = pickle.loads(data, encoding='bytes')\r\n assert_equal(result, original)\r\n\r\n if isinstance(result, np.ndarray) and result.dtype.names:\r\n for name in result.dtype.names:\r\n assert_(isinstance(name, str))\r\n\r\n def test_pickle_dtype(self,level=rlevel):\r\n # Ticket #251\r\n pickle.dumps(np.float)\r\n\r\n def test_swap_real(self, level=rlevel):\r\n # Ticket #265\r\n 
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)\r\n assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)\r\n assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)\r\n assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)\r\n\r\n def test_object_array_from_list(self, level=rlevel):\r\n # Ticket #270\r\n np.array([1, 'A', None]) # Should succeed\r\n\r\n def test_multiple_assign(self, level=rlevel):\r\n # Ticket #273\r\n a = np.zeros((3, 1), int)\r\n a[[1, 2]] = 1\r\n\r\n def test_empty_array_type(self, level=rlevel):\r\n assert_equal(np.array([]).dtype, np.zeros(0).dtype)\r\n\r\n def test_void_copyswap(self, level=rlevel):\r\n dt = np.dtype([('one', '<i4'), ('two', '<i4')])\r\n x = np.array((1, 2), dtype=dt)\r\n x = x.byteswap()\r\n assert_(x['one'] > 1 and x['two'] > 2)\r\n\r\n def test_method_args(self, level=rlevel):\r\n # Make sure methods and functions have same default axis\r\n # keyword and arguments\r\n funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),\r\n ('sometrue', 'any'),\r\n ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),\r\n 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',\r\n 'round', 'min', 'max', 'argsort', 'sort']\r\n funcs2 = ['compress', 'take', 'repeat']\r\n\r\n for func in funcs1:\r\n arr = np.random.rand(8, 7)\r\n arr2 = arr.copy()\r\n if isinstance(func, tuple):\r\n func_meth = func[1]\r\n func = func[0]\r\n else:\r\n func_meth = func\r\n res1 = getattr(arr, func_meth)()\r\n res2 = getattr(np, func)(arr2)\r\n if res1 is None:\r\n res1 = arr\r\n\r\n if res1.dtype.kind in 'uib':\r\n assert_((res1 == res2).all(), func)\r\n else:\r\n assert_(abs(res1-res2).max() < 1e-8, func)\r\n\r\n for func in funcs2:\r\n arr1 = np.random.rand(8, 7)\r\n arr2 = np.random.rand(8, 7)\r\n res1 = None\r\n if func == 'compress':\r\n arr1 = arr1.ravel()\r\n res1 = getattr(arr2, func)(arr1)\r\n else:\r\n arr2 = (15*arr2).astype(int).ravel()\r\n if res1 is None:\r\n res1 = getattr(arr1, func)(arr2)\r\n res2 = getattr(np, func)(arr1, arr2)\r\n assert_(abs(res1-res2).max() < 1e-8, func)\r\n\r\n def test_mem_lexsort_strings(self, level=rlevel):\r\n # Ticket #298\r\n lst = ['abc', 'cde', 'fgh']\r\n np.lexsort((lst,))\r\n\r\n def test_fancy_index(self, level=rlevel):\r\n # Ticket #302\r\n x = np.array([1, 2])[np.array([0])]\r\n assert_equal(x.shape, (1,))\r\n\r\n def test_recarray_copy(self, level=rlevel):\r\n # Ticket #312\r\n dt = [('x', np.int16), ('y', np.float64)]\r\n ra = np.array([(1, 2.3)], dtype=dt)\r\n rb = np.rec.array(ra, dtype=dt)\r\n rb['x'] = 2.\r\n assert_(ra['x'] != rb['x'])\r\n\r\n def test_rec_fromarray(self, level=rlevel):\r\n # Ticket #322\r\n x1 = np.array([[1, 2], [3, 4], [5, 6]])\r\n x2 = np.array(['a', 'dd', 'xyz'])\r\n x3 = np.array([1.1, 2, 3])\r\n np.rec.fromarrays([x1, x2, x3], formats=\"(2,)i4,a3,f8\")\r\n\r\n def test_object_array_assign(self, level=rlevel):\r\n x = np.empty((2, 2), object)\r\n x.flat[2] = (1, 2, 3)\r\n assert_equal(x.flat[2], (1, 2, 3))\r\n\r\n def test_ndmin_float64(self, level=rlevel):\r\n # Ticket #324\r\n x = np.array([1, 2, 3], dtype=np.float64)\r\n assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)\r\n assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)\r\n\r\n def test_ndmin_order(self, level=rlevel):\r\n # Issue #465 and related checks\r\n assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)\r\n assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)\r\n assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)\r\n assert_(np.array(np.ones((2, 2), 
order='C'), ndmin=3).flags.c_contiguous)\r\n\r\n def test_mem_axis_minimization(self, level=rlevel):\r\n # Ticket #327\r\n data = np.arange(5)\r\n data = np.add.outer(data, data)\r\n\r\n def test_mem_float_imag(self, level=rlevel):\r\n # Ticket #330\r\n np.float64(1.0).imag\r\n\r\n def test_dtype_tuple(self, level=rlevel):\r\n # Ticket #334\r\n assert_(np.dtype('i4') == np.dtype(('i4', ())))\r\n\r\n def test_dtype_posttuple(self, level=rlevel):\r\n # Ticket #335\r\n np.dtype([('col1', '()i4')])\r\n\r\n def test_numeric_carray_compare(self, level=rlevel):\r\n # Ticket #341\r\n assert_equal(np.array(['X'], 'c'), asbytes('X'))\r\n\r\n def test_string_array_size(self, level=rlevel):\r\n # Ticket #342\r\n self.assertRaises(ValueError,\r\n np.array, [['X'], ['X', 'X', 'X']], '|S1')\r\n\r\n def test_dtype_repr(self, level=rlevel):\r\n # Ticket #344\r\n dt1 = np.dtype(('uint32', 2))\r\n dt2 = np.dtype(('uint32', (2,)))\r\n assert_equal(dt1.__repr__(), dt2.__repr__())\r\n\r\n def test_reshape_order(self, level=rlevel):\r\n # Make sure reshape order works.\r\n a = np.arange(6).reshape(2, 3, order='F')\r\n assert_equal(a, [[0, 2, 4], [1, 3, 5]])\r\n a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\r\n b = a[:, 1]\r\n assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])\r\n\r\n def test_reshape_zero_strides(self, level=rlevel):\r\n # Issue #380, test reshaping of zero strided arrays\r\n a = np.ones(1)\r\n a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))\r\n assert_(a.reshape(5, 1).strides[0] == 0)\r\n\r\n def test_reshape_zero_size(self, level=rlevel):\r\n # GitHub Issue #2700, setting shape failed for 0-sized arrays\r\n a = np.ones((0, 2))\r\n a.shape = (-1, 2)\r\n\r\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\r\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.\r\n @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)\r\n def test_reshape_trailing_ones_strides(self):\r\n # GitHub issue gh-2949, bad strides for trailing ones of new shape\r\n a = np.zeros(12, dtype=np.int32)[::2] # not contiguous\r\n strides_c = (16, 8, 8, 8)\r\n strides_f = (8, 24, 48, 48)\r\n assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)\r\n assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)\r\n assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))\r\n\r\n def test_repeat_discont(self, level=rlevel):\r\n # Ticket #352\r\n a = np.arange(12).reshape(4, 3)[:, 2]\r\n assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])\r\n\r\n def test_array_index(self, level=rlevel):\r\n # Make sure optimization is not called in this case.\r\n a = np.array([1, 2, 3])\r\n a2 = np.array([[1, 2, 3]])\r\n assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])\r\n\r\n def test_object_argmax(self, level=rlevel):\r\n a = np.array([1, 2, 3], dtype=object)\r\n assert_(a.argmax() == 2)\r\n\r\n def test_recarray_fields(self, level=rlevel):\r\n # Ticket #372\r\n dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])\r\n dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])\r\n for a in [np.array([(1, 2), (3, 4)], \"i4,i4\"),\r\n np.rec.array([(1, 2), (3, 4)], \"i4,i4\"),\r\n np.rec.array([(1, 2), (3, 4)]),\r\n np.rec.fromarrays([(1, 2), (3, 4)], \"i4,i4\"),\r\n np.rec.fromarrays([(1, 2), (3, 4)])]:\r\n assert_(a.dtype in [dt0, dt1])\r\n\r\n def test_random_shuffle(self, level=rlevel):\r\n # Ticket #374\r\n a = np.arange(5).reshape((5, 1))\r\n b = a.copy()\r\n np.random.shuffle(b)\r\n assert_equal(np.sort(b, axis=0), a)\r\n\r\n def test_refcount_vdot(self, 
level=rlevel):\r\n # Changeset #3443\r\n _assert_valid_refcount(np.vdot)\r\n\r\n def test_startswith(self, level=rlevel):\r\n ca = np.char.array(['Hi', 'There'])\r\n assert_equal(ca.startswith('H'), [True, False])\r\n\r\n def test_noncommutative_reduce_accumulate(self, level=rlevel):\r\n # Ticket #413\r\n tosubtract = np.arange(5)\r\n todivide = np.array([2.0, 0.5, 0.25])\r\n assert_equal(np.subtract.reduce(tosubtract), -10)\r\n assert_equal(np.divide.reduce(todivide), 16.0)\r\n assert_array_equal(np.subtract.accumulate(tosubtract),\r\n np.array([0, -1, -3, -6, -10]))\r\n assert_array_equal(np.divide.accumulate(todivide),\r\n np.array([2., 4., 16.]))\r\n\r\n def test_convolve_empty(self, level=rlevel):\r\n # Convolve should raise an error for empty input array.\r\n self.assertRaises(ValueError, np.convolve, [], [1])\r\n self.assertRaises(ValueError, np.convolve, [1], [])\r\n\r\n def test_multidim_byteswap(self, level=rlevel):\r\n # Ticket #449\r\n r = np.array([(1, (0, 1, 2))], dtype=\"i2,3i2\")\r\n assert_array_equal(r.byteswap(),\r\n np.array([(256, (0, 256, 512))], r.dtype))\r\n\r\n def test_string_NULL(self, level=rlevel):\r\n # Changeset 3557\r\n assert_equal(np.array(\"a\\x00\\x0b\\x0c\\x00\").item(),\r\n 'a\\x00\\x0b\\x0c')\r\n\r\n def test_junk_in_string_fields_of_recarray(self, level=rlevel):\r\n # Ticket #483\r\n r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])\r\n assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))\r\n\r\n def test_take_output(self, level=rlevel):\r\n # Ensure that 'take' honours output parameter.\r\n x = np.arange(12).reshape((3, 4))\r\n a = np.take(x, [0, 2], axis=1)\r\n b = np.zeros_like(a)\r\n np.take(x, [0, 2], axis=1, out=b)\r\n assert_array_equal(a, b)\r\n\r\n def test_take_object_fail(self):\r\n # Issue gh-3001\r\n d = 123.\r\n a = np.array([d, 1], dtype=object)\r\n ref_d = sys.getrefcount(d)\r\n try:\r\n a.take([0, 100])\r\n except IndexError:\r\n pass\r\n assert_(ref_d == sys.getrefcount(d))\r\n\r\n def test_array_str_64bit(self, level=rlevel):\r\n # Ticket #501\r\n s = np.array([1, np.nan], dtype=np.float64)\r\n with np.errstate(all='raise'):\r\n np.array_str(s) # Should succeed\r\n\r\n def test_frompyfunc_endian(self, level=rlevel):\r\n # Ticket #503\r\n from math import radians\r\n uradians = np.frompyfunc(radians, 1, 1)\r\n big_endian = np.array([83.4, 83.5], dtype='>f8')\r\n little_endian = np.array([83.4, 83.5], dtype='<f8')\r\n assert_almost_equal(uradians(big_endian).astype(float),\r\n uradians(little_endian).astype(float))\r\n\r\n def test_mem_string_arr(self, level=rlevel):\r\n # Ticket #514\r\n s = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\r\n t = []\r\n np.hstack((t, s))\r\n\r\n def test_arr_transpose(self, level=rlevel):\r\n # Ticket #516\r\n x = np.random.rand(*(2,)*16)\r\n x.transpose(list(range(16))) # Should succeed\r\n\r\n def test_string_mergesort(self, level=rlevel):\r\n # Ticket #540\r\n x = np.array(['a']*32)\r\n assert_array_equal(x.argsort(kind='m'), np.arange(32))\r\n\r\n def test_argmax_byteorder(self, level=rlevel):\r\n # Ticket #546\r\n a = np.arange(3, dtype='>f')\r\n assert_(a[a.argmax()] == a.max())\r\n\r\n def test_rand_seed(self, level=rlevel):\r\n # Ticket #555\r\n for l in np.arange(4):\r\n np.random.seed(l)\r\n\r\n def test_mem_deallocation_leak(self, level=rlevel):\r\n # Ticket #562\r\n a = np.zeros(5, dtype=float)\r\n b = np.array(a, dtype=float)\r\n del a, b\r\n\r\n def test_mem_on_invalid_dtype(self):\r\n \"Ticket #583\"\r\n self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], 
str)\r\n\r\n def test_dot_negative_stride(self, level=rlevel):\r\n # Ticket #588\r\n x = np.array([[1, 5, 25, 125., 625]])\r\n y = np.array([[20.], [160.], [640.], [1280.], [1024.]])\r\n z = y[::-1].copy()\r\n y2 = y[::-1]\r\n assert_equal(np.dot(x, z), np.dot(x, y2))\r\n\r\n def test_object_casting(self, level=rlevel):\r\n # This used to trigger the object-type version of\r\n # the bitwise_or operation, because float64 -> object\r\n # casting succeeds\r\n def rs():\r\n x = np.ones([484, 286])\r\n y = np.zeros([484, 286])\r\n x |= y\r\n\r\n self.assertRaises(TypeError, rs)\r\n\r\n def test_unicode_scalar(self, level=rlevel):\r\n # Ticket #600\r\n x = np.array([\"DROND\", \"DROND1\"], dtype=\"U6\")\r\n el = x[1]\r\n new = pickle.loads(pickle.dumps(el))\r\n assert_equal(new, el)\r\n\r\n def test_arange_non_native_dtype(self, level=rlevel):\r\n # Ticket #616\r\n for T in ('>f4', '<f4'):\r\n dt = np.dtype(T)\r\n assert_equal(np.arange(0, dtype=dt).dtype, dt)\r\n assert_equal(np.arange(0.5, dtype=dt).dtype, dt)\r\n assert_equal(np.arange(5, dtype=dt).dtype, dt)\r\n\r\n def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):\r\n s = np.ones(10, dtype=float)\r\n x = np.array((15,), dtype=float)\r\n\r\n def ia(x, s, v):\r\n x[(s > 0)] = v\r\n\r\n # After removing deprecation, the following are ValueErrors.\r\n # This might seem odd as compared to the value error below. This\r\n # is due to the fact that the new code always uses \"nonzero\" logic\r\n # and the boolean special case is not taken.\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore', DeprecationWarning)\r\n warnings.simplefilter('ignore', np.VisibleDeprecationWarning)\r\n self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))\r\n self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))\r\n # Old special case (different code path):\r\n self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))\r\n self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))\r\n\r\n def test_mem_scalar_indexing(self, level=rlevel):\r\n # Ticket #603\r\n x = np.array([0], dtype=float)\r\n index = np.array(0, dtype=np.int32)\r\n x[index]\r\n\r\n def test_binary_repr_0_width(self, level=rlevel):\r\n assert_equal(np.binary_repr(0, width=3), '000')\r\n\r\n def test_fromstring(self, level=rlevel):\r\n assert_equal(np.fromstring(\"12:09:09\", dtype=int, sep=\":\"),\r\n [12, 9, 9])\r\n\r\n def test_searchsorted_variable_length(self, level=rlevel):\r\n x = np.array(['a', 'aa', 'b'])\r\n y = np.array(['d', 'e'])\r\n assert_equal(x.searchsorted(y), [3, 3])\r\n\r\n def test_string_argsort_with_zeros(self, level=rlevel):\r\n # Check argsort for strings containing zeros.\r\n x = np.fromstring(\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\r\n assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))\r\n assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))\r\n\r\n def test_string_sort_with_zeros(self, level=rlevel):\r\n # Check sort for strings containing zeros.\r\n x = np.fromstring(\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\r\n y = np.fromstring(\"\\x00\\x01\\x00\\x02\", dtype=\"|S2\")\r\n assert_array_equal(np.sort(x, kind=\"q\"), y)\r\n\r\n def test_copy_detection_zero_dim(self, level=rlevel):\r\n # Ticket #658\r\n np.indices((0, 3, 4)).T.reshape(-1, 3)\r\n\r\n def test_flat_byteorder(self, level=rlevel):\r\n # Ticket #657\r\n x = np.arange(10)\r\n assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])\r\n assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))\r\n\r\n def 
test_uint64_from_negative(self, level=rlevel):\r\n assert_equal(np.uint64(-2), np.uint64(18446744073709551614))\r\n\r\n def test_sign_bit(self, level=rlevel):\r\n x = np.array([0, -0.0, 0])\r\n assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')\r\n\r\n def test_flat_index_byteswap(self, level=rlevel):\r\n for dt in (np.dtype('<i4'), np.dtype('>i4')):\r\n x = np.array([-1, 0, 1], dtype=dt)\r\n assert_equal(x.flat[0].dtype, x[0].dtype)\r\n\r\n def test_copy_detection_corner_case(self, level=rlevel):\r\n # Ticket #658\r\n np.indices((0, 3, 4)).T.reshape(-1, 3)\r\n\r\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\r\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,\r\n # 0-sized reshape itself is tested elsewhere.\r\n @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)\r\n def test_copy_detection_corner_case2(self, level=rlevel):\r\n # Ticket #771: strides are not set correctly when reshaping 0-sized\r\n # arrays\r\n b = np.indices((0, 3, 4)).T.reshape(-1, 3)\r\n assert_equal(b.strides, (3 * b.itemsize, b.itemsize))\r\n\r\n def test_object_array_refcounting(self, level=rlevel):\r\n # Ticket #633\r\n if not hasattr(sys, 'getrefcount'):\r\n return\r\n\r\n # NB. this is probably CPython-specific\r\n\r\n cnt = sys.getrefcount\r\n\r\n a = object()\r\n b = object()\r\n c = object()\r\n\r\n cnt0_a = cnt(a)\r\n cnt0_b = cnt(b)\r\n cnt0_c = cnt(c)\r\n\r\n # -- 0d -> 1-d broadcast slice assignment\r\n\r\n arr = np.zeros(5, dtype=np.object_)\r\n\r\n arr[:] = a\r\n assert_equal(cnt(a), cnt0_a + 5)\r\n\r\n arr[:] = b\r\n assert_equal(cnt(a), cnt0_a)\r\n assert_equal(cnt(b), cnt0_b + 5)\r\n\r\n arr[:2] = c\r\n assert_equal(cnt(b), cnt0_b + 3)\r\n assert_equal(cnt(c), cnt0_c + 2)\r\n\r\n del arr\r\n\r\n # -- 1-d -> 2-d broadcast slice assignment\r\n\r\n arr = np.zeros((5, 2), dtype=np.object_)\r\n arr0 = np.zeros(2, dtype=np.object_)\r\n\r\n arr0[0] = a\r\n assert_(cnt(a) == cnt0_a + 1)\r\n arr0[1] = b\r\n assert_(cnt(b) == cnt0_b + 1)\r\n\r\n arr[:,:] = arr0\r\n assert_(cnt(a) == cnt0_a + 6)\r\n assert_(cnt(b) == cnt0_b + 6)\r\n\r\n arr[:, 0] = None\r\n assert_(cnt(a) == cnt0_a + 1)\r\n\r\n del arr, arr0\r\n\r\n # -- 2-d copying + flattening\r\n\r\n arr = np.zeros((5, 2), dtype=np.object_)\r\n\r\n arr[:, 0] = a\r\n arr[:, 1] = b\r\n assert_(cnt(a) == cnt0_a + 5)\r\n assert_(cnt(b) == cnt0_b + 5)\r\n\r\n arr2 = arr.copy()\r\n assert_(cnt(a) == cnt0_a + 10)\r\n assert_(cnt(b) == cnt0_b + 10)\r\n\r\n arr2 = arr[:, 0].copy()\r\n assert_(cnt(a) == cnt0_a + 10)\r\n assert_(cnt(b) == cnt0_b + 5)\r\n\r\n arr2 = arr.flatten()\r\n assert_(cnt(a) == cnt0_a + 10)\r\n assert_(cnt(b) == cnt0_b + 10)\r\n\r\n del arr, arr2\r\n\r\n # -- concatenate, repeat, take, choose\r\n\r\n arr1 = np.zeros((5, 1), dtype=np.object_)\r\n arr2 = np.zeros((5, 1), dtype=np.object_)\r\n\r\n arr1[...] = a\r\n arr2[...] 
= b\r\n assert_(cnt(a) == cnt0_a + 5)\r\n assert_(cnt(b) == cnt0_b + 5)\r\n\r\n tmp = np.concatenate((arr1, arr2))\r\n assert_(cnt(a) == cnt0_a + 5 + 5)\r\n assert_(cnt(b) == cnt0_b + 5 + 5)\r\n\r\n tmp = arr1.repeat(3, axis=0)\r\n assert_(cnt(a) == cnt0_a + 5 + 3*5)\r\n\r\n tmp = arr1.take([1, 2, 3], axis=0)\r\n assert_(cnt(a) == cnt0_a + 5 + 3)\r\n\r\n x = np.array([[0], [1], [0], [1], [1]], int)\r\n tmp = x.choose(arr1, arr2)\r\n assert_(cnt(a) == cnt0_a + 5 + 2)\r\n assert_(cnt(b) == cnt0_b + 5 + 3)\r\n\r\n del tmp # Avoid pyflakes unused variable warning\r\n\r\n def test_mem_custom_float_to_array(self, level=rlevel):\r\n # Ticket 702\r\n class MyFloat(object):\r\n def __float__(self):\r\n return 1.0\r\n\r\n tmp = np.atleast_1d([MyFloat()])\r\n tmp.astype(float) # Should succeed\r\n\r\n def test_object_array_refcount_self_assign(self, level=rlevel):\r\n # Ticket #711\r\n class VictimObject(object):\r\n deleted = False\r\n\r\n def __del__(self):\r\n self.deleted = True\r\n\r\n d = VictimObject()\r\n arr = np.zeros(5, dtype=np.object_)\r\n arr[:] = d\r\n del d\r\n arr[:] = arr # refcount of 'd' might hit zero here\r\n assert_(not arr[0].deleted)\r\n arr[:] = arr # trying to induce a segfault by doing it again...\r\n assert_(not arr[0].deleted)\r\n\r\n def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):\r\n x = [1, 2, 3]\r\n self.assertRaises(ValueError,\r\n np.fromiter, [xi for xi in x], dtype='S')\r\n\r\n def test_reduce_big_object_array(self, level=rlevel):\r\n # Ticket #713\r\n oldsize = np.setbufsize(10*16)\r\n a = np.array([None]*161, object)\r\n assert_(not np.any(a))\r\n np.setbufsize(oldsize)\r\n\r\n def test_mem_0d_array_index(self, level=rlevel):\r\n # Ticket #714\r\n np.zeros(10)[np.array(0)]\r\n\r\n def test_floats_from_string(self, level=rlevel):\r\n # Ticket #640, floats from string\r\n fsingle = np.single('1.234')\r\n fdouble = np.double('1.234')\r\n flongdouble = np.longdouble('1.234')\r\n assert_almost_equal(fsingle, 1.234)\r\n assert_almost_equal(fdouble, 1.234)\r\n assert_almost_equal(flongdouble, 1.234)\r\n\r\n def test_nonnative_endian_fill(self, level=rlevel):\r\n # Non-native endian arrays were incorrectly filled with scalars\r\n # before r5034.\r\n if sys.byteorder == 'little':\r\n dtype = np.dtype('>i4')\r\n else:\r\n dtype = np.dtype('<i4')\r\n x = np.empty([1], dtype=dtype)\r\n x.fill(1)\r\n assert_equal(x, np.array([1], dtype=dtype))\r\n\r\n def test_dot_alignment_sse2(self, level=rlevel):\r\n # Test for ticket #551, changeset r5140\r\n x = np.zeros((30, 40))\r\n y = pickle.loads(pickle.dumps(x))\r\n # y is now typically not aligned on a 8-byte boundary\r\n z = np.ones((1, y.shape[0]))\r\n # This shouldn't cause a segmentation fault:\r\n np.dot(z, y)\r\n\r\n def test_astype_copy(self, level=rlevel):\r\n # Ticket #788, changeset r5155\r\n # The test data file was generated by scipy.io.savemat.\r\n # The dtype is float64, but the isbuiltin attribute is 0.\r\n data_dir = path.join(path.dirname(__file__), 'data')\r\n filename = path.join(data_dir, \"astype_copy.pkl\")\r\n if sys.version_info[0] >= 3:\r\n f = open(filename, 'rb')\r\n xp = pickle.load(f, encoding='latin1')\r\n f.close()\r\n else:\r\n f = open(filename)\r\n xp = pickle.load(f)\r\n f.close()\r\n xpd = xp.astype(np.float64)\r\n assert_((xp.__array_interface__['data'][0] !=\r\n xpd.__array_interface__['data'][0]))\r\n\r\n def test_compress_small_type(self, level=rlevel):\r\n # Ticket #789, changeset 5217.\r\n # compress with out argument segfaulted if cannot cast safely\r\n import numpy as 
np\r\n a = np.array([[1, 2], [3, 4]])\r\n b = np.zeros((2, 1), dtype=np.single)\r\n try:\r\n a.compress([True, False], axis=1, out=b)\r\n raise AssertionError(\"compress with an out which cannot be \"\r\n \"safely casted should not return \"\r\n \"successfully\")\r\n except TypeError:\r\n pass\r\n\r\n def test_attributes(self, level=rlevel):\r\n # Ticket #791\r\n class TestArray(np.ndarray):\r\n def __new__(cls, data, info):\r\n result = np.array(data)\r\n result = result.view(cls)\r\n result.info = info\r\n return result\r\n\r\n def __array_finalize__(self, obj):\r\n self.info = getattr(obj, 'info', '')\r\n\r\n dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')\r\n assert_(dat.info == 'jubba')\r\n dat.resize((4, 2))\r\n assert_(dat.info == 'jubba')\r\n dat.sort()\r\n assert_(dat.info == 'jubba')\r\n dat.fill(2)\r\n assert_(dat.info == 'jubba')\r\n dat.put([2, 3, 4], [6, 3, 4])\r\n assert_(dat.info == 'jubba')\r\n dat.setfield(4, np.int32, 0)\r\n assert_(dat.info == 'jubba')\r\n dat.setflags()\r\n assert_(dat.info == 'jubba')\r\n assert_(dat.all(1).info == 'jubba')\r\n assert_(dat.any(1).info == 'jubba')\r\n assert_(dat.argmax(1).info == 'jubba')\r\n assert_(dat.argmin(1).info == 'jubba')\r\n assert_(dat.argsort(1).info == 'jubba')\r\n assert_(dat.astype(TestArray).info == 'jubba')\r\n assert_(dat.byteswap().info == 'jubba')\r\n assert_(dat.clip(2, 7).info == 'jubba')\r\n assert_(dat.compress([0, 1, 1]).info == 'jubba')\r\n assert_(dat.conj().info == 'jubba')\r\n assert_(dat.conjugate().info == 'jubba')\r\n assert_(dat.copy().info == 'jubba')\r\n dat2 = TestArray([2, 3, 1, 0], 'jubba')\r\n choices = [[0, 1, 2, 3], [10, 11, 12, 13],\r\n [20, 21, 22, 23], [30, 31, 32, 33]]\r\n assert_(dat2.choose(choices).info == 'jubba')\r\n assert_(dat.cumprod(1).info == 'jubba')\r\n assert_(dat.cumsum(1).info == 'jubba')\r\n assert_(dat.diagonal().info == 'jubba')\r\n assert_(dat.flatten().info == 'jubba')\r\n assert_(dat.getfield(np.int32, 0).info == 'jubba')\r\n assert_(dat.imag.info == 'jubba')\r\n assert_(dat.max(1).info == 'jubba')\r\n assert_(dat.mean(1).info == 'jubba')\r\n assert_(dat.min(1).info == 'jubba')\r\n assert_(dat.newbyteorder().info == 'jubba')\r\n assert_(dat.prod(1).info == 'jubba')\r\n assert_(dat.ptp(1).info == 'jubba')\r\n assert_(dat.ravel().info == 'jubba')\r\n assert_(dat.real.info == 'jubba')\r\n assert_(dat.repeat(2).info == 'jubba')\r\n assert_(dat.reshape((2, 4)).info == 'jubba')\r\n assert_(dat.round().info == 'jubba')\r\n assert_(dat.squeeze().info == 'jubba')\r\n assert_(dat.std(1).info == 'jubba')\r\n assert_(dat.sum(1).info == 'jubba')\r\n assert_(dat.swapaxes(0, 1).info == 'jubba')\r\n assert_(dat.take([2, 3, 5]).info == 'jubba')\r\n assert_(dat.transpose().info == 'jubba')\r\n assert_(dat.T.info == 'jubba')\r\n assert_(dat.var(1).info == 'jubba')\r\n assert_(dat.view(TestArray).info == 'jubba')\r\n # These methods do not preserve subclasses\r\n assert_(type(dat.nonzero()[0]) is np.ndarray)\r\n assert_(type(dat.nonzero()[1]) is np.ndarray)\r\n\r\n def test_recarray_tolist(self, level=rlevel):\r\n # Ticket #793, changeset r5215\r\n # Comparisons fail for NaN, so we can't use random memory\r\n # for the test.\r\n buf = np.zeros(40, dtype=np.int8)\r\n a = np.recarray(2, formats=\"i4,f8,f8\", names=\"id,x,y\", buf=buf)\r\n b = a.tolist()\r\n assert_( a[0].tolist() == b[0])\r\n assert_( a[1].tolist() == b[1])\r\n\r\n def test_nonscalar_item_method(self):\r\n # Make sure that .item() fails graciously when it should\r\n a = np.arange(5)\r\n assert_raises(ValueError, 
a.item)\r\n\r\n def test_char_array_creation(self, level=rlevel):\r\n a = np.array('123', dtype='c')\r\n b = np.array(asbytes_nested(['1', '2', '3']))\r\n assert_equal(a, b)\r\n\r\n def test_unaligned_unicode_access(self, level=rlevel):\r\n # Ticket #825\r\n for i in range(1, 9):\r\n msg = 'unicode offset: %d chars' % i\r\n t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])\r\n x = np.array([(asbytes('a'), sixu('b'))], dtype=t)\r\n if sys.version_info[0] >= 3:\r\n assert_equal(str(x), \"[(b'a', 'b')]\", err_msg=msg)\r\n else:\r\n assert_equal(str(x), \"[('a', u'b')]\", err_msg=msg)\r\n\r\n def test_sign_for_complex_nan(self, level=rlevel):\r\n # Ticket 794.\r\n with np.errstate(invalid='ignore'):\r\n C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])\r\n have = np.sign(C)\r\n want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])\r\n assert_equal(have, want)\r\n\r\n def test_for_equal_names(self, level=rlevel):\r\n # Ticket #674\r\n dt = np.dtype([('foo', float), ('bar', float)])\r\n a = np.zeros(10, dt)\r\n b = list(a.dtype.names)\r\n b[0] = \"notfoo\"\r\n a.dtype.names = b\r\n assert_(a.dtype.names[0] == \"notfoo\")\r\n assert_(a.dtype.names[1] == \"bar\")\r\n\r\n def test_for_object_scalar_creation(self, level=rlevel):\r\n # Ticket #816\r\n a = np.object_()\r\n b = np.object_(3)\r\n b2 = np.object_(3.0)\r\n c = np.object_([4, 5])\r\n d = np.object_([None, {}, []])\r\n assert_(a is None)\r\n assert_(type(b) is int)\r\n assert_(type(b2) is float)\r\n assert_(type(c) is np.ndarray)\r\n assert_(c.dtype == object)\r\n assert_(d.dtype == object)\r\n\r\n def test_array_resize_method_system_error(self):\r\n # Ticket #840 - order should be an invalid keyword.\r\n x = np.array([[0, 1], [2, 3]])\r\n self.assertRaises(TypeError, x.resize, (2, 2), order='C')\r\n\r\n def test_for_zero_length_in_choose(self, level=rlevel):\r\n \"Ticket #882\"\r\n a = np.array(1)\r\n self.assertRaises(ValueError, lambda x: x.choose([]), a)\r\n\r\n def test_array_ndmin_overflow(self):\r\n \"Ticket #947.\"\r\n self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))\r\n\r\n def test_errobj_reference_leak(self, level=rlevel):\r\n # Ticket #955\r\n with np.errstate(all=\"ignore\"):\r\n z = int(0)\r\n p = np.int32(-1)\r\n\r\n gc.collect()\r\n n_before = len(gc.get_objects())\r\n z**p # this shouldn't leak a reference to errobj\r\n gc.collect()\r\n n_after = len(gc.get_objects())\r\n assert_(n_before >= n_after, (n_before, n_after))\r\n\r\n def test_void_scalar_with_titles(self, level=rlevel):\r\n # No ticket\r\n data = [('john', 4), ('mary', 5)]\r\n dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]\r\n arr = np.array(data, dtype=dtype1)\r\n assert_(arr[0][0] == 'john')\r\n assert_(arr[0][1] == 4)\r\n\r\n def test_void_scalar_constructor(self):\r\n #Issue #1550\r\n\r\n #Create test string data, construct void scalar from data and assert\r\n #that void scalar contains original data.\r\n test_string = np.array(\"test\")\r\n test_string_void_scalar = np.core.multiarray.scalar(\r\n np.dtype((\"V\", test_string.dtype.itemsize)), test_string.tobytes())\r\n\r\n assert_(test_string_void_scalar.view(test_string.dtype) == test_string)\r\n\r\n #Create record scalar, construct from data and assert that\r\n #reconstructed scalar is correct.\r\n test_record = np.ones((), \"i,i\")\r\n test_record_void_scalar = np.core.multiarray.scalar(\r\n test_record.dtype, test_record.tobytes())\r\n\r\n assert_(test_record_void_scalar == test_record)\r\n\r\n #Test pickle and unpickle of void and record scalars\r\n 
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)\r\n assert_(pickle.loads(pickle.dumps(test_record)) == test_record)\r\n\r\n def test_blasdot_uninitialized_memory(self):\r\n # Ticket #950\r\n for m in [0, 1, 2]:\r\n for n in [0, 1, 2]:\r\n for k in range(3):\r\n # Try to ensure that x->data contains non-zero floats\r\n x = np.array([123456789e199], dtype=np.float64)\r\n x.resize((m, 0))\r\n y = np.array([123456789e199], dtype=np.float64)\r\n y.resize((0, n))\r\n\r\n # `dot` should just return zero (m,n) matrix\r\n z = np.dot(x, y)\r\n assert_(np.all(z == 0))\r\n assert_(z.shape == (m, n))\r\n\r\n def test_zeros(self):\r\n # Regression test for #1061.\r\n # Set a size which cannot fit into a 64 bits signed integer\r\n sz = 2 ** 64\r\n good = 'Maximum allowed dimension exceeded'\r\n try:\r\n np.empty(sz)\r\n except ValueError as e:\r\n if not str(e) == good:\r\n self.fail(\"Got msg '%s', expected '%s'\" % (e, good))\r\n except Exception as e:\r\n self.fail(\"Got exception of type %s instead of ValueError\" % type(e))\r\n\r\n def test_huge_arange(self):\r\n # Regression test for #1062.\r\n # Set a size which cannot fit into a 64 bits signed integer\r\n sz = 2 ** 64\r\n good = 'Maximum allowed size exceeded'\r\n try:\r\n np.arange(sz)\r\n self.assertTrue(np.size == sz)\r\n except ValueError as e:\r\n if not str(e) == good:\r\n self.fail(\"Got msg '%s', expected '%s'\" % (e, good))\r\n except Exception as e:\r\n self.fail(\"Got exception of type %s instead of ValueError\" % type(e))\r\n\r\n def test_fromiter_bytes(self):\r\n # Ticket #1058\r\n a = np.fromiter(list(range(10)), dtype='b')\r\n b = np.fromiter(list(range(10)), dtype='B')\r\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\r\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\r\n\r\n def test_array_from_sequence_scalar_array(self):\r\n # Ticket #1078: segfaults when creating an array with a sequence of\r\n # 0d arrays.\r\n a = np.array((np.ones(2), np.array(2)))\r\n assert_equal(a.shape, (2,))\r\n assert_equal(a.dtype, np.dtype(object))\r\n assert_equal(a[0], np.ones(2))\r\n assert_equal(a[1], np.array(2))\r\n\r\n a = np.array(((1,), np.array(1)))\r\n assert_equal(a.shape, (2,))\r\n assert_equal(a.dtype, np.dtype(object))\r\n assert_equal(a[0], (1,))\r\n assert_equal(a[1], np.array(1))\r\n\r\n def test_array_from_sequence_scalar_array2(self):\r\n # Ticket #1081: weird array with strange input...\r\n t = np.array([np.array([]), np.array(0, object)])\r\n assert_equal(t.shape, (2,))\r\n assert_equal(t.dtype, np.dtype(object))\r\n\r\n def test_array_too_big(self):\r\n # Ticket #1080.\r\n assert_raises(ValueError, np.zeros, [975]*7, np.int8)\r\n assert_raises(ValueError, np.zeros, [26244]*5, np.int8)\r\n\r\n def test_dtype_keyerrors_(self):\r\n # Ticket #1106.\r\n dt = np.dtype([('f1', np.uint)])\r\n assert_raises(KeyError, dt.__getitem__, \"f2\")\r\n assert_raises(IndexError, dt.__getitem__, 1)\r\n assert_raises(ValueError, dt.__getitem__, 0.0)\r\n\r\n def test_lexsort_buffer_length(self):\r\n # Ticket #1217, don't segfault.\r\n a = np.ones(100, dtype=np.int8)\r\n b = np.ones(100, dtype=np.int32)\r\n i = np.lexsort((a[::-1], b))\r\n assert_equal(i, np.arange(100, dtype=np.int))\r\n\r\n def test_object_array_to_fixed_string(self):\r\n # Ticket #1235.\r\n a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)\r\n b = np.array(a, dtype=(np.str_, 8))\r\n assert_equal(a, b)\r\n c = np.array(a, dtype=(np.str_, 5))\r\n assert_equal(c, np.array(['abcde', 'ijklm']))\r\n d = np.array(a, 
dtype=(np.str_, 12))\r\n assert_equal(a, d)\r\n e = np.empty((2, ), dtype=(np.str_, 8))\r\n e[:] = a[:]\r\n assert_equal(a, e)\r\n\r\n def test_unicode_to_string_cast(self):\r\n # Ticket #1240.\r\n a = np.array([[sixu('abc'), sixu('\\u03a3')],\r\n [sixu('asdf'), sixu('erw')]],\r\n dtype='U')\r\n self.assertRaises(UnicodeEncodeError, np.array, a, 'S4')\r\n\r\n def test_mixed_string_unicode_array_creation(self):\r\n a = np.array(['1234', sixu('123')])\r\n assert_(a.itemsize == 16)\r\n a = np.array([sixu('123'), '1234'])\r\n assert_(a.itemsize == 16)\r\n a = np.array(['1234', sixu('123'), '12345'])\r\n assert_(a.itemsize == 20)\r\n a = np.array([sixu('123'), '1234', sixu('12345')])\r\n assert_(a.itemsize == 20)\r\n a = np.array([sixu('123'), '1234', sixu('1234')])\r\n assert_(a.itemsize == 16)\r\n\r\n def test_misaligned_objects_segfault(self):\r\n # Ticket #1198 and #1267\r\n a1 = np.zeros((10,), dtype='O,c')\r\n a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')\r\n a1['f0'] = a2\r\n repr(a1)\r\n np.argmax(a1['f0'])\r\n a1['f0'][1] = \"FOO\"\r\n a1['f0'] = \"FOO\"\r\n np.array(a1['f0'], dtype='S')\r\n np.nonzero(a1['f0'])\r\n a1.sort()\r\n copy.deepcopy(a1)\r\n\r\n def test_misaligned_scalars_segfault(self):\r\n # Ticket #1267\r\n s1 = np.array(('a', 'Foo'), dtype='c,O')\r\n s2 = np.array(('b', 'Bar'), dtype='c,O')\r\n s1['f1'] = s2['f1']\r\n s1['f1'] = 'Baz'\r\n\r\n def test_misaligned_dot_product_objects(self):\r\n # Ticket #1267\r\n # This didn't require a fix, but it's worth testing anyway, because\r\n # it may fail if .dot stops enforcing the arrays to be BEHAVED\r\n a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')\r\n b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')\r\n np.dot(a['f0'], b['f0'])\r\n\r\n def test_byteswap_complex_scalar(self):\r\n # Ticket #1259 and gh-441\r\n for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:\r\n z = np.array([2.2-1.1j], dtype)\r\n x = z[0] # always native-endian\r\n y = x.byteswap()\r\n if x.dtype.byteorder == z.dtype.byteorder:\r\n # little-endian machine\r\n assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))\r\n else:\r\n # big-endian machine\r\n assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))\r\n # double check real and imaginary parts:\r\n assert_equal(x.real, y.real.byteswap())\r\n assert_equal(x.imag, y.imag.byteswap())\r\n\r\n def test_structured_arrays_with_objects1(self):\r\n # Ticket #1299\r\n stra = 'aaaa'\r\n strb = 'bbbb'\r\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\r\n x[x.nonzero()] = x.ravel()[:1]\r\n assert_(x[0, 1] == x[0, 0])\r\n\r\n def test_structured_arrays_with_objects2(self):\r\n # Ticket #1299 second test\r\n stra = 'aaaa'\r\n strb = 'bbbb'\r\n numb = sys.getrefcount(strb)\r\n numa = sys.getrefcount(stra)\r\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\r\n x[x.nonzero()] = x.ravel()[:1]\r\n assert_(sys.getrefcount(strb) == numb)\r\n assert_(sys.getrefcount(stra) == numa + 2)\r\n\r\n def test_duplicate_title_and_name(self):\r\n # Ticket #1254\r\n dtspec = [(('a', 'a'), 'i'), ('b', 'i')]\r\n self.assertRaises(ValueError, np.dtype, dtspec)\r\n\r\n def test_signed_integer_division_overflow(self):\r\n # Ticket #1317.\r\n def test_type(t):\r\n min = np.array([np.iinfo(t).min])\r\n min //= -1\r\n\r\n with np.errstate(divide=\"ignore\"):\r\n for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):\r\n test_type(t)\r\n\r\n def test_buffer_hashlib(self):\r\n try:\r\n from hashlib import md5\r\n except 
ImportError:\r\n from md5 import new as md5\r\n\r\n x = np.array([1, 2, 3], dtype=np.dtype('<i4'))\r\n assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')\r\n\r\n def test_0d_string_scalar(self):\r\n # Bug #1436; the following should succeed\r\n np.asarray('x', '>c')\r\n\r\n def test_log1p_compiler_shenanigans(self):\r\n # Check if log1p is behaving on 32 bit intel systems.\r\n assert_(np.isfinite(np.log1p(np.exp2(-53))))\r\n\r\n def test_fromiter_comparison(self, level=rlevel):\r\n a = np.fromiter(list(range(10)), dtype='b')\r\n b = np.fromiter(list(range(10)), dtype='B')\r\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\r\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\r\n\r\n def test_fromstring_crash(self):\r\n # Ticket #1345: the following should not cause a crash\r\n np.fromstring(asbytes('aa, aa, 1.0'), sep=',')\r\n\r\n def test_ticket_1539(self):\r\n dtypes = [x for x in np.typeDict.values()\r\n if (issubclass(x, np.number)\r\n and not issubclass(x, np.timedelta64))]\r\n a = np.array([], dtypes[0])\r\n failures = []\r\n # ignore complex warnings\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore', np.ComplexWarning)\r\n for x in dtypes:\r\n b = a.astype(x)\r\n for y in dtypes:\r\n c = a.astype(y)\r\n try:\r\n np.dot(b, c)\r\n except TypeError:\r\n failures.append((x, y))\r\n if failures:\r\n raise AssertionError(\"Failures: %r\" % failures)\r\n\r\n def test_ticket_1538(self):\r\n x = np.finfo(np.float32)\r\n for name in 'eps epsneg max min resolution tiny'.split():\r\n assert_equal(type(getattr(x, name)), np.float32,\r\n err_msg=name)\r\n\r\n def test_ticket_1434(self):\r\n # Check that the out= argument in var and std has an effect\r\n data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))\r\n out = np.zeros((3,))\r\n\r\n ret = data.var(axis=1, out=out)\r\n assert_(ret is out)\r\n assert_array_equal(ret, data.var(axis=1))\r\n\r\n ret = data.std(axis=1, out=out)\r\n assert_(ret is out)\r\n assert_array_equal(ret, data.std(axis=1))\r\n\r\n def test_complex_nan_maximum(self):\r\n cnan = complex(0, np.nan)\r\n assert_equal(np.maximum(1, cnan), cnan)\r\n\r\n def test_subclass_int_tuple_assignment(self):\r\n # ticket #1563\r\n class Subclass(np.ndarray):\r\n def __new__(cls, i):\r\n return np.ones((i,)).view(cls)\r\n\r\n x = Subclass(5)\r\n x[(0,)] = 2 # shouldn't raise an exception\r\n assert_equal(x[0], 2)\r\n\r\n def test_ufunc_no_unnecessary_views(self):\r\n # ticket #1548\r\n class Subclass(np.ndarray):\r\n pass\r\n x = np.array([1, 2, 3]).view(Subclass)\r\n y = np.add(x, x, x)\r\n assert_equal(id(x), id(y))\r\n\r\n def test_take_refcount(self):\r\n # ticket #939\r\n a = np.arange(16, dtype=np.float)\r\n a.shape = (4, 4)\r\n lut = np.ones((5 + 3, 4), np.float)\r\n rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)\r\n c1 = sys.getrefcount(rgba)\r\n try:\r\n lut.take(a, axis=0, mode='clip', out=rgba)\r\n except TypeError:\r\n pass\r\n c2 = sys.getrefcount(rgba)\r\n assert_equal(c1, c2)\r\n\r\n def test_fromfile_tofile_seeks(self):\r\n # On Python 3, tofile/fromfile used to get (#1610) the Python\r\n # file handle out of sync\r\n f0 = tempfile.NamedTemporaryFile()\r\n f = f0.file\r\n f.write(np.arange(255, dtype='u1').tobytes())\r\n\r\n f.seek(20)\r\n ret = np.fromfile(f, count=4, dtype='u1')\r\n assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))\r\n assert_equal(f.tell(), 24)\r\n\r\n f.seek(40)\r\n np.array([1, 2, 3], dtype='u1').tofile(f)\r\n assert_equal(f.tell(), 43)\r\n\r\n f.seek(40)\r\n data = 
f.read(3)\r\n assert_equal(data, asbytes(\"\\x01\\x02\\x03\"))\r\n\r\n f.seek(80)\r\n f.read(4)\r\n data = np.fromfile(f, dtype='u1', count=4)\r\n assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))\r\n\r\n f.close()\r\n\r\n def test_complex_scalar_warning(self):\r\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\r\n x = tp(1+2j)\r\n assert_warns(np.ComplexWarning, float, x)\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore')\r\n assert_equal(float(x), float(x.real))\r\n\r\n def test_complex_scalar_complex_cast(self):\r\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\r\n x = tp(1+2j)\r\n assert_equal(complex(x), 1+2j)\r\n\r\n def test_complex_boolean_cast(self):\r\n # Ticket #2218\r\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\r\n x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)\r\n assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))\r\n assert_(np.any(x))\r\n assert_(np.all(x[1:]))\r\n\r\n def test_uint_int_conversion(self):\r\n x = 2**64 - 1\r\n assert_equal(int(np.uint64(x)), x)\r\n\r\n def test_duplicate_field_names_assign(self):\r\n ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')\r\n ra.dtype.names = ('f1', 'f2')\r\n repr(ra) # should not cause a segmentation fault\r\n assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))\r\n\r\n def test_eq_string_and_object_array(self):\r\n # From e-mail thread \"__eq__ with str and object\" (Keith Goodman)\r\n a1 = np.array(['a', 'b'], dtype=object)\r\n a2 = np.array(['a', 'c'])\r\n assert_array_equal(a1 == a2, [True, False])\r\n assert_array_equal(a2 == a1, [True, False])\r\n\r\n def test_nonzero_byteswap(self):\r\n a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)\r\n a.dtype = np.float32\r\n assert_equal(a.nonzero()[0], [1])\r\n a = a.byteswap().newbyteorder()\r\n assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap\r\n\r\n def test_find_common_type_boolean(self):\r\n # Ticket #1695\r\n assert_(np.find_common_type([], ['?', '?']) == '?')\r\n\r\n def test_empty_mul(self):\r\n a = np.array([1.])\r\n a[1:1] *= 2\r\n assert_equal(a, [1.])\r\n\r\n def test_array_side_effect(self):\r\n # The second use of itemsize was throwing an exception because in\r\n # ctors.c, discover_itemsize was calling PyObject_Length without\r\n # checking the return code. 
This failed to get the length of the\r\n # number 2, and the exception hung around until something checked\r\n # PyErr_Occurred() and returned an error.\r\n assert_equal(np.dtype('S10').itemsize, 10)\r\n np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)\r\n assert_equal(np.dtype('S10').itemsize, 10)\r\n\r\n def test_any_float(self):\r\n # all and any for floats\r\n a = np.array([0.1, 0.9])\r\n assert_(np.any(a))\r\n assert_(np.all(a))\r\n\r\n def test_large_float_sum(self):\r\n a = np.arange(10000, dtype='f')\r\n assert_equal(a.sum(dtype='d'), a.astype('d').sum())\r\n\r\n def test_ufunc_casting_out(self):\r\n a = np.array(1.0, dtype=np.float32)\r\n b = np.array(1.0, dtype=np.float64)\r\n c = np.array(1.0, dtype=np.float32)\r\n np.add(a, b, out=c)\r\n assert_equal(c, 2.0)\r\n\r\n def test_array_scalar_contiguous(self):\r\n # Array scalars are both C and Fortran contiguous\r\n assert_(np.array(1.0).flags.c_contiguous)\r\n assert_(np.array(1.0).flags.f_contiguous)\r\n assert_(np.array(np.float32(1.0)).flags.c_contiguous)\r\n assert_(np.array(np.float32(1.0)).flags.f_contiguous)\r\n\r\n def test_squeeze_contiguous(self):\r\n # Similar to GitHub issue #387\r\n a = np.zeros((1, 2)).squeeze()\r\n b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze()\r\n assert_(a.flags.c_contiguous)\r\n assert_(a.flags.f_contiguous)\r\n assert_(b.flags.f_contiguous)\r\n\r\n def test_reduce_contiguous(self):\r\n # GitHub issue #387\r\n a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))\r\n b = np.add.reduce(np.zeros((2, 1, 2)), 1)\r\n assert_(a.flags.c_contiguous)\r\n assert_(a.flags.f_contiguous)\r\n assert_(b.flags.c_contiguous)\r\n\r\n def test_object_array_self_reference(self):\r\n # Object arrays with references to themselves can cause problems\r\n a = np.array(0, dtype=object)\r\n a[()] = a\r\n assert_raises(TypeError, int, a)\r\n assert_raises(TypeError, long, a)\r\n assert_raises(TypeError, float, a)\r\n assert_raises(TypeError, oct, a)\r\n assert_raises(TypeError, hex, a)\r\n\r\n # Test the same for a circular reference.\r\n b = np.array(a, dtype=object)\r\n a[()] = b\r\n assert_raises(TypeError, int, a)\r\n # Numpy has no tp_traverse currently, so circular references\r\n # cannot be detected. So resolve it:\r\n a[()] = 0\r\n\r\n # This was causing a to become like the above\r\n a = np.array(0, dtype=object)\r\n a[...] 
+= 1\r\n assert_equal(a, 1)\r\n\r\n def test_object_array_self_copy(self):\r\n # An object array being copied into itself DECREF'ed before INCREF'ing\r\n # causing segmentation faults (gh-3787)\r\n a = np.array(object(), dtype=object)\r\n np.copyto(a, a)\r\n assert_equal(sys.getrefcount(a[()]), 2)\r\n a[()].__class__ # will segfault if object was deleted\r\n\r\n def test_zerosize_accumulate(self):\r\n \"Ticket #1733\"\r\n x = np.array([[42, 0]], dtype=np.uint32)\r\n assert_equal(np.add.accumulate(x[:-1, 0]), [])\r\n\r\n def test_objectarray_setfield(self):\r\n # Setfield should not overwrite Object fields with non-Object data\r\n x = np.array([1, 2, 3], dtype=object)\r\n assert_raises(TypeError, x.setfield, 4, np.int32, 0)\r\n\r\n def test_setting_rank0_string(self):\r\n \"Ticket #1736\"\r\n s1 = asbytes(\"hello1\")\r\n s2 = asbytes(\"hello2\")\r\n a = np.zeros((), dtype=\"S10\")\r\n a[()] = s1\r\n assert_equal(a, np.array(s1))\r\n a[()] = np.array(s2)\r\n assert_equal(a, np.array(s2))\r\n\r\n a = np.zeros((), dtype='f4')\r\n a[()] = 3\r\n assert_equal(a, np.array(3))\r\n a[()] = np.array(4)\r\n assert_equal(a, np.array(4))\r\n\r\n def test_string_astype(self):\r\n \"Ticket #1748\"\r\n s1 = asbytes('black')\r\n s2 = asbytes('white')\r\n s3 = asbytes('other')\r\n a = np.array([[s1], [s2], [s3]])\r\n assert_equal(a.dtype, np.dtype('S5'))\r\n b = a.astype(np.dtype('S0'))\r\n assert_equal(b.dtype, np.dtype('S5'))\r\n\r\n def test_ticket_1756(self):\r\n # Ticket #1756\r\n s = asbytes('0123456789abcdef')\r\n a = np.array([s]*5)\r\n for i in range(1, 17):\r\n a1 = np.array(a, \"|S%d\" % i)\r\n a2 = np.array([s[:i]]*5)\r\n assert_equal(a1, a2)\r\n\r\n def test_fields_strides(self):\r\n \"Ticket #1760\"\r\n r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')\r\n assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])\r\n assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])\r\n assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])\r\n assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)\r\n\r\n def test_alignment_update(self):\r\n # Check that alignment flag is updated on stride setting\r\n a = np.arange(10)\r\n assert_(a.flags.aligned)\r\n a.strides = 3\r\n assert_(not a.flags.aligned)\r\n\r\n def test_ticket_1770(self):\r\n \"Should not segfault on python 3k\"\r\n import numpy as np\r\n try:\r\n a = np.zeros((1,), dtype=[('f1', 'f')])\r\n a['f1'] = 1\r\n a['f2'] = 1\r\n except ValueError:\r\n pass\r\n except:\r\n raise AssertionError\r\n\r\n def test_ticket_1608(self):\r\n \"x.flat shouldn't modify data\"\r\n x = np.array([[1, 2], [3, 4]]).T\r\n np.array(x.flat)\r\n assert_equal(x, [[1, 3], [2, 4]])\r\n\r\n def test_pickle_string_overwrite(self):\r\n import re\r\n\r\n data = np.array([1], dtype='b')\r\n blob = pickle.dumps(data, protocol=1)\r\n data = pickle.loads(blob)\r\n\r\n # Check that loads does not clobber interned strings\r\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\r\n assert_equal(s[0], \"\\x01\")\r\n data[0] = 0xbb\r\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\r\n assert_equal(s[0], \"\\x01\")\r\n\r\n def test_pickle_bytes_overwrite(self):\r\n if sys.version_info[0] >= 3:\r\n data = np.array([1], dtype='b')\r\n data = pickle.loads(pickle.dumps(data))\r\n data[0] = 0xdd\r\n bytestring = \"\\x01 \".encode('ascii')\r\n assert_equal(bytestring[0:1], '\\x01'.encode('ascii'))\r\n\r\n def test_pickle_py2_array_latin1_hack(self):\r\n # Check that unpickling hacks in Py3 that support\r\n # encoding='latin1' work correctly.\r\n\r\n # Python2 output for 
pickle.dumps(numpy.array([129], dtype='b'))\r\n data = asbytes(\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n(I0\\n\"\r\n \"tp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n(S'i1'\\np8\\n\"\r\n \"I0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nNNNI-1\\nI-1\\nI0\\ntp12\\nbI00\\nS'\\\\x81'\\n\"\r\n \"p13\\ntp14\\nb.\")\r\n if sys.version_info[0] >= 3:\r\n # This should work:\r\n result = pickle.loads(data, encoding='latin1')\r\n assert_array_equal(result, np.array([129], dtype='b'))\r\n # Should not segfault:\r\n assert_raises(Exception, pickle.loads, data, encoding='koi8-r')\r\n\r\n def test_pickle_py2_scalar_latin1_hack(self):\r\n # Check that scalar unpickling hack in Py3 that supports\r\n # encoding='latin1' work correctly.\r\n\r\n # Python2 output for pickle.dumps(...)\r\n datas = [\r\n # (original, python2_pickle, koi8r_validity)\r\n (np.unicode_('\\u6bd2'),\r\n asbytes(\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\r\n \"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\nI0\\n\"\r\n \"tp6\\nbS'\\\\xd2k\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\"),\r\n 'invalid'),\r\n\r\n (np.float64(9e123),\r\n asbytes(\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'f8'\\n\"\r\n \"p2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI-1\\nI-1\\nI0\\ntp6\\n\"\r\n \"bS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np7\\ntp8\\nRp9\\n.\"),\r\n 'invalid'),\r\n\r\n (np.bytes_(asbytes('\\x9c')), # different 8-bit code point in KOI8-R vs latin1\r\n asbytes(\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'S1'\\np2\\n\"\r\n \"I0\\nI1\\ntp3\\nRp4\\n(I3\\nS'|'\\np5\\nNNNI1\\nI1\\nI0\\ntp6\\nbS'\\\\x9c'\\np7\\n\"\r\n \"tp8\\nRp9\\n.\"),\r\n 'different'),\r\n ]\r\n if sys.version_info[0] >= 3:\r\n for original, data, koi8r_validity in datas:\r\n result = pickle.loads(data, encoding='latin1')\r\n assert_equal(result, original)\r\n\r\n # Decoding under non-latin1 encoding (e.g.) KOI8-R can\r\n # produce bad results, but should not segfault.\r\n if koi8r_validity == 'different':\r\n # Unicode code points happen to lie within latin1,\r\n # but are different in koi8-r, resulting to silent\r\n # bogus results\r\n result = pickle.loads(data, encoding='koi8-r')\r\n assert_(result != original)\r\n elif koi8r_validity == 'invalid':\r\n # Unicode code points outside latin1, so results\r\n # to an encoding exception\r\n assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')\r\n else:\r\n raise ValueError(koi8r_validity)\r\n\r\n def test_structured_type_to_object(self):\r\n a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')\r\n a_obj = np.empty((2,), dtype=object)\r\n a_obj[0] = (0, 1)\r\n a_obj[1] = (3, 2)\r\n # astype records -> object\r\n assert_equal(a_rec.astype(object), a_obj)\r\n # '=' records -> object\r\n b = np.empty_like(a_obj)\r\n b[...] = a_rec\r\n assert_equal(b, a_obj)\r\n # '=' object -> records\r\n b = np.empty_like(a_rec)\r\n b[...] = a_obj\r\n assert_equal(b, a_rec)\r\n\r\n def test_assign_obj_listoflists(self):\r\n # Ticket # 1870\r\n # The inner list should get assigned to the object elements\r\n a = np.zeros(4, dtype=object)\r\n b = a.copy()\r\n a[0] = [1]\r\n a[1] = [2]\r\n a[2] = [3]\r\n a[3] = [4]\r\n b[...] = [[1], [2], [3], [4]]\r\n assert_equal(a, b)\r\n # The first dimension should get broadcast\r\n a = np.zeros((2, 2), dtype=object)\r\n a[...] 
= [[1, 2]]\r\n assert_equal(a, [[1, 2], [1, 2]])\r\n\r\n def test_memoryleak(self):\r\n # Ticket #1917 - ensure that array data doesn't leak\r\n for i in range(1000):\r\n # 100MB times 1000 would give 100GB of memory usage if it leaks\r\n a = np.empty((100000000,), dtype='i1')\r\n del a\r\n\r\n def test_ufunc_reduce_memoryleak(self):\r\n a = np.arange(6)\r\n acnt = sys.getrefcount(a)\r\n np.add.reduce(a)\r\n assert_equal(sys.getrefcount(a), acnt)\r\n\r\n def test_search_sorted_invalid_arguments(self):\r\n # Ticket #2021, should not segfault.\r\n x = np.arange(0, 4, dtype='datetime64[D]')\r\n assert_raises(TypeError, x.searchsorted, 1)\r\n\r\n def test_string_truncation(self):\r\n # Ticket #1990 - Data can be truncated in creation of an array from a\r\n # mixed sequence of numeric values and strings\r\n for val in [True, 1234, 123.4, complex(1, 234)]:\r\n for tostr in [asunicode, asbytes]:\r\n b = np.array([val, tostr('xx')])\r\n assert_equal(tostr(b[0]), tostr(val))\r\n b = np.array([tostr('xx'), val])\r\n assert_equal(tostr(b[1]), tostr(val))\r\n\r\n # test also with longer strings\r\n b = np.array([val, tostr('xxxxxxxxxx')])\r\n assert_equal(tostr(b[0]), tostr(val))\r\n b = np.array([tostr('xxxxxxxxxx'), val])\r\n assert_equal(tostr(b[1]), tostr(val))\r\n\r\n def test_string_truncation_ucs2(self):\r\n # Ticket #2081. Python compiled with two byte unicode\r\n # can lead to truncation if itemsize is not properly\r\n # adjusted for Numpy's four byte unicode.\r\n if sys.version_info[0] >= 3:\r\n a = np.array(['abcd'])\r\n else:\r\n a = np.array([sixu('abcd')])\r\n assert_equal(a.dtype.itemsize, 16)\r\n\r\n def test_unique_stable(self):\r\n # Ticket #2063 must always choose stable sort for argsort to\r\n # get consistent results\r\n v = np.array(([0]*5 + [1]*6 + [2]*6)*4)\r\n res = np.unique(v, return_index=True)\r\n tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))\r\n assert_equal(res, tgt)\r\n\r\n def test_unicode_alloc_dealloc_match(self):\r\n # Ticket #1578, the mismatch only showed up when running\r\n # python-debug for python versions >= 2.7, and then as\r\n # a core dump and error message.\r\n a = np.array(['abc'], dtype=np.unicode)[0]\r\n del a\r\n\r\n def test_refcount_error_in_clip(self):\r\n # Ticket #1588\r\n a = np.zeros((2,), dtype='>i2').clip(min=0)\r\n x = a + a\r\n # This used to segfault:\r\n y = str(x)\r\n # Check the final string:\r\n assert_(y == \"[0 0]\")\r\n\r\n def test_searchsorted_wrong_dtype(self):\r\n # Ticket #2189, it used to segfault, so we check that it raises the\r\n # proper exception.\r\n a = np.array([('a', 1)], dtype='S1, int')\r\n assert_raises(TypeError, np.searchsorted, a, 1.2)\r\n # Ticket #2066, similar problem:\r\n dtype = np.format_parser(['i4', 'i4'], [], [])\r\n a = np.recarray((2, ), dtype)\r\n assert_raises(TypeError, np.searchsorted, a, 1)\r\n\r\n def test_complex64_alignment(self):\r\n # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment\r\n dtt = np.complex64\r\n arr = np.arange(10, dtype=dtt)\r\n # 2D array\r\n arr2 = np.reshape(arr, (2, 5))\r\n # Fortran write followed by (C or F) read caused bus error\r\n data_str = arr2.tobytes('F')\r\n data_back = np.ndarray(arr2.shape,\r\n arr2.dtype,\r\n buffer=data_str,\r\n order='F')\r\n assert_array_equal(arr2, data_back)\r\n\r\n def test_structured_count_nonzero(self):\r\n arr = np.array([0, 1]).astype('i4, (2)i4')[:1]\r\n count = np.count_nonzero(arr)\r\n assert_equal(count, 0)\r\n\r\n def test_copymodule_preserves_f_contiguity(self):\r\n a = np.empty((2, 2), order='F')\r\n b = 
copy.copy(a)\r\n c = copy.deepcopy(a)\r\n assert_(b.flags.fortran)\r\n assert_(b.flags.f_contiguous)\r\n assert_(c.flags.fortran)\r\n assert_(c.flags.f_contiguous)\r\n\r\n def test_fortran_order_buffer(self):\r\n import numpy as np\r\n a = np.array([['Hello', 'Foob']], dtype='U5', order='F')\r\n arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)\r\n arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')],\r\n [sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]])\r\n assert_array_equal(arr, arr2)\r\n\r\n def test_assign_from_sequence_error(self):\r\n # Ticket #4024.\r\n arr = np.array([1, 2, 3])\r\n assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])\r\n arr.__setitem__(slice(None), [9])\r\n assert_equal(arr, [9, 9, 9])\r\n\r\n def test_format_on_flex_array_element(self):\r\n # Ticket #4369.\r\n dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])\r\n arr = np.array([('2000-01-01', 1)], dt)\r\n formatted = '{0}'.format(arr[0])\r\n assert_equal(formatted, str(arr[0]))\r\n\r\n def test_deepcopy_on_0d_array(self):\r\n # Ticket #3311.\r\n arr = np.array(3)\r\n arr_cp = copy.deepcopy(arr)\r\n\r\n assert_equal(arr, arr_cp)\r\n assert_equal(arr.shape, arr_cp.shape)\r\n assert_equal(int(arr), int(arr_cp))\r\n self.assertTrue(arr is not arr_cp)\r\n self.assertTrue(isinstance(arr_cp, type(arr)))\r\n\r\n def test_bool_subscript_crash(self):\r\n # gh-4494\r\n c = np.rec.array([(1, 2, 3), (4, 5, 6)])\r\n masked = c[np.array([True, False])]\r\n base = masked.base\r\n del masked, c\r\n base.dtype\r\n\r\n def test_richcompare_crash(self):\r\n # gh-4613\r\n import operator as op\r\n\r\n # dummy class where __array__ throws exception\r\n class Foo(object):\r\n __array_priority__ = 1002\r\n\r\n def __array__(self,*args,**kwargs):\r\n raise Exception()\r\n\r\n rhs = Foo()\r\n lhs = np.array(1)\r\n for f in [op.lt, op.le, op.gt, op.ge]:\r\n if sys.version_info[0] >= 3:\r\n assert_raises(TypeError, f, lhs, rhs)\r\n else:\r\n f(lhs, rhs)\r\n assert_(not op.eq(lhs, rhs))\r\n assert_(op.ne(lhs, rhs))\r\n\r\n def test_richcompare_scalar_and_subclass(self):\r\n # gh-4709\r\n class Foo(np.ndarray):\r\n def __eq__(self, other):\r\n return \"OK\"\r\n\r\n x = np.array([1,2,3]).view(Foo)\r\n assert_equal(10 == x, \"OK\")\r\n assert_equal(np.int32(10) == x, \"OK\")\r\n assert_equal(np.array([10]) == x, \"OK\")\r\n\r\n def test_pickle_empty_string(self):\r\n # gh-3926\r\n\r\n import pickle\r\n test_string = np.string_('')\r\n assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)\r\n\r\n def test_frompyfunc_many_args(self):\r\n # gh-5672\r\n\r\n def passer(*args):\r\n pass\r\n\r\n assert_raises(ValueError, np.frompyfunc, passer, 32, 1)\r\n\r\n def test_repeat_broadcasting(self):\r\n # gh-5743\r\n a = np.arange(60).reshape(3, 4, 5)\r\n for axis in chain(range(-a.ndim, a.ndim), [None]):\r\n assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))\r\n\r\n def test_frompyfunc_nout_0(self):\r\n # gh-2014\r\n\r\n def f(x):\r\n x[0], x[-1] = x[-1], x[0]\r\n\r\n uf = np.frompyfunc(f, 1, 0)\r\n a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])\r\n assert_equal(uf(a), ())\r\n assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])\r\n\r\n def test_leak_in_structured_dtype_comparison(self):\r\n # gh-6250\r\n recordtype = np.dtype([('a', np.float64),\r\n ('b', np.int32),\r\n ('d', (np.str, 5))])\r\n\r\n # Simple case\r\n a = np.zeros(2, dtype=recordtype)\r\n for i in range(100):\r\n a == a\r\n assert_(sys.getrefcount(a) < 10)\r\n\r\n # The case in the bug report.\r\n before = 
sys.getrefcount(a)\r\n u, v = a[0], a[1]\r\n u == v\r\n del u, v\r\n gc.collect()\r\n after = sys.getrefcount(a)\r\n assert_equal(before, after)\r\n\r\n def test_empty_percentile(self):\r\n # gh-6530 / gh-6553\r\n assert_array_equal(np.percentile(np.arange(10), []), np.array([]))\r\n\r\n def test_void_compare_segfault(self):\r\n # gh-6922. The following should not segfault\r\n a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])\r\n a.sort()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n",
"\"\"\" Test functions for linalg.decomp module\r\n\r\n\"\"\"\r\nfrom __future__ import division, print_function, absolute_import\r\n\r\n__usage__ = \"\"\"\r\nBuild linalg:\r\n python setup_linalg.py build\r\nRun tests if scipy is installed:\r\n python -c 'import scipy;scipy.linalg.test()'\r\nRun tests if linalg is not installed:\r\n python tests/test_decomp.py\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom numpy.testing import (TestCase, assert_equal, assert_almost_equal,\r\n assert_array_almost_equal, assert_array_equal,\r\n assert_raises, assert_, assert_allclose,\r\n run_module_suite, dec)\r\n\r\nfrom scipy._lib.six import xrange\r\n\r\nfrom scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,\r\n schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq,\r\n eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth, ordqz)\r\nfrom scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \\\r\n dsbev, dsbevd, dsbevx, zhbevd, zhbevx\r\nfrom scipy.linalg.misc import norm\r\n\r\nfrom numpy import array, transpose, sometrue, diag, ones, linalg, \\\r\n argsort, zeros, arange, float32, complex64, dot, conj, identity, \\\r\n ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \\\r\n asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\\\r\n triu, tril\r\n\r\nfrom numpy.random import normal, seed, random\r\n\r\nfrom scipy.linalg._testutils import assert_no_overwrite\r\n\r\n# digit precision to use in asserts for different types\r\nDIGITS = {'d':11, 'D':11, 'f':4, 'F':4}\r\n\r\n# XXX: This function should be available through numpy.testing\r\n\r\n\r\ndef assert_dtype_equal(act, des):\r\n if isinstance(act, ndarray):\r\n act = act.dtype\r\n else:\r\n act = dtype(act)\r\n\r\n if isinstance(des, ndarray):\r\n des = des.dtype\r\n else:\r\n des = dtype(des)\r\n\r\n assert_(act == des, 'dtype mismatch: \"%s\" (should be \"%s\") ' % (act, des))\r\n\r\n# XXX: This function should not be defined here, but somewhere in\r\n# scipy.linalg namespace\r\n\r\n\r\ndef symrand(dim_or_eigv):\r\n \"\"\"Return a random symmetric (Hermitian) matrix.\r\n\r\n If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues\r\n uniformly distributed on (-1,1).\r\n\r\n If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose\r\n eigenvalues are 'a'.\r\n \"\"\"\r\n if isinstance(dim_or_eigv, int):\r\n dim = dim_or_eigv\r\n d = random(dim)*2 - 1\r\n elif (isinstance(dim_or_eigv, ndarray) and\r\n len(dim_or_eigv.shape) == 1):\r\n dim = dim_or_eigv.shape[0]\r\n d = dim_or_eigv\r\n else:\r\n raise TypeError(\"input type not supported.\")\r\n\r\n v = random_rot(dim)\r\n h = dot(dot(v.T.conj(), diag(d)), v)\r\n # to avoid roundoff errors, symmetrize the matrix (again)\r\n h = 0.5*(h.T+h)\r\n return h\r\n\r\n# XXX: This function should not be defined here, but somewhere in\r\n# scipy.linalg namespace\r\n\r\n\r\ndef random_rot(dim):\r\n \"\"\"Return a random rotation matrix, drawn from the Haar distribution\r\n (the only uniform distribution on SO(n)).\r\n The algorithm is described in the paper\r\n Stewart, G.W., 'The efficient generation of random orthogonal\r\n matrices with an application to condition estimators', SIAM Journal\r\n on Numerical Analysis, 17(3), pp. 
403-409, 1980.\r\n For more information see\r\n http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization\"\"\"\r\n H = eye(dim)\r\n D = ones((dim,))\r\n for n in range(1, dim):\r\n x = normal(size=(dim-n+1,))\r\n D[n-1] = sign(x[0])\r\n x[0] -= D[n-1]*sqrt((x*x).sum())\r\n # Householder transformation\r\n\r\n Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum()\r\n mat = eye(dim)\r\n mat[n-1:,n-1:] = Hx\r\n H = dot(H, mat)\r\n # Fix the last sign such that the determinant is 1\r\n D[-1] = -D.prod()\r\n H = (D*H.T).T\r\n return H\r\n\r\n\r\nclass TestEigVals(TestCase):\r\n\r\n def test_simple(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n w = eigvals(a)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n assert_array_almost_equal(w,exact_w)\r\n\r\n def test_simple_tr(self):\r\n a = array([[1,2,3],[1,2,3],[2,5,6]],'d')\r\n a = transpose(a).copy()\r\n a = transpose(a)\r\n w = eigvals(a)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n assert_array_almost_equal(w,exact_w)\r\n\r\n def test_simple_complex(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6+1j]]\r\n w = eigvals(a)\r\n exact_w = [(9+1j+sqrt(92+6j))/2,\r\n 0,\r\n (9+1j-sqrt(92+6j))/2]\r\n assert_array_almost_equal(w,exact_w)\r\n\r\n def test_check_finite(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n w = eigvals(a, check_finite=False)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n assert_array_almost_equal(w,exact_w)\r\n\r\n\r\nclass TestEig(object):\r\n\r\n def test_simple(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n w,v = eig(a)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n v0 = array([1,1,(1+sqrt(93)/3)/2])\r\n v1 = array([3.,0,-1])\r\n v2 = array([1,1,(1-sqrt(93)/3)/2])\r\n v0 = v0 / sqrt(dot(v0,transpose(v0)))\r\n v1 = v1 / sqrt(dot(v1,transpose(v1)))\r\n v2 = v2 / sqrt(dot(v2,transpose(v2)))\r\n assert_array_almost_equal(w,exact_w)\r\n assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))\r\n assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))\r\n assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))\r\n for i in range(3):\r\n assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])\r\n w,v = eig(a,left=1,right=0)\r\n for i in range(3):\r\n assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i])\r\n\r\n def test_simple_complex_eig(self):\r\n a = [[1,2],[-2,1]]\r\n w,vl,vr = eig(a,left=1,right=1)\r\n assert_array_almost_equal(w, array([1+2j, 1-2j]))\r\n for i in range(2):\r\n assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])\r\n for i in range(2):\r\n assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),\r\n conjugate(w[i])*vl[:,i])\r\n\r\n def test_simple_complex(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6+1j]]\r\n w,vl,vr = eig(a,left=1,right=1)\r\n for i in range(3):\r\n assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])\r\n for i in range(3):\r\n assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),\r\n conjugate(w[i])*vl[:,i])\r\n\r\n def _check_gen_eig(self, A, B):\r\n A, B = asarray(A), asarray(B)\r\n msg = \"\\n%r\\n%r\" % (A, B)\r\n w, vr = eig(A,B)\r\n wt = eigvals(A,B)\r\n val1 = dot(A, vr)\r\n val2 = dot(B, vr) * w\r\n res = val1 - val2\r\n for i in range(res.shape[1]):\r\n if all(isfinite(res[:, i])):\r\n assert_array_almost_equal(res[:, i], 0, err_msg=msg)\r\n\r\n assert_array_almost_equal(sort(w[isfinite(w)]), sort(wt[isfinite(wt)]),\r\n err_msg=msg)\r\n\r\n length = np.empty(len(vr))\r\n for i in xrange(len(vr)):\r\n length[i] = norm(vr[:, i])\r\n assert_array_almost_equal(length, np.ones(length.size), err_msg=msg)\r\n\r\n @dec.knownfailureif(True, \"See gh-2254.\")\r\n def test_singular(self):\r\n 
# Example taken from\r\n # http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html\r\n A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34],\r\n [27,31,26,21,15], [38,44,44,24,30]))\r\n B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25],\r\n [16,25,27,14,23], [24,35,18,21,22]))\r\n\r\n olderr = np.seterr(all='ignore')\r\n try:\r\n self._check_gen_eig(A, B)\r\n finally:\r\n np.seterr(**olderr)\r\n\r\n def test_falker(self):\r\n \"\"\"Test matrices giving some Nan generalized eigen values.\"\"\"\r\n M = diag(array(([1,0,3])))\r\n K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))\r\n D = array(([1,-1,0],[-1,1,0],[0,0,0]))\r\n Z = zeros((3,3))\r\n I = identity(3)\r\n A = bmat([[I,Z],[Z,-K]])\r\n B = bmat([[Z,I],[M,D]])\r\n\r\n olderr = np.seterr(all='ignore')\r\n try:\r\n self._check_gen_eig(A, B)\r\n finally:\r\n np.seterr(**olderr)\r\n\r\n def test_bad_geneig(self):\r\n # Ticket #709 (strange return values from DGGEV)\r\n\r\n def matrices(omega):\r\n c1 = -9 + omega**2\r\n c2 = 2*omega\r\n A = [[1, 0, 0, 0],\r\n [0, 1, 0, 0],\r\n [0, 0, c1, 0],\r\n [0, 0, 0, c1]]\r\n B = [[0, 0, 1, 0],\r\n [0, 0, 0, 1],\r\n [1, 0, 0, -c2],\r\n [0, 1, c2, 0]]\r\n return A, B\r\n\r\n # With a buggy LAPACK, this can fail for different omega on different\r\n # machines -- so we need to test several values\r\n olderr = np.seterr(all='ignore')\r\n try:\r\n for k in xrange(100):\r\n A, B = matrices(omega=k*5./100)\r\n self._check_gen_eig(A, B)\r\n finally:\r\n np.seterr(**olderr)\r\n\r\n def test_check_finite(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n w,v = eig(a, check_finite=False)\r\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\r\n v0 = array([1,1,(1+sqrt(93)/3)/2])\r\n v1 = array([3.,0,-1])\r\n v2 = array([1,1,(1-sqrt(93)/3)/2])\r\n v0 = v0 / sqrt(dot(v0,transpose(v0)))\r\n v1 = v1 / sqrt(dot(v1,transpose(v1)))\r\n v2 = v2 / sqrt(dot(v2,transpose(v2)))\r\n assert_array_almost_equal(w,exact_w)\r\n assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))\r\n assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))\r\n assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))\r\n for i in range(3):\r\n assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])\r\n\r\n def test_not_square_error(self):\r\n \"\"\"Check that passing a non-square array raises a ValueError.\"\"\"\r\n A = np.arange(6).reshape(3,2)\r\n assert_raises(ValueError, eig, A)\r\n\r\n def test_shape_mismatch(self):\r\n \"\"\"Check that passing arrays of with different shapes raises a ValueError.\"\"\"\r\n A = identity(2)\r\n B = np.arange(9.0).reshape(3,3)\r\n assert_raises(ValueError, eig, A, B)\r\n assert_raises(ValueError, eig, B, A)\r\n\r\n\r\nclass TestEigBanded(TestCase):\r\n\r\n def __init__(self, *args):\r\n TestCase.__init__(self, *args)\r\n\r\n self.create_bandmat()\r\n\r\n def create_bandmat(self):\r\n \"\"\"Create the full matrix `self.fullmat` and\r\n the corresponding band matrix `self.bandmat`.\"\"\"\r\n N = 10\r\n self.KL = 2 # number of subdiagonals (below the diagonal)\r\n self.KU = 2 # number of superdiagonals (above the diagonal)\r\n\r\n # symmetric band matrix\r\n self.sym_mat = (diag(1.0*ones(N))\r\n + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1)\r\n + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))\r\n\r\n # hermitian band matrix\r\n self.herm_mat = (diag(-1.0*ones(N))\r\n + 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1)\r\n + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))\r\n\r\n # general real band matrix\r\n self.real_mat = (diag(1.0*ones(N))\r\n + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 
1)\r\n + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))\r\n\r\n # general complex band matrix\r\n self.comp_mat = (1j*diag(1.0*ones(N))\r\n + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1)\r\n + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))\r\n\r\n # Eigenvalues and -vectors from linalg.eig\r\n ew, ev = linalg.eig(self.sym_mat)\r\n ew = ew.real\r\n args = argsort(ew)\r\n self.w_sym_lin = ew[args]\r\n self.evec_sym_lin = ev[:,args]\r\n\r\n ew, ev = linalg.eig(self.herm_mat)\r\n ew = ew.real\r\n args = argsort(ew)\r\n self.w_herm_lin = ew[args]\r\n self.evec_herm_lin = ev[:,args]\r\n\r\n # Extract upper bands from symmetric and hermitian band matrices\r\n # (for use in dsbevd, dsbevx, zhbevd, zhbevx\r\n # and their single precision versions)\r\n LDAB = self.KU + 1\r\n self.bandmat_sym = zeros((LDAB, N), dtype=float)\r\n self.bandmat_herm = zeros((LDAB, N), dtype=complex)\r\n for i in xrange(LDAB):\r\n self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i)\r\n self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i)\r\n\r\n # Extract bands from general real and complex band matrix\r\n # (for use in dgbtrf, dgbtrs and their single precision versions)\r\n LDAB = 2*self.KL + self.KU + 1\r\n self.bandmat_real = zeros((LDAB, N), dtype=float)\r\n self.bandmat_real[2*self.KL,:] = diag(self.real_mat) # diagonal\r\n for i in xrange(self.KL):\r\n # superdiagonals\r\n self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1)\r\n # subdiagonals\r\n self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1)\r\n\r\n self.bandmat_comp = zeros((LDAB, N), dtype=complex)\r\n self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat) # diagonal\r\n for i in xrange(self.KL):\r\n # superdiagonals\r\n self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1)\r\n # subdiagonals\r\n self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1)\r\n\r\n # absolute value for linear equation system A*x = b\r\n self.b = 1.0*arange(N)\r\n self.bc = self.b * (1 + 1j)\r\n\r\n #####################################################################\r\n\r\n def test_dsbev(self):\r\n \"\"\"Compare dsbev eigenvalues and eigenvectors with\r\n the result of linalg.eig.\"\"\"\r\n w, evec, info = dsbev(self.bandmat_sym, compute_v=1)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))\r\n\r\n def test_dsbevd(self):\r\n \"\"\"Compare dsbevd eigenvalues and eigenvectors with\r\n the result of linalg.eig.\"\"\"\r\n w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))\r\n\r\n def test_dsbevx(self):\r\n \"\"\"Compare dsbevx eigenvalues and eigenvectors\r\n with the result of linalg.eig.\"\"\"\r\n N,N = shape(self.sym_mat)\r\n ## Achtung: Argumente 0.0,0.0,range?\r\n w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,\r\n compute_v=1, range=2)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))\r\n\r\n def test_zhbevd(self):\r\n \"\"\"Compare zhbevd eigenvalues and eigenvectors\r\n with the result of linalg.eig.\"\"\"\r\n w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_herm_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))\r\n\r\n def 
test_zhbevx(self):\r\n \"\"\"Compare zhbevx eigenvalues and eigenvectors\r\n with the result of linalg.eig.\"\"\"\r\n N,N = shape(self.herm_mat)\r\n ## Achtung: Argumente 0.0,0.0,range?\r\n w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,\r\n compute_v=1, range=2)\r\n evec_ = evec[:,argsort(w)]\r\n assert_array_almost_equal(sort(w), self.w_herm_lin)\r\n assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))\r\n\r\n def test_eigvals_banded(self):\r\n \"\"\"Compare eigenvalues of eigvals_banded with those of linalg.eig.\"\"\"\r\n w_sym = eigvals_banded(self.bandmat_sym)\r\n w_sym = w_sym.real\r\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\r\n\r\n w_herm = eigvals_banded(self.bandmat_herm)\r\n w_herm = w_herm.real\r\n assert_array_almost_equal(sort(w_herm), self.w_herm_lin)\r\n\r\n # extracting eigenvalues with respect to an index range\r\n ind1 = 2\r\n ind2 = 6\r\n w_sym_ind = eigvals_banded(self.bandmat_sym,\r\n select='i', select_range=(ind1, ind2))\r\n assert_array_almost_equal(sort(w_sym_ind),\r\n self.w_sym_lin[ind1:ind2+1])\r\n w_herm_ind = eigvals_banded(self.bandmat_herm,\r\n select='i', select_range=(ind1, ind2))\r\n assert_array_almost_equal(sort(w_herm_ind),\r\n self.w_herm_lin[ind1:ind2+1])\r\n\r\n # extracting eigenvalues with respect to a value range\r\n v_lower = self.w_sym_lin[ind1] - 1.0e-5\r\n v_upper = self.w_sym_lin[ind2] + 1.0e-5\r\n w_sym_val = eigvals_banded(self.bandmat_sym,\r\n select='v', select_range=(v_lower, v_upper))\r\n assert_array_almost_equal(sort(w_sym_val),\r\n self.w_sym_lin[ind1:ind2+1])\r\n\r\n v_lower = self.w_herm_lin[ind1] - 1.0e-5\r\n v_upper = self.w_herm_lin[ind2] + 1.0e-5\r\n w_herm_val = eigvals_banded(self.bandmat_herm,\r\n select='v', select_range=(v_lower, v_upper))\r\n assert_array_almost_equal(sort(w_herm_val),\r\n self.w_herm_lin[ind1:ind2+1])\r\n\r\n w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)\r\n w_sym = w_sym.real\r\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\r\n\r\n def test_eig_banded(self):\r\n \"\"\"Compare eigenvalues and eigenvectors of eig_banded\r\n with those of linalg.eig. 
\"\"\"\r\n w_sym, evec_sym = eig_banded(self.bandmat_sym)\r\n evec_sym_ = evec_sym[:,argsort(w_sym.real)]\r\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))\r\n\r\n w_herm, evec_herm = eig_banded(self.bandmat_herm)\r\n evec_herm_ = evec_herm[:,argsort(w_herm.real)]\r\n assert_array_almost_equal(sort(w_herm), self.w_herm_lin)\r\n assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))\r\n\r\n # extracting eigenvalues with respect to an index range\r\n ind1 = 2\r\n ind2 = 6\r\n w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,\r\n select='i', select_range=(ind1, ind2))\r\n assert_array_almost_equal(sort(w_sym_ind),\r\n self.w_sym_lin[ind1:ind2+1])\r\n assert_array_almost_equal(abs(evec_sym_ind),\r\n abs(self.evec_sym_lin[:,ind1:ind2+1]))\r\n\r\n w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,\r\n select='i', select_range=(ind1, ind2))\r\n assert_array_almost_equal(sort(w_herm_ind),\r\n self.w_herm_lin[ind1:ind2+1])\r\n assert_array_almost_equal(abs(evec_herm_ind),\r\n abs(self.evec_herm_lin[:,ind1:ind2+1]))\r\n\r\n # extracting eigenvalues with respect to a value range\r\n v_lower = self.w_sym_lin[ind1] - 1.0e-5\r\n v_upper = self.w_sym_lin[ind2] + 1.0e-5\r\n w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,\r\n select='v', select_range=(v_lower, v_upper))\r\n assert_array_almost_equal(sort(w_sym_val),\r\n self.w_sym_lin[ind1:ind2+1])\r\n assert_array_almost_equal(abs(evec_sym_val),\r\n abs(self.evec_sym_lin[:,ind1:ind2+1]))\r\n\r\n v_lower = self.w_herm_lin[ind1] - 1.0e-5\r\n v_upper = self.w_herm_lin[ind2] + 1.0e-5\r\n w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,\r\n select='v', select_range=(v_lower, v_upper))\r\n assert_array_almost_equal(sort(w_herm_val),\r\n self.w_herm_lin[ind1:ind2+1])\r\n assert_array_almost_equal(abs(evec_herm_val),\r\n abs(self.evec_herm_lin[:,ind1:ind2+1]))\r\n\r\n w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)\r\n evec_sym_ = evec_sym[:,argsort(w_sym.real)]\r\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\r\n assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))\r\n\r\n def test_dgbtrf(self):\r\n \"\"\"Compare dgbtrf LU factorisation with the LU factorisation result\r\n of linalg.lu.\"\"\"\r\n M,N = shape(self.real_mat)\r\n lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)\r\n\r\n # extract matrix u from lu_symm_band\r\n u = diag(lu_symm_band[2*self.KL,:])\r\n for i in xrange(self.KL + self.KU):\r\n u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)\r\n\r\n p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)\r\n assert_array_almost_equal(u, u_lin)\r\n\r\n def test_zgbtrf(self):\r\n \"\"\"Compare zgbtrf LU factorisation with the LU factorisation result\r\n of linalg.lu.\"\"\"\r\n M,N = shape(self.comp_mat)\r\n lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)\r\n\r\n # extract matrix u from lu_symm_band\r\n u = diag(lu_symm_band[2*self.KL,:])\r\n for i in xrange(self.KL + self.KU):\r\n u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)\r\n\r\n p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)\r\n assert_array_almost_equal(u, u_lin)\r\n\r\n def test_dgbtrs(self):\r\n \"\"\"Compare dgbtrs solutions for linear equation system A*x = b\r\n with solutions of linalg.solve.\"\"\"\r\n\r\n lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)\r\n y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)\r\n\r\n y_lin = linalg.solve(self.real_mat, 
self.b)\r\n assert_array_almost_equal(y, y_lin)\r\n\r\n def test_zgbtrs(self):\r\n \"\"\"Compare zgbtrs solutions for linear equation system A*x = b\r\n with solutions of linalg.solve.\"\"\"\r\n\r\n lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)\r\n y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)\r\n\r\n y_lin = linalg.solve(self.comp_mat, self.bc)\r\n assert_array_almost_equal(y, y_lin)\r\n\r\n\r\ndef test_eigh():\r\n DIM = 6\r\n v = {'dim': (DIM,),\r\n 'dtype': ('f','d','F','D'),\r\n 'overwrite': (True, False),\r\n 'lower': (True, False),\r\n 'turbo': (True, False),\r\n 'eigvals': (None, (2, DIM-2))}\r\n\r\n for dim in v['dim']:\r\n for typ in v['dtype']:\r\n for overwrite in v['overwrite']:\r\n for turbo in v['turbo']:\r\n for eigenvalues in v['eigvals']:\r\n for lower in v['lower']:\r\n yield (eigenhproblem_standard,\r\n 'ordinary',\r\n dim, typ, overwrite, lower,\r\n turbo, eigenvalues)\r\n yield (eigenhproblem_general,\r\n 'general ',\r\n dim, typ, overwrite, lower,\r\n turbo, eigenvalues)\r\n\r\n\r\ndef test_eigh_of_sparse():\r\n # This tests the rejection of inputs that eigh cannot currently handle.\r\n import scipy.sparse\r\n a = scipy.sparse.identity(2).tocsc()\r\n b = np.atleast_2d(a)\r\n assert_raises(ValueError, eigh, a)\r\n assert_raises(ValueError, eigh, b)\r\n\r\n\r\ndef _complex_symrand(dim, dtype):\r\n a1, a2 = symrand(dim), symrand(dim)\r\n # add antisymmetric matrix as imag part\r\n a = a1 + 1j*(triu(a2)-tril(a2))\r\n return a.astype(dtype)\r\n\r\n\r\ndef eigenhproblem_standard(desc, dim, dtype,\r\n overwrite, lower, turbo,\r\n eigenvalues):\r\n \"\"\"Solve a standard eigenvalue problem.\"\"\"\r\n if iscomplex(empty(1, dtype=dtype)):\r\n a = _complex_symrand(dim, dtype)\r\n else:\r\n a = symrand(dim).astype(dtype)\r\n\r\n if overwrite:\r\n a_c = a.copy()\r\n else:\r\n a_c = a\r\n w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigenvalues)\r\n assert_dtype_equal(z.dtype, dtype)\r\n w = w.astype(dtype)\r\n diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real\r\n assert_array_almost_equal(diag_, w, DIGITS[dtype])\r\n\r\n\r\ndef eigenhproblem_general(desc, dim, dtype,\r\n overwrite, lower, turbo,\r\n eigenvalues):\r\n \"\"\"Solve a generalized eigenvalue problem.\"\"\"\r\n if iscomplex(empty(1, dtype=dtype)):\r\n a = _complex_symrand(dim, dtype)\r\n b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype)\r\n else:\r\n a = symrand(dim).astype(dtype)\r\n b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype)\r\n\r\n if overwrite:\r\n a_c, b_c = a.copy(), b.copy()\r\n else:\r\n a_c, b_c = a, b\r\n\r\n w, z = eigh(a, b, overwrite_a=overwrite, lower=lower,\r\n overwrite_b=overwrite, turbo=turbo, eigvals=eigenvalues)\r\n assert_dtype_equal(z.dtype, dtype)\r\n w = w.astype(dtype)\r\n diag1_ = diag(dot(z.T.conj(), dot(a_c, z))).real\r\n assert_array_almost_equal(diag1_, w, DIGITS[dtype])\r\n diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real\r\n assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype])\r\n\r\n\r\ndef test_eigh_integer():\r\n a = array([[1,2],[2,7]])\r\n b = array([[3,1],[1,5]])\r\n w,z = eigh(a)\r\n w,z = eigh(a,b)\r\n\r\n\r\nclass TestLU(TestCase):\r\n\r\n def __init__(self, *args, **kw):\r\n TestCase.__init__(self, *args, **kw)\r\n\r\n self.a = array([[1,2,3],[1,2,3],[2,5,6]])\r\n self.ca = array([[1,2,3],[1,2,3],[2,5j,6]])\r\n # Those matrices are more robust to detect problems in permutation\r\n # matrices than the ones above\r\n self.b = array([[1,2,3],[4,5,6],[7,8,9]])\r\n self.cb = 
array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])\r\n\r\n # Reectangular matrices\r\n self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])\r\n self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])\r\n\r\n self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])\r\n self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])\r\n\r\n # Medium sizes matrices\r\n self.med = random((30, 40))\r\n self.cmed = random((30, 40)) + 1.j * random((30, 40))\r\n\r\n def _test_common(self, data):\r\n p,l,u = lu(data)\r\n assert_array_almost_equal(dot(dot(p,l),u),data)\r\n pl,u = lu(data,permute_l=1)\r\n assert_array_almost_equal(dot(pl,u),data)\r\n\r\n # Simple tests\r\n def test_simple(self):\r\n self._test_common(self.a)\r\n\r\n def test_simple_complex(self):\r\n self._test_common(self.ca)\r\n\r\n def test_simple2(self):\r\n self._test_common(self.b)\r\n\r\n def test_simple2_complex(self):\r\n self._test_common(self.cb)\r\n\r\n # rectangular matrices tests\r\n def test_hrectangular(self):\r\n self._test_common(self.hrect)\r\n\r\n def test_vrectangular(self):\r\n self._test_common(self.vrect)\r\n\r\n def test_hrectangular_complex(self):\r\n self._test_common(self.chrect)\r\n\r\n def test_vrectangular_complex(self):\r\n self._test_common(self.cvrect)\r\n\r\n # Bigger matrices\r\n def test_medium1(self):\r\n \"\"\"Check lu decomposition on medium size, rectangular matrix.\"\"\"\r\n self._test_common(self.med)\r\n\r\n def test_medium1_complex(self):\r\n \"\"\"Check lu decomposition on medium size, rectangular matrix.\"\"\"\r\n self._test_common(self.cmed)\r\n\r\n def test_check_finite(self):\r\n p, l, u = lu(self.a, check_finite=False)\r\n assert_array_almost_equal(dot(dot(p,l),u), self.a)\r\n\r\n def test_simple_known(self):\r\n # Ticket #1458\r\n for order in ['C', 'F']:\r\n A = np.array([[2, 1],[0, 1.]], order=order)\r\n LU, P = lu_factor(A)\r\n assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))\r\n assert_array_equal(P, np.array([0, 1]))\r\n\r\n\r\nclass TestLUSingle(TestLU):\r\n \"\"\"LU testers for single precision, real and double\"\"\"\r\n def __init__(self, *args, **kw):\r\n TestLU.__init__(self, *args, **kw)\r\n\r\n self.a = self.a.astype(float32)\r\n self.ca = self.ca.astype(complex64)\r\n self.b = self.b.astype(float32)\r\n self.cb = self.cb.astype(complex64)\r\n\r\n self.hrect = self.hrect.astype(float32)\r\n self.chrect = self.hrect.astype(complex64)\r\n\r\n self.vrect = self.vrect.astype(float32)\r\n self.cvrect = self.vrect.astype(complex64)\r\n\r\n self.med = self.vrect.astype(float32)\r\n self.cmed = self.vrect.astype(complex64)\r\n\r\n\r\nclass TestLUSolve(TestCase):\r\n def setUp(self):\r\n seed(1234)\r\n\r\n def test_lu(self):\r\n a0 = random((10,10))\r\n b = random((10,))\r\n\r\n for order in ['C', 'F']:\r\n a = np.array(a0, order=order)\r\n\r\n x1 = solve(a,b)\r\n\r\n lu_a = lu_factor(a)\r\n x2 = lu_solve(lu_a,b)\r\n\r\n assert_array_almost_equal(x1,x2)\r\n\r\n def test_check_finite(self):\r\n a = random((10,10))\r\n b = random((10,))\r\n x1 = solve(a,b)\r\n\r\n lu_a = lu_factor(a, check_finite=False)\r\n x2 = lu_solve(lu_a,b, check_finite=False)\r\n\r\n assert_array_almost_equal(x1,x2)\r\n\r\n\r\nclass TestSVD_GESDD(TestCase):\r\n def setUp(self):\r\n self.lapack_driver = 'gesdd'\r\n seed(1234)\r\n\r\n def test_degenerate(self):\r\n assert_raises(TypeError, svd, [[1.]], lapack_driver=1.)\r\n assert_raises(ValueError, svd, [[1.]], lapack_driver='foo')\r\n\r\n def test_simple(self):\r\n a = [[1,2,3],[1,20,3],[2,5,6]]\r\n for 
full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(3))\r\n assert_array_almost_equal(dot(transpose(vh),vh),identity(3))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_simple_singular(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(3))\r\n assert_array_almost_equal(dot(transpose(vh),vh),identity(3))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_simple_underdet(self):\r\n a = [[1,2,3],[4,5,6]]\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0]))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_simple_overdet(self):\r\n a = [[1,2],[4,5],[3,4]]\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1]))\r\n assert_array_almost_equal(dot(transpose(vh),vh),identity(2))\r\n sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_random(self):\r\n n = 20\r\n m = 15\r\n for i in range(3):\r\n for a in [random([n,m]),random([m,n])]:\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1]))\r\n assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0]))\r\n sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_simple_complex(self):\r\n a = [[1,2,3],[1,2j,3],[2,5,6]]\r\n for full_matrices in (True, False):\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))\r\n assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0]))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_random_complex(self):\r\n n = 20\r\n m = 15\r\n for i in range(3):\r\n for full_matrices in (True, False):\r\n for a in [random([n,m]),random([m,n])]:\r\n a = a + 1j*random(list(a.shape))\r\n u,s,vh = svd(a, full_matrices=full_matrices,\r\n lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))\r\n # This fails when [m,n]\r\n # assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char))\r\n sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n 
assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_crash_1580(self):\r\n sizes = [(13, 23), (30, 50), (60, 100)]\r\n np.random.seed(1234)\r\n for sz in sizes:\r\n for dt in [np.float32, np.float64, np.complex64, np.complex128]:\r\n a = np.random.rand(*sz).astype(dt)\r\n # should not crash\r\n svd(a, lapack_driver=self.lapack_driver)\r\n\r\n def test_check_finite(self):\r\n a = [[1,2,3],[1,20,3],[2,5,6]]\r\n u,s,vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver)\r\n assert_array_almost_equal(dot(transpose(u),u),identity(3))\r\n assert_array_almost_equal(dot(transpose(vh),vh),identity(3))\r\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\r\n for i in range(len(s)):\r\n sigma[i,i] = s[i]\r\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\r\n\r\n def test_gh_5039(self):\r\n # This is a smoke test for https://github.com/scipy/scipy/issues/5039\r\n #\r\n # The following is reported to raise \"ValueError: On entry to DGESDD\r\n # parameter number 12 had an illegal value\".\r\n # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')`\r\n # This is reported to only show up on LAPACK 3.0.3.\r\n #\r\n # The matrix below is taken from the call to\r\n # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest\r\n b = np.array(\r\n [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.],\r\n [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.],\r\n [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.],\r\n [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]])\r\n svd(b, lapack_driver=self.lapack_driver)\r\n\r\n\r\nclass TestSVD_GESVD(TestSVD_GESDD):\r\n def setUp(self):\r\n self.lapack_driver = 'gesvd'\r\n seed(1234)\r\n\r\n\r\nclass TestSVDVals(TestCase):\r\n\r\n def test_empty(self):\r\n for a in [[]], np.empty((2, 0)), np.ones((0, 3)):\r\n s = svdvals(a)\r\n assert_equal(s, np.empty(0))\r\n\r\n def test_simple(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n s = svdvals(a)\r\n assert_(len(s) == 3)\r\n assert_(s[0] >= s[1] >= s[2])\r\n\r\n def test_simple_underdet(self):\r\n a = [[1,2,3],[4,5,6]]\r\n s = svdvals(a)\r\n assert_(len(s) == 2)\r\n assert_(s[0] >= s[1])\r\n\r\n def test_simple_overdet(self):\r\n a = [[1,2],[4,5],[3,4]]\r\n s = svdvals(a)\r\n assert_(len(s) == 2)\r\n assert_(s[0] >= s[1])\r\n\r\n def test_simple_complex(self):\r\n a = [[1,2,3],[1,20,3j],[2,5,6]]\r\n s = svdvals(a)\r\n assert_(len(s) == 3)\r\n assert_(s[0] >= s[1] >= s[2])\r\n\r\n def test_simple_underdet_complex(self):\r\n a = [[1,2,3],[4,5j,6]]\r\n s = svdvals(a)\r\n assert_(len(s) == 2)\r\n assert_(s[0] >= s[1])\r\n\r\n def test_simple_overdet_complex(self):\r\n a = [[1,2],[4,5],[3j,4]]\r\n s = svdvals(a)\r\n assert_(len(s) == 2)\r\n assert_(s[0] >= s[1])\r\n\r\n def test_check_finite(self):\r\n a = [[1,2,3],[1,2,3],[2,5,6]]\r\n s = svdvals(a, check_finite=False)\r\n assert_(len(s) == 3)\r\n assert_(s[0] >= s[1] >= s[2])\r\n\r\n @dec.slow\r\n def test_crash_2609(self):\r\n np.random.seed(1234)\r\n a = np.random.rand(1500, 2800)\r\n # Shouldn't crash:\r\n svdvals(a)\r\n\r\n\r\nclass TestDiagSVD(TestCase):\r\n\r\n def test_simple(self):\r\n assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]])\r\n\r\n\r\nclass TestQR(TestCase):\r\n\r\n def setUp(self):\r\n seed(1234)\r\n\r\n def test_simple(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_simple_left(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r = qr(a)\r\n c = [1, 2, 3]\r\n qc,r2 = qr_multiply(a, c, 
\"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n assert_array_almost_equal(r, r2)\r\n qc,r2 = qr_multiply(a, identity(3), \"left\")\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_simple_right(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r = qr(a)\r\n c = [1, 2, 3]\r\n qc,r2 = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), qc)\r\n assert_array_almost_equal(r, r2)\r\n qc,r = qr_multiply(a, identity(3))\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_simple_pivoting(self):\r\n a = np.asarray([[8,2,3],[2,9,3],[5,3,6]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_left_pivoting(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r,jpvt = qr(a, pivoting=True)\r\n c = [1, 2, 3]\r\n qc,r,jpvt = qr_multiply(a, c, \"left\", True)\r\n assert_array_almost_equal(dot(q, c), qc)\r\n\r\n def test_simple_right_pivoting(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r,jpvt = qr(a, pivoting=True)\r\n c = [1, 2, 3]\r\n qc,r,jpvt = qr_multiply(a, c, pivoting=True)\r\n assert_array_almost_equal(dot(c, q), qc)\r\n\r\n def test_simple_trap(self):\r\n a = [[8,2,3],[2,9,3]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_simple_trap_pivoting(self):\r\n a = np.asarray([[8,2,3],[2,9,3]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_tall(self):\r\n # full version\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_simple_tall_pivoting(self):\r\n # full version pivoting\r\n a = np.asarray([[8,2],[2,9],[5,3]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_tall_e(self):\r\n # economy version\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r = qr(a, mode='economic')\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a)\r\n assert_equal(q.shape, (3,2))\r\n assert_equal(r.shape, (2,2))\r\n\r\n def test_simple_tall_e_pivoting(self):\r\n # economy version pivoting\r\n a = np.asarray([[8,2],[2,9],[5,3]])\r\n q,r,p = qr(a, pivoting=True, mode='economic')\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p], mode='economic')\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_tall_left(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2]\r\n qc,r2 = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n assert_array_almost_equal(r, r2)\r\n c = array([1,2,0])\r\n qc,r2 = qr_multiply(a, c, \"left\", overwrite_c=True)\r\n 
assert_array_almost_equal(dot(q, c[:2]), qc)\r\n qc,r = qr_multiply(a, identity(2), \"left\")\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_tall_left_pivoting(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r,jpvt = qr(a, mode=\"economic\", pivoting=True)\r\n c = [1, 2]\r\n qc,r,kpvt = qr_multiply(a, c, \"left\", True)\r\n assert_array_equal(jpvt, kpvt)\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r,jpvt = qr_multiply(a, identity(2), \"left\", True)\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_tall_right(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2, 3]\r\n cq,r2 = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n assert_array_almost_equal(r, r2)\r\n cq,r = qr_multiply(a, identity(3))\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_simple_tall_right_pivoting(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n q,r,jpvt = qr(a, pivoting=True, mode=\"economic\")\r\n c = [1, 2, 3]\r\n cq,r,jpvt = qr_multiply(a, c, pivoting=True)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True)\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_simple_fat(self):\r\n # full version\r\n a = [[8,2,5],[2,9,3]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a)\r\n assert_equal(q.shape, (2,2))\r\n assert_equal(r.shape, (2,3))\r\n\r\n def test_simple_fat_pivoting(self):\r\n # full version pivoting\r\n a = np.asarray([[8,2,5],[2,9,3]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n assert_equal(q.shape, (2,2))\r\n assert_equal(r.shape, (2,3))\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_fat_e(self):\r\n # economy version\r\n a = [[8,2,3],[2,9,5]]\r\n q,r = qr(a, mode='economic')\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a)\r\n assert_equal(q.shape, (2,2))\r\n assert_equal(r.shape, (2,3))\r\n\r\n def test_simple_fat_e_pivoting(self):\r\n # economy version pivoting\r\n a = np.asarray([[8,2,3],[2,9,5]])\r\n q,r,p = qr(a, pivoting=True, mode='economic')\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n assert_equal(q.shape, (2,2))\r\n assert_equal(r.shape, (2,3))\r\n q2,r2 = qr(a[:,p], mode='economic')\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_fat_left(self):\r\n a = [[8,2,3],[2,9,5]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2]\r\n qc,r2 = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n assert_array_almost_equal(r, r2)\r\n qc,r = qr_multiply(a, identity(2), \"left\")\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_fat_left_pivoting(self):\r\n a = [[8,2,3],[2,9,5]]\r\n q,r,jpvt = qr(a, mode=\"economic\", pivoting=True)\r\n c = [1, 2]\r\n qc,r,jpvt = qr_multiply(a, c, \"left\", True)\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r,jpvt = qr_multiply(a, identity(2), \"left\", True)\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_fat_right(self):\r\n a = [[8,2,3],[2,9,5]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2]\r\n cq,r2 = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n 
assert_array_almost_equal(r, r2)\r\n cq,r = qr_multiply(a, identity(2))\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_simple_fat_right_pivoting(self):\r\n a = [[8,2,3],[2,9,5]]\r\n q,r,jpvt = qr(a, pivoting=True, mode=\"economic\")\r\n c = [1, 2]\r\n cq,r,jpvt = qr_multiply(a, c, pivoting=True)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True)\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_simple_complex(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_simple_complex_left(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n c = [1, 2, 3+4j]\r\n qc,r = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r = qr_multiply(a, identity(3), \"left\")\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_simple_complex_right(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n c = [1, 2, 3+4j]\r\n qc,r = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), qc)\r\n qc,r = qr_multiply(a, identity(3))\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_simple_tall_complex_left(self):\r\n a = [[8,2+3j],[2,9],[5+7j,3]]\r\n q,r = qr(a, mode=\"economic\")\r\n c = [1, 2+2j]\r\n qc,r2 = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n assert_array_almost_equal(r, r2)\r\n c = array([1,2,0])\r\n qc,r2 = qr_multiply(a, c, \"left\", overwrite_c=True)\r\n assert_array_almost_equal(dot(q, c[:2]), qc)\r\n qc,r = qr_multiply(a, identity(2), \"left\")\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_simple_complex_left_conjugate(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n c = [1, 2, 3+4j]\r\n qc,r = qr_multiply(a, c, \"left\", conjugate=True)\r\n assert_array_almost_equal(dot(q.conjugate(), c), qc)\r\n\r\n def test_simple_complex_tall_left_conjugate(self):\r\n a = [[3,3+4j],[5,2+2j],[3,2]]\r\n q,r = qr(a, mode='economic')\r\n c = [1, 3+4j]\r\n qc,r = qr_multiply(a, c, \"left\", conjugate=True)\r\n assert_array_almost_equal(dot(q.conjugate(), c), qc)\r\n\r\n def test_simple_complex_right_conjugate(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n q,r = qr(a)\r\n c = [1, 2, 3+4j]\r\n qc,r = qr_multiply(a, c, conjugate=True)\r\n assert_array_almost_equal(dot(c, q.conjugate()), qc)\r\n\r\n def test_simple_complex_pivoting(self):\r\n a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_simple_complex_left_pivoting(self):\r\n a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])\r\n q,r,jpvt = qr(a, pivoting=True)\r\n c = [1, 2, 3+4j]\r\n qc,r,jpvt = qr_multiply(a, c, \"left\", True)\r\n assert_array_almost_equal(dot(q, c), qc)\r\n\r\n def test_simple_complex_right_pivoting(self):\r\n a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])\r\n q,r,jpvt = qr(a, pivoting=True)\r\n c = [1, 2, 3+4j]\r\n qc,r,jpvt = qr_multiply(a, c, pivoting=True)\r\n assert_array_almost_equal(dot(c, q), qc)\r\n\r\n def test_random(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def 
test_random_left(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n q,r = qr(a)\r\n c = random([n])\r\n qc,r = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r = qr_multiply(a, identity(n), \"left\")\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_random_right(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n q,r = qr(a)\r\n c = random([n])\r\n cq,r = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r = qr_multiply(a, identity(n))\r\n assert_array_almost_equal(q, cq)\r\n\r\n def test_random_pivoting(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_random_tall(self):\r\n # full version\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_random_tall_left(self):\r\n # full version\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a, mode=\"economic\")\r\n c = random([n])\r\n qc,r = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r = qr_multiply(a, identity(n), \"left\")\r\n assert_array_almost_equal(qc, q)\r\n\r\n def test_random_tall_right(self):\r\n # full version\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a, mode=\"economic\")\r\n c = random([m])\r\n cq,r = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r = qr_multiply(a, identity(m))\r\n assert_array_almost_equal(cq, q)\r\n\r\n def test_random_tall_pivoting(self):\r\n # full version pivoting\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_random_tall_e(self):\r\n # economy version\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a, mode='economic')\r\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a)\r\n assert_equal(q.shape, (m,n))\r\n assert_equal(r.shape, (n,n))\r\n\r\n def test_random_tall_e_pivoting(self):\r\n # economy version pivoting\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r,p = qr(a, pivoting=True, mode='economic')\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n assert_equal(q.shape, (m,n))\r\n assert_equal(r.shape, (n,n))\r\n q2,r2 = qr(a[:,p], mode='economic')\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_random_trap(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_random_trap_pivoting(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])\r\n q,r,p = qr(a, 
pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_random_complex(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n q,r = qr(a)\r\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_random_complex_left(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n q,r = qr(a)\r\n c = random([n])+1j*random([n])\r\n qc,r = qr_multiply(a, c, \"left\")\r\n assert_array_almost_equal(dot(q, c), qc)\r\n qc,r = qr_multiply(a, identity(n), \"left\")\r\n assert_array_almost_equal(q, qc)\r\n\r\n def test_random_complex_right(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n q,r = qr(a)\r\n c = random([n])+1j*random([n])\r\n cq,r = qr_multiply(a, c)\r\n assert_array_almost_equal(dot(c, q), cq)\r\n cq,r = qr_multiply(a, identity(n))\r\n assert_array_almost_equal(q, cq)\r\n\r\n def test_random_complex_pivoting(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n q,r,p = qr(a, pivoting=True)\r\n d = abs(diag(r))\r\n assert_(all(d[1:] <= d[:-1]))\r\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))\r\n assert_array_almost_equal(dot(q,r),a[:,p])\r\n q2,r2 = qr(a[:,p])\r\n assert_array_almost_equal(q,q2)\r\n assert_array_almost_equal(r,r2)\r\n\r\n def test_check_finite(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n q,r = qr(a, check_finite=False)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(q,r),a)\r\n\r\n def test_lwork(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n # Get comparison values\r\n q,r = qr(a, lwork=None)\r\n\r\n # Test against minimum valid lwork\r\n q2,r2 = qr(a, lwork=3)\r\n assert_array_almost_equal(q2,q)\r\n assert_array_almost_equal(r2,r)\r\n\r\n # Test against larger lwork\r\n q3,r3 = qr(a, lwork=10)\r\n assert_array_almost_equal(q3,q)\r\n assert_array_almost_equal(r3,r)\r\n\r\n # Test against explicit lwork=-1\r\n q4,r4 = qr(a, lwork=-1)\r\n assert_array_almost_equal(q4,q)\r\n assert_array_almost_equal(r4,r)\r\n\r\n # Test against invalid lwork\r\n assert_raises(Exception, qr, (a,), {'lwork':0})\r\n assert_raises(Exception, qr, (a,), {'lwork':2})\r\n\r\nclass TestRQ(TestCase):\r\n\r\n def setUp(self):\r\n seed(1234)\r\n\r\n def test_simple(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_r(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n r,q = rq(a)\r\n r2 = rq(a, mode='r')\r\n assert_array_almost_equal(r, r2)\r\n\r\n def test_random(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(n))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_simple_trap(self):\r\n a = [[8,2,3],[2,9,3]]\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_simple_tall(self):\r\n a = [[8,2],[2,9],[5,3]]\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_simple_fat(self):\r\n a = [[8,2,5],[2,9,3]]\r\n r,q = rq(a)\r\n 
assert_array_almost_equal(dot(transpose(q),q),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_simple_complex(self):\r\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_random_tall(self):\r\n m = 200\r\n n = 100\r\n for k in range(2):\r\n a = random([m,n])\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(n))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_random_trap(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(n))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_random_trap_economic(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])\r\n r,q = rq(a, mode='economic')\r\n assert_array_almost_equal(dot(q,transpose(q)),identity(m))\r\n assert_array_almost_equal(dot(r,q),a)\r\n assert_equal(q.shape, (m, n))\r\n assert_equal(r.shape, (m, m))\r\n\r\n def test_random_complex(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n r,q = rq(a)\r\n assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n def test_random_complex_economic(self):\r\n m = 100\r\n n = 200\r\n for k in range(2):\r\n a = random([m,n])+1j*random([m,n])\r\n r,q = rq(a, mode='economic')\r\n assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m))\r\n assert_array_almost_equal(dot(r,q),a)\r\n assert_equal(q.shape, (m, n))\r\n assert_equal(r.shape, (m, m))\r\n\r\n def test_check_finite(self):\r\n a = [[8,2,3],[2,9,3],[5,3,6]]\r\n r,q = rq(a, check_finite=False)\r\n assert_array_almost_equal(dot(q, transpose(q)),identity(3))\r\n assert_array_almost_equal(dot(r,q),a)\r\n\r\n\r\ntransp = transpose\r\nany = sometrue\r\n\r\n\r\nclass TestSchur(TestCase):\r\n\r\n def test_simple(self):\r\n a = [[8,12,3],[2,9,3],[10,3,6]]\r\n t,z = schur(a)\r\n assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)\r\n tc,zc = schur(a,'complex')\r\n assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))\r\n assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a)\r\n tc2,zc2 = rsf2csf(tc,zc)\r\n assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a)\r\n\r\n def test_sort(self):\r\n a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]\r\n s,u,sdim = schur(a,sort='lhp')\r\n assert_array_almost_equal([[0.1134,0.5436,0.8316,0.],\r\n [-0.1134,-0.8245,0.5544,0.],\r\n [-0.8213,0.1308,0.0265,-0.5547],\r\n [-0.5475,0.0872,0.0177,0.8321]],\r\n u,3)\r\n assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174],\r\n [0.,-0.5000,9.4472,-0.7184],\r\n [0.,0.,1.4142,-0.1456],\r\n [0.,0.,0.,0.5]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n s,u,sdim = schur(a,sort='rhp')\r\n assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],\r\n [-0.4862,0.4930,-0.1434,-0.7071],\r\n [0.6042,0.3944,-0.6924,0.],\r\n [0.4028,0.5986,0.6924,0.]],\r\n u,3)\r\n assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],\r\n [0.,0.5,6.5809,-3.1870],\r\n [0.,0.,-1.4142,0.9270],\r\n [0.,0.,0.,-0.5]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n s,u,sdim = schur(a,sort='iuc')\r\n assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042],\r\n [-0.8321,0.,-0.3814,-0.4028],\r\n [0.,0.7071,-0.5134,0.4862],\r\n [0.,0.7071,0.5134,-0.4862]],\r\n u,3)\r\n assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974],\r\n 
[0.,0.5000,-3.3191,-14.4130],\r\n [0.,0.,1.4142,2.1573],\r\n [0.,0.,0.,-1.4142]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n s,u,sdim = schur(a,sort='ouc')\r\n assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.],\r\n [-0.4862,0.5134,0.7071,0.],\r\n [0.6042,0.5721,0.,-0.5547],\r\n [0.4028,0.3814,0.,0.8321]],\r\n u,3)\r\n assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974],\r\n [0.,-1.4142,3.3191,6.5809],\r\n [0.,0.,-0.5000,0.],\r\n [0.,0.,0.,0.5000]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n rhp_function = lambda x: x >= 0.0\r\n s,u,sdim = schur(a,sort=rhp_function)\r\n assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],\r\n [-0.4862,0.4930,-0.1434,-0.7071],\r\n [0.6042,0.3944,-0.6924,0.],\r\n [0.4028,0.5986,0.6924,0.]],\r\n u,3)\r\n assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],\r\n [0.,0.5,6.5809,-3.1870],\r\n [0.,0.,-1.4142,0.9270],\r\n [0.,0.,0.,-0.5]],\r\n s,3)\r\n assert_equal(2,sdim)\r\n\r\n def test_sort_errors(self):\r\n a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]\r\n assert_raises(ValueError, schur, a, sort='unsupported')\r\n assert_raises(ValueError, schur, a, sort=1)\r\n\r\n def test_check_finite(self):\r\n a = [[8,12,3],[2,9,3],[10,3,6]]\r\n t,z = schur(a, check_finite=False)\r\n assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)\r\n\r\n\r\nclass TestHessenberg(TestCase):\r\n\r\n def test_simple(self):\r\n a = [[-149, -50,-154],\r\n [537, 180, 546],\r\n [-27, -9, -25]]\r\n h1 = [[-149.0000,42.2037,-156.3165],\r\n [-537.6783,152.5511,-554.9272],\r\n [0,0.0728, 2.4489]]\r\n h,q = hessenberg(a,calc_q=1)\r\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\r\n assert_array_almost_equal(h,h1,decimal=4)\r\n\r\n def test_simple_complex(self):\r\n a = [[-149, -50,-154],\r\n [537, 180j, 546],\r\n [-27j, -9, -25]]\r\n h,q = hessenberg(a,calc_q=1)\r\n h1 = dot(transp(conj(q)),dot(a,q))\r\n assert_array_almost_equal(h1,h)\r\n\r\n def test_simple2(self):\r\n a = [[1,2,3,4,5,6,7],\r\n [0,2,3,4,6,7,2],\r\n [0,2,2,3,0,3,2],\r\n [0,0,2,8,0,0,2],\r\n [0,3,1,2,0,1,2],\r\n [0,1,2,3,0,1,0],\r\n [0,0,0,0,0,1,2]]\r\n h,q = hessenberg(a,calc_q=1)\r\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\r\n\r\n def test_simple3(self):\r\n a = np.eye(3)\r\n a[-1, 0] = 2\r\n h, q = hessenberg(a, calc_q=1)\r\n assert_array_almost_equal(dot(transp(q), dot(a, q)), h)\r\n\r\n def test_random(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])\r\n h,q = hessenberg(a,calc_q=1)\r\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\r\n\r\n def test_random_complex(self):\r\n n = 20\r\n for k in range(2):\r\n a = random([n,n])+1j*random([n,n])\r\n h,q = hessenberg(a,calc_q=1)\r\n h1 = dot(transp(conj(q)),dot(a,q))\r\n assert_array_almost_equal(h1,h)\r\n\r\n def test_check_finite(self):\r\n a = [[-149, -50,-154],\r\n [537, 180, 546],\r\n [-27, -9, -25]]\r\n h1 = [[-149.0000,42.2037,-156.3165],\r\n [-537.6783,152.5511,-554.9272],\r\n [0,0.0728, 2.4489]]\r\n h,q = hessenberg(a,calc_q=1, check_finite=False)\r\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\r\n assert_array_almost_equal(h,h1,decimal=4)\r\n\r\n def test_2x2(self):\r\n a = [[2, 1], [7, 12]]\r\n\r\n h, q = hessenberg(a, calc_q=1)\r\n assert_array_almost_equal(q, np.eye(2))\r\n assert_array_almost_equal(h, a)\r\n\r\n b = [[2-7j, 1+2j], [7+3j, 12-2j]]\r\n h2, q2 = hessenberg(b, calc_q=1)\r\n assert_array_almost_equal(q2, np.eye(2))\r\n assert_array_almost_equal(h2, b)\r\n\r\n\r\nclass TestQZ(TestCase):\r\n def setUp(self):\r\n seed(12345)\r\n\r\n def 
test_qz_single(self):\r\n n = 5\r\n A = random([n,n]).astype(float32)\r\n B = random([n,n]).astype(float32)\r\n AA,BB,Q,Z = qz(A,B)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\r\n assert_array_almost_equal(dot(Q,Q.T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n\r\n def test_qz_double(self):\r\n n = 5\r\n A = random([n,n])\r\n B = random([n,n])\r\n AA,BB,Q,Z = qz(A,B)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\r\n assert_array_almost_equal(dot(Q,Q.T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n\r\n def test_qz_complex(self):\r\n n = 5\r\n A = random([n,n]) + 1j*random([n,n])\r\n B = random([n,n]) + 1j*random([n,n])\r\n AA,BB,Q,Z = qz(A,B)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B)\r\n assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n assert_(all(diag(BB).imag == 0))\r\n\r\n def test_qz_complex64(self):\r\n n = 5\r\n A = (random([n,n]) + 1j*random([n,n])).astype(complex64)\r\n B = (random([n,n]) + 1j*random([n,n])).astype(complex64)\r\n AA,BB,Q,Z = qz(A,B)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A, decimal=5)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B, decimal=5)\r\n assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n), decimal=5)\r\n assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n), decimal=5)\r\n assert_(all(diag(BB) >= 0))\r\n assert_(all(diag(BB).imag == 0))\r\n\r\n def test_qz_double_complex(self):\r\n n = 5\r\n A = random([n,n])\r\n B = random([n,n])\r\n AA,BB,Q,Z = qz(A,B, output='complex')\r\n aa = dot(dot(Q,AA),Z.conjugate().T)\r\n assert_array_almost_equal(aa.real, A)\r\n assert_array_almost_equal(aa.imag, 0)\r\n bb = dot(dot(Q,BB),Z.conjugate().T)\r\n assert_array_almost_equal(bb.real, B)\r\n assert_array_almost_equal(bb.imag, 0)\r\n assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n\r\n def test_qz_double_sort(self):\r\n # from http://www.nag.com/lapack-ex/node119.html\r\n # NOTE: These matrices may be ill-conditioned and lead to a\r\n # seg fault on certain python versions when compiled with\r\n # sse2 or sse3 older ATLAS/LAPACK binaries for windows\r\n # A = np.array([[3.9, 12.5, -34.5, -0.5],\r\n # [ 4.3, 21.5, -47.5, 7.5],\r\n # [ 4.3, 21.5, -43.5, 3.5],\r\n # [ 4.4, 26.0, -46.0, 6.0 ]])\r\n\r\n # B = np.array([[ 1.0, 2.0, -3.0, 1.0],\r\n # [1.0, 3.0, -5.0, 4.0],\r\n # [1.0, 3.0, -4.0, 3.0],\r\n # [1.0, 3.0, -4.0, 4.0]])\r\n A = np.array([[3.9, 12.5, -34.5, 2.5],\r\n [4.3, 21.5, -47.5, 7.5],\r\n [4.3, 1.5, -43.5, 3.5],\r\n [4.4, 6.0, -46.0, 6.0]])\r\n\r\n B = np.array([[1.0, 1.0, -3.0, 1.0],\r\n [1.0, 3.0, -5.0, 4.4],\r\n [1.0, 2.0, -4.0, 1.0],\r\n [1.2, 3.0, -4.0, 4.0]])\r\n\r\n sort = lambda ar,ai,beta: ai == 0\r\n\r\n assert_raises(ValueError, qz, A, B, sort=sort)\r\n if False:\r\n AA,BB,Q,Z,sdim = qz(A,B,sort=sort)\r\n # assert_(sdim == 2)\r\n assert_(sdim == 4)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\r\n\r\n # test absolute values bc the sign is ambiguous and might be platform\r\n # dependent\r\n assert_array_almost_equal(np.abs(AA), 
np.abs(np.array(\r\n [[35.7864, -80.9061, -12.0629, -9.498],\r\n [0., 2.7638, -2.3505, 7.3256],\r\n [0., 0., 0.6258, -0.0398],\r\n [0., 0., 0., -12.8217]])), 4)\r\n assert_array_almost_equal(np.abs(BB), np.abs(np.array(\r\n [[4.5324, -8.7878, 3.2357, -3.5526],\r\n [0., 1.4314, -2.1894, 0.9709],\r\n [0., 0., 1.3126, -0.3468],\r\n [0., 0., 0., 0.559]])), 4)\r\n assert_array_almost_equal(np.abs(Q), np.abs(np.array(\r\n [[-0.4193, -0.605, -0.1894, -0.6498],\r\n [-0.5495, 0.6987, 0.2654, -0.3734],\r\n [-0.4973, -0.3682, 0.6194, 0.4832],\r\n [-0.5243, 0.1008, -0.7142, 0.4526]])), 4)\r\n assert_array_almost_equal(np.abs(Z), np.abs(np.array(\r\n [[-0.9471, -0.2971, -0.1217, 0.0055],\r\n [-0.0367, 0.1209, 0.0358, 0.9913],\r\n [0.3171, -0.9041, -0.2547, 0.1312],\r\n [0.0346, 0.2824, -0.9587, 0.0014]])), 4)\r\n\r\n # test absolute values bc the sign is ambiguous and might be platform\r\n # dependent\r\n # assert_array_almost_equal(abs(AA), abs(np.array([\r\n # [3.8009, -69.4505, 50.3135, -43.2884],\r\n # [0.0000, 9.2033, -0.2001, 5.9881],\r\n # [0.0000, 0.0000, 1.4279, 4.4453],\r\n # [0.0000, 0.0000, 0.9019, -1.1962]])), 4)\r\n # assert_array_almost_equal(abs(BB), abs(np.array([\r\n # [1.9005, -10.2285, 0.8658, -5.2134],\r\n # [0.0000, 2.3008, 0.7915, 0.4262],\r\n # [0.0000, 0.0000, 0.8101, 0.0000],\r\n # [0.0000, 0.0000, 0.0000, -0.2823]])), 4)\r\n # assert_array_almost_equal(abs(Q), abs(np.array([\r\n # [0.4642, 0.7886, 0.2915, -0.2786],\r\n # [0.5002, -0.5986, 0.5638, -0.2713],\r\n # [0.5002, 0.0154, -0.0107, 0.8657],\r\n # [0.5331, -0.1395, -0.7727, -0.3151]])), 4)\r\n # assert_array_almost_equal(dot(Q,Q.T), eye(4))\r\n # assert_array_almost_equal(abs(Z), abs(np.array([\r\n # [0.9961, -0.0014, 0.0887, -0.0026],\r\n # [0.0057, -0.0404, -0.0938, -0.9948],\r\n # [0.0626, 0.7194, -0.6908, 0.0363],\r\n # [0.0626, -0.6934, -0.7114, 0.0956]])), 4)\r\n # assert_array_almost_equal(dot(Z,Z.T), eye(4))\r\n\r\n # def test_qz_complex_sort(self):\r\n # cA = np.array([\r\n # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],\r\n # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],\r\n # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],\r\n # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])\r\n\r\n # cB = np.array([\r\n # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],\r\n # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],\r\n # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],\r\n # [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])\r\n\r\n # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')\r\n\r\n # eigenvalues = diag(AAS)/diag(BBS)\r\n # assert_(all(np.real(eigenvalues[:sdim] < 0)))\r\n # assert_(all(np.real(eigenvalues[sdim:] > 0)))\r\n\r\n def test_check_finite(self):\r\n n = 5\r\n A = random([n,n])\r\n B = random([n,n])\r\n AA,BB,Q,Z = qz(A,B,check_finite=False)\r\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\r\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\r\n assert_array_almost_equal(dot(Q,Q.T), eye(n))\r\n assert_array_almost_equal(dot(Z,Z.T), eye(n))\r\n assert_(all(diag(BB) >= 0))\r\n\r\n\r\ndef _make_pos(X):\r\n # the decompositions can have different signs than verified results\r\n return np.sign(X)*X\r\n\r\n\r\nclass TestOrdQZ(TestCase):\r\n @classmethod\r\n def setupClass(cls):\r\n # http://www.nag.com/lapack-ex/node119.html\r\n cls.A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,\r\n 7.5 + 0.5j],\r\n [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 
58.5j,\r\n -10.5 - 1.5j],\r\n [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,\r\n -7.5 - 3.5j],\r\n [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,\r\n -19.0 - 32.5j]])\r\n\r\n cls.B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],\r\n [0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j],\r\n [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],\r\n [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])\r\n\r\n # http://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml\r\n cls.A2 = np.array([[3.9, 12.5, -34.5, -0.5],\r\n [4.3, 21.5, -47.5, 7.5],\r\n [4.3, 21.5, -43.5, 3.5],\r\n [4.4, 26.0, -46.0, 6.0]])\r\n\r\n cls.B2 = np.array([[1, 2, -3, 1],\r\n [1, 3, -5, 4],\r\n [1, 3, -4, 3],\r\n [1, 3, -4, 4]])\r\n\r\n # example with the eigenvalues\r\n # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,\r\n # 0.61244091\r\n # thus featuring:\r\n # * one complex conjugate eigenvalue pair,\r\n # * one eigenvalue in the lhp\r\n # * 2 eigenvalues in the unit circle\r\n # * 2 non-real eigenvalues\r\n cls.A3 = np.array([[5., 1., 3., 3.],\r\n [4., 4., 2., 7.],\r\n [7., 4., 1., 3.],\r\n [0., 4., 8., 7.]])\r\n cls.B3 = np.array([[8., 10., 6., 10.],\r\n [7., 7., 2., 9.],\r\n [9., 1., 6., 6.],\r\n [5., 1., 4., 7.]])\r\n\r\n def qz_decomp(self, sort):\r\n retc = ordqz(self.A1, self.B1, sort=sort)\r\n ret1 = ordqz(self.A2, self.B2, sort=sort)\r\n ret2 = ordqz(self.A3, self.B3, sort=sort)\r\n return retc, ret1, ret2\r\n\r\n def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):\r\n I = np.eye(*A.shape)\r\n # make sure Q and Z are orthogonal\r\n assert_array_almost_equal(Q.dot(Q.T.conj()), I)\r\n assert_array_almost_equal(Z.dot(Z.T.conj()), I)\r\n # check factorization\r\n assert_array_almost_equal(Q.dot(AA), A.dot(Z))\r\n assert_array_almost_equal(Q.dot(BB), B.dot(Z))\r\n # check shape of AA and BB\r\n assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))\r\n assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))\r\n # check eigenvalues\r\n for i in range(A.shape[0]):\r\n # does the current diagonal element belong to a 2-by-2 block\r\n # that was already checked?\r\n if i > 0 and A[i, i - 1] != 0:\r\n continue\r\n # take care of 2-by-2 blocks\r\n if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:\r\n evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])\r\n # make sure the pair of complex conjugate eigenvalues\r\n # is ordered consistently (positive imaginary part first)\r\n if evals[0].imag < 0:\r\n evals = evals[[1, 0]]\r\n tmp = alpha[i:i + 2]/beta[i:i + 2]\r\n if tmp[0].imag < 0:\r\n tmp = tmp[[1, 0]]\r\n assert_array_almost_equal(evals, tmp)\r\n else:\r\n assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])\r\n sortfun = sort\r\n if sortfun == 'lhp':\r\n sortfun = lambda x, y: (x/y).real < 0\r\n if sortfun == 'rhp':\r\n sortfun = lambda x, y: (x/y).real > 0\r\n if sortfun == 'iuc':\r\n sortfun = lambda x, y: np.abs(x/y) < 1\r\n if sortfun == 'ouc':\r\n sortfun = lambda x, y: np.abs(x/y) > 1\r\n lastsort = True\r\n for i in range(A.shape[0]):\r\n cursort = sortfun(alpha[i], beta[i])\r\n # once the sorting criterion was not matched all subsequent\r\n # eigenvalues also shouldn't match\r\n if not lastsort:\r\n assert(not cursort)\r\n lastsort = cursort\r\n\r\n def test_lhp(self):\r\n retc, ret1, ret2 = self.qz_decomp('lhp')\r\n\r\n self.check(self.A1, self.B1, 'lhp', *retc)\r\n self.check(self.A2, self.B2, 'lhp', *ret1)\r\n self.check(self.A3, self.B3, 'lhp', *ret2)\r\n\r\n def test_rhp(self):\r\n retc, ret1, ret2 = self.qz_decomp('rhp')\r\n\r\n self.check(self.A1, self.B1, 'rhp', *retc)\r\n 
self.check(self.A2, self.B2, 'rhp', *ret1)\r\n self.check(self.A3, self.B3, 'rhp', *ret2)\r\n\r\n def test_iuc(self):\r\n retc, ret1, ret2 = self.qz_decomp('iuc')\r\n\r\n self.check(self.A1, self.B1, 'iuc', *retc)\r\n self.check(self.A2, self.B2, 'iuc', *ret1)\r\n self.check(self.A3, self.B3, 'iuc', *ret2)\r\n\r\n def test_ouc(self):\r\n retc, ret1, ret2 = self.qz_decomp('ouc')\r\n\r\n self.check(self.A1, self.B1, 'ouc', *retc)\r\n self.check(self.A2, self.B2, 'ouc', *ret1)\r\n self.check(self.A3, self.B3, 'ouc', *ret2)\r\n\r\n def test_ref(self):\r\n # real eigenvalues first (top-left corner)\r\n sort = lambda x, y: (x/y).imag == 0\r\n retc, ret1, ret2 = self.qz_decomp(sort)\r\n\r\n self.check(self.A1, self.B1, sort, *retc)\r\n self.check(self.A2, self.B2, sort, *ret1)\r\n self.check(self.A3, self.B3, sort, *ret2)\r\n\r\n def test_cef(self):\r\n # complex eigenvalues first (top-left corner)\r\n sort = lambda x, y: (x/y).imag != 0\r\n retc, ret1, ret2 = self.qz_decomp(sort)\r\n\r\n self.check(self.A1, self.B1, sort, *retc)\r\n self.check(self.A2, self.B2, sort, *ret1)\r\n self.check(self.A3, self.B3, sort, *ret2)\r\n\r\n def test_diff_input_types(self):\r\n ret = ordqz(self.A1, self.B2, sort='lhp')\r\n self.check(self.A1, self.B2, 'lhp', *ret)\r\n\r\n ret = ordqz(self.B2, self.A1, sort='lhp')\r\n self.check(self.B2, self.A1, 'lhp', *ret)\r\n\r\nclass TestOrdQZWorkspaceSize(TestCase):\r\n\r\n def setUp(self):\r\n seed(12345)\r\n\r\n def test_decompose(self):\r\n\r\n N = 202\r\n\r\n # raises error if lwork parameter to dtrsen is too small\r\n for ddtype in [np.float32, np.float64]:\r\n A = random((N,N)).astype(ddtype)\r\n B = random((N,N)).astype(ddtype)\r\n # sort = lambda alphar, alphai, beta: alphar**2 + alphai**2< beta**2\r\n sort = lambda alpha, beta: alpha < beta\r\n [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='real')\r\n\r\n for ddtype in [np.complex, np.complex64]:\r\n A = random((N,N)).astype(ddtype)\r\n B = random((N,N)).astype(ddtype)\r\n sort = lambda alpha, beta: alpha < beta\r\n [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='complex')\r\n\r\n @dec.slow\r\n def test_decompose_ouc(self):\r\n\r\n N = 202\r\n\r\n # segfaults if lwork parameter to dtrsen is too small\r\n for ddtype in [np.float32, np.float64, np.complex, np.complex64]:\r\n A = random((N,N)).astype(ddtype)\r\n B = random((N,N)).astype(ddtype)\r\n [S,T,alpha,beta,U,V] = ordqz(A,B,sort='ouc')\r\n\r\n\r\nclass TestDatacopied(TestCase):\r\n\r\n def test_datacopied(self):\r\n from scipy.linalg.decomp import _datacopied\r\n\r\n M = matrix([[0,1],[2,3]])\r\n A = asarray(M)\r\n L = M.tolist()\r\n M2 = M.copy()\r\n\r\n class Fake1:\r\n def __array__(self):\r\n return A\r\n\r\n class Fake2:\r\n __array_interface__ = A.__array_interface__\r\n\r\n F1 = Fake1()\r\n F2 = Fake2()\r\n\r\n for item, status in [(M, False), (A, False), (L, True),\r\n (M2, False), (F1, False), (F2, False)]:\r\n arr = asarray(item)\r\n assert_equal(_datacopied(arr, item), status,\r\n err_msg=repr(item))\r\n\r\n\r\ndef test_aligned_mem_float():\r\n \"\"\"Check linalg works with non-aligned memory\"\"\"\r\n # Allocate 402 bytes of memory (allocated on boundary)\r\n a = arange(402, dtype=np.uint8)\r\n\r\n # Create an array with boundary offset 4\r\n z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)\r\n z.shape = 10, 10\r\n\r\n eig(z, overwrite_a=True)\r\n eig(z.T, overwrite_a=True)\r\n\r\n\r\ndef test_aligned_mem():\r\n \"\"\"Check linalg works with non-aligned memory\"\"\"\r\n # Allocate 804 bytes of memory (allocated on 
boundary)\r\n a = arange(804, dtype=np.uint8)\r\n\r\n # Create an array with boundary offset 4\r\n z = np.frombuffer(a.data, offset=4, count=100, dtype=float)\r\n z.shape = 10, 10\r\n\r\n eig(z, overwrite_a=True)\r\n eig(z.T, overwrite_a=True)\r\n\r\n\r\ndef test_aligned_mem_complex():\r\n \"\"\"Check that complex objects don't need to be completely aligned\"\"\"\r\n # Allocate 1608 bytes of memory (allocated on boundary)\r\n a = zeros(1608, dtype=np.uint8)\r\n\r\n # Create an array with boundary offset 8\r\n z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)\r\n z.shape = 10, 10\r\n\r\n eig(z, overwrite_a=True)\r\n # This does not need special handling\r\n eig(z.T, overwrite_a=True)\r\n\r\n\r\ndef check_lapack_misaligned(func, args, kwargs):\r\n args = list(args)\r\n for i in range(len(args)):\r\n a = args[:]\r\n if isinstance(a[i],np.ndarray):\r\n # Try misaligning a[i]\r\n aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8)\r\n aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype)\r\n aa.shape = a[i].shape\r\n aa[...] = a[i]\r\n a[i] = aa\r\n func(*a,**kwargs)\r\n if len(a[i].shape) > 1:\r\n a[i] = a[i].T\r\n func(*a,**kwargs)\r\n\r\n\r\[email protected](True, \"Ticket #1152, triggers a segfault in rare cases.\")\r\ndef test_lapack_misaligned():\r\n M = np.eye(10,dtype=float)\r\n R = np.arange(100)\r\n R.shape = 10,10\r\n S = np.arange(20000,dtype=np.uint8)\r\n S = np.frombuffer(S.data, offset=4, count=100, dtype=float)\r\n S.shape = 10, 10\r\n b = np.ones(10)\r\n LU, piv = lu_factor(S)\r\n for (func, args, kwargs) in [\r\n (eig,(S,),dict(overwrite_a=True)), # crash\r\n (eigvals,(S,),dict(overwrite_a=True)), # no crash\r\n (lu,(S,),dict(overwrite_a=True)), # no crash\r\n (lu_factor,(S,),dict(overwrite_a=True)), # no crash\r\n (lu_solve,((LU,piv),b),dict(overwrite_b=True)),\r\n (solve,(S,b),dict(overwrite_a=True,overwrite_b=True)),\r\n (svd,(M,),dict(overwrite_a=True)), # no crash\r\n (svd,(R,),dict(overwrite_a=True)), # no crash\r\n (svd,(S,),dict(overwrite_a=True)), # crash\r\n (svdvals,(S,),dict()), # no crash\r\n (svdvals,(S,),dict(overwrite_a=True)), # crash\r\n (cholesky,(M,),dict(overwrite_a=True)), # no crash\r\n (qr,(S,),dict(overwrite_a=True)), # crash\r\n (rq,(S,),dict(overwrite_a=True)), # crash\r\n (hessenberg,(S,),dict(overwrite_a=True)), # crash\r\n (schur,(S,),dict(overwrite_a=True)), # crash\r\n ]:\r\n yield check_lapack_misaligned, func, args, kwargs\r\n# not properly tested\r\n# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd\r\n\r\n\r\nclass TestOverwrite(object):\r\n def test_eig(self):\r\n assert_no_overwrite(eig, [(3,3)])\r\n assert_no_overwrite(eig, [(3,3), (3,3)])\r\n\r\n def test_eigh(self):\r\n assert_no_overwrite(eigh, [(3,3)])\r\n assert_no_overwrite(eigh, [(3,3), (3,3)])\r\n\r\n def test_eig_banded(self):\r\n assert_no_overwrite(eig_banded, [(3,2)])\r\n\r\n def test_eigvals(self):\r\n assert_no_overwrite(eigvals, [(3,3)])\r\n\r\n def test_eigvalsh(self):\r\n assert_no_overwrite(eigvalsh, [(3,3)])\r\n\r\n def test_eigvals_banded(self):\r\n assert_no_overwrite(eigvals_banded, [(3,2)])\r\n\r\n def test_hessenberg(self):\r\n assert_no_overwrite(hessenberg, [(3,3)])\r\n\r\n def test_lu_factor(self):\r\n assert_no_overwrite(lu_factor, [(3,3)])\r\n\r\n def test_lu_solve(self):\r\n x = np.array([[1,2,3], [4,5,6], [7,8,8]])\r\n xlu = lu_factor(x)\r\n assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])\r\n\r\n def test_lu(self):\r\n assert_no_overwrite(lu, [(3,3)])\r\n\r\n def 
test_qr(self):\r\n assert_no_overwrite(qr, [(3,3)])\r\n\r\n def test_rq(self):\r\n assert_no_overwrite(rq, [(3,3)])\r\n\r\n def test_schur(self):\r\n assert_no_overwrite(schur, [(3,3)])\r\n\r\n def test_schur_complex(self):\r\n assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)],\r\n dtypes=[np.float32, np.float64])\r\n\r\n def test_svd(self):\r\n assert_no_overwrite(svd, [(3,3)])\r\n assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3,3)])\r\n\r\n def test_svdvals(self):\r\n assert_no_overwrite(svdvals, [(3,3)])\r\n\r\n\r\ndef _check_orth(n):\r\n X = np.ones((n, 2), dtype=float)\r\n Y = orth(X)\r\n assert_equal(Y.shape, (n, 1))\r\n assert_allclose(Y, Y.mean(), atol=1e-10)\r\n Y = orth(X.T)\r\n assert_equal(Y.shape, (2, 1))\r\n assert_allclose(Y, Y.mean())\r\n\r\n\r\[email protected]\r\[email protected](np.dtype(np.intp).itemsize < 8, \"test only on 64-bit, else too slow\")\r\ndef test_orth_memory_efficiency():\r\n # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.\r\n # Keep in mind that @dec.slow tests are likely to be running\r\n # under configurations that support 4Gb+ memory for tests related to\r\n # 32 bit overflow.\r\n n = 10*1000*1000\r\n try:\r\n _check_orth(n)\r\n except MemoryError:\r\n raise AssertionError('memory error perhaps caused by orth regression')\r\n\r\n\r\ndef test_orth():\r\n for n in 1, 2, 3, 10, 100:\r\n _check_orth(n)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_module_suite()\r\n"
] | [
[
"numpy.ones",
"numpy.testing.assert_equal",
"numpy.any",
"numpy.asarray",
"numpy.testing.assert_warns",
"numpy.transpose",
"numpy.abs",
"numpy.bool_",
"numpy.unique",
"numpy.typeDict.values",
"numpy.string_",
"numpy.float32",
"numpy.errstate",
"numpy.rec.fromarrays",
"numpy.unicode_",
"numpy.random.normal",
"numpy.array",
"numpy.dot",
"numpy.subtract.reduce",
"numpy.find_common_type",
"numpy.random.seed",
"numpy.add",
"numpy.add.reduce",
"numpy.char.array",
"numpy.reshape",
"numpy.fromstring",
"numpy.binary_repr",
"numpy.load",
"numpy.single",
"numpy.argmax",
"numpy.uint64",
"numpy.int32",
"numpy.divide.reduce",
"numpy.copyto",
"numpy.hstack",
"numpy.count_nonzero",
"numpy.finfo",
"numpy.intp",
"numpy.testing.assert_raises",
"numpy.zeros_like",
"numpy.random.shuffle",
"numpy.format_parser",
"numpy.concatenate",
"numpy.rec.array",
"numpy.array_str",
"numpy.take",
"numpy.testing.run_module_suite",
"numpy.int_",
"numpy.add.outer",
"numpy.frompyfunc",
"numpy.float64",
"numpy.testing.utils._assert_valid_refcount",
"numpy.empty_like",
"numpy.random.rand",
"numpy.nonzero",
"numpy.fromfile",
"numpy.float_",
"numpy.zeros",
"numpy.arange",
"numpy.lexsort",
"numpy.testing.assert_array_almost_equal",
"numpy.recarray",
"numpy.chararray",
"numpy.sort",
"numpy.divide.accumulate",
"numpy.random.randn",
"numpy.compat.asbytes",
"numpy.subtract.accumulate",
"numpy.setbufsize",
"numpy.dtype",
"numpy.compat.sixu",
"numpy.object_",
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_array_equal",
"numpy.ndarray",
"numpy.where",
"numpy.linspace",
"numpy.longdouble",
"numpy.double",
"numpy.compat.asbytes_nested",
"numpy.exp2",
"numpy.lib.stride_tricks.as_strided",
"numpy.all",
"numpy.indices",
"numpy.maximum",
"numpy.sign",
"numpy.empty",
"numpy.add.accumulate",
"numpy.iinfo",
"numpy.testing.assert_"
],
[
"numpy.ones",
"scipy.linalg.lu",
"scipy.linalg.qr_multiply",
"numpy.testing.assert_equal",
"numpy.asarray",
"scipy.linalg.hessenberg",
"numpy.tril",
"numpy.transpose",
"scipy.linalg.schur",
"numpy.abs",
"scipy.linalg.eigh",
"numpy.atleast_2d",
"scipy.linalg.solve",
"scipy.linalg.eigvals_banded",
"scipy.linalg._testutils.assert_no_overwrite",
"numpy.testing.TestCase.__init__",
"numpy.linalg.solve",
"numpy.matrix",
"numpy.array",
"numpy.random.normal",
"numpy.dot",
"numpy.random.seed",
"scipy.linalg.lu_factor",
"numpy.testing.dec.knownfailureif",
"scipy.linalg.ordqz",
"scipy.linalg.rq",
"scipy.linalg.rsf2csf",
"numpy.seterr",
"numpy.linalg.eig",
"scipy.linalg.orth",
"scipy.linalg.lapack.zhbevx",
"numpy.testing.assert_raises",
"numpy.iscomplex",
"scipy.linalg.lapack.dgbtrs",
"scipy.linalg.lapack.dsbevd",
"numpy.conj",
"scipy.linalg.svdvals",
"numpy.sqrt",
"numpy.frombuffer",
"scipy.linalg.svd",
"numpy.bmat",
"numpy.testing.run_module_suite",
"numpy.argsort",
"scipy.linalg.eig_banded",
"numpy.random.rand",
"scipy.linalg.decomp._datacopied",
"numpy.zeros",
"scipy.linalg.eigvals",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"numpy.sort",
"scipy.linalg.lu_solve",
"numpy.triu",
"scipy.linalg.lapack.zgbtrf",
"scipy.linalg.lapack.dsbevx",
"numpy.outer",
"scipy.linalg.lapack.zhbevd",
"numpy.diag",
"numpy.dtype",
"scipy.linalg.qz",
"scipy.linalg.misc.norm",
"numpy.isfinite",
"numpy.testing.assert_almost_equal",
"scipy.linalg.lapack.dsbev",
"numpy.testing.assert_array_equal",
"numpy.identity",
"numpy.eye",
"scipy.linalg.diagsvd",
"numpy.all",
"numpy.sign",
"numpy.empty",
"scipy._lib.six.xrange",
"numpy.conjugate",
"scipy.linalg.lapack.dgbtrf",
"scipy.linalg.lapack.zgbtrs",
"numpy.random.random",
"scipy.linalg.qr",
"numpy.shape",
"scipy.linalg.eig",
"numpy.testing.assert_"
]
] |
NunoEdgarGFlowHub/torchbearer | [
"940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0",
"940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0",
"940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0"
] | [
"torchbearer/callbacks/weight_decay.py",
"torchbearer/callbacks/between_class.py",
"tests/metrics/test_aggregators.py"
] | [
"import torchbearer\n\nfrom torchbearer.callbacks import Callback\n\nimport torch\n\n\nclass WeightDecay(Callback):\n \"\"\"Create a WeightDecay callback which uses the given norm on the given parameters and with the given decay rate.\n If params is None (default) then the parameters will be retrieved from the model.\n\n Example: ::\n\n >>> from torchbearer import Trial\n >>> from torchbearer.callbacks import WeightDecay\n\n # Example Trial which runs a trial with weight decay on the model\n >>> decay = WeightDecay()\n >>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)\n\n Args:\n rate (float): The decay rate or lambda\n p (int): The norm level\n params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a\n single Tensor that will have gradients normalized, otherwise this is retrieved from state\n\n State Requirements:\n - :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method\n - :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented\n \"\"\"\n def __init__(self, rate=5e-4, p=2, params=None):\n super(WeightDecay, self).__init__()\n\n self.p = p\n self.params = params\n self.rate = rate\n\n def on_start(self, state):\n \"\"\"Retrieve params from state['model'] if required.\n\n Args:\n state (dict): The :class:`.Trial` state\n \"\"\"\n if self.params is None:\n self.params = state[torchbearer.MODEL].parameters()\n\n def on_criterion(self, state):\n \"\"\"Calculate the decay term and add to state['loss'].\n\n Args:\n state (dict): The :class:`.Trial` state\n \"\"\"\n for param in self.params:\n state[torchbearer.LOSS] += self.rate * torch.norm(param, self.p)\n\n\nclass L1WeightDecay(WeightDecay):\n \"\"\"WeightDecay callback which uses an L1 norm with the given rate and parameters. If params is None (default) then\n the parameters will be retrieved from the model.\n\n Example: ::\n\n >>> from torchbearer import Trial\n >>> from torchbearer.callbacks import L1WeightDecay\n\n # Example Trial which runs a trial with weight decay on the model using an L1 norm\n >>> decay = L1WeightDecay()\n >>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)\n\n Args:\n rate (float): The decay rate or lambda\n params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a\n single Tensor that will have gradients normalized, otherwise this is retrieved from state\n\n State Requirements:\n - :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method\n - :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented\n \"\"\"\n def __init__(self, rate=5e-4, params=None):\n super(L1WeightDecay, self).__init__(rate=rate, p=1, params=params)\n\n\nclass L2WeightDecay(WeightDecay):\n \"\"\"WeightDecay callback which uses an L2 norm with the given rate and parameters. 
If params is None (default) then\n the parameters will be retrieved from the model.\n\n Example: ::\n\n >>> from torchbearer import Trial\n >>> from torchbearer.callbacks import L2WeightDecay\n\n # Example Trial which runs a trial with weight decay on the model using an L2 norm\n >>> decay = L2WeightDecay()\n >>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)\n\n Args:\n rate (float): The decay rate or lambda\n params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a\n single Tensor that will have gradients normalized, otherwise this is retrieved from state\n\n State Requirements:\n - :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method\n - :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented\n \"\"\"\n def __init__(self, rate=5e-4, params=None):\n super(L2WeightDecay, self).__init__(rate=rate, p=2, params=params)\n",
"import torchbearer\nfrom torchbearer import Callback\nimport torch\nimport torch.nn.functional as F\nfrom torch.distributions import Beta\n\nfrom torchbearer.bases import cite\n\nbc = \"\"\"\n@inproceedings{tokozume2018between,\n title={Between-class learning for image classification},\n author={Tokozume, Yuji and Ushiku, Yoshitaka and Harada, Tatsuya},\n booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},\n pages={5486--5494},\n year={2018}\n}\n\"\"\"\n\n\n@cite(bc)\nclass BCPlus(Callback):\n \"\"\"BC+ callback which mixes images by treating them as waveforms. For standard BC, see :class:`.Mixup`.\n This callback can optionally convert labels to one hot before combining them according to the lambda parameters,\n sampled from a beta distribution, use alpha=1 to replicate the paper. Use with :meth:`BCPlus.bc_loss` or set\n `mixup_loss = True` and use :meth:`.Mixup.mixup_loss`.\n\n .. note::\n\n This callback first sets all images to have zero mean. Consider adding an offset (e.g. 0.5) back before\n visualising.\n\n Example: ::\n\n >>> from torchbearer import Trial\n >>> from torchbearer.callbacks import BCPlus\n\n # Example Trial which does BCPlus regularisation\n >>> bcplus = BCPlus(classes=10)\n >>> trial = Trial(None, criterion=BCPlus.bc_loss, callbacks=[bcplus], metrics=['acc'])\n\n Args:\n mixup_loss (bool): If True, the lambda and targets will be stored for use with the mixup loss function.\n alpha (float): The alpha value for the beta distribution.\n classes (int): The number of classes for conversion to one hot.\n\n State Requirements:\n - :attr:`torchbearer.state.X`: State should have the current data stored and correctly normalised\n - :attr:`torchbearer.state.Y_TRUE`: State should have the current data stored\n \"\"\"\n\n def __init__(self, mixup_loss=False, alpha=1, classes=-1):\n super(BCPlus, self).__init__()\n self.mixup_loss = mixup_loss\n self.classes = classes\n self.dist = Beta(torch.tensor([float(alpha)]), torch.tensor([float(alpha)]))\n\n @staticmethod\n def bc_loss(state):\n \"\"\"The KL divergence between the outputs of the model and the ratio labels. Model ouputs should be un-normalised\n logits as this function performs a log_softmax.\n\n Args:\n state: The current :class:`Trial` state.\n \"\"\"\n prediction, target = state[torchbearer.Y_PRED], state[torchbearer.Y_TRUE]\n\n entropy = - (target[target.nonzero().split(1, dim=1)] * target[target.nonzero().split(1, dim=1)].log()).sum()\n cross = - (target * F.log_softmax(prediction, dim=1)).sum()\n\n return (cross - entropy) / prediction.size(0)\n\n def _to_one_hot(self, target):\n if target.dim() == 1:\n target = target.unsqueeze(1)\n one_hot = torch.zeros_like(target).repeat(1, self.classes)\n one_hot.scatter_(1, target, 1)\n return one_hot\n return target.float()\n\n def on_sample(self, state):\n super(BCPlus, self).on_sample(state)\n\n lam = self.dist.sample().to(state[torchbearer.DEVICE])\n\n permutation = torch.randperm(state[torchbearer.X].size(0))\n\n batch1 = state[torchbearer.X]\n batch1 = batch1 - batch1.view(batch1.size(0), -1).mean(1, keepdim=True).view(*tuple([batch1.size(0)] + [1] * (batch1.dim() - 1)))\n g1 = batch1.view(batch1.size(0), -1).std(1, keepdim=True).view(*tuple([batch1.size(0)] + [1] * (batch1.dim() - 1)))\n\n batch2 = batch1[permutation]\n g2 = g1[permutation]\n\n p = 1. 
/ (1 + ((g1 / g2) * ((1 - lam) / lam)))\n\n state[torchbearer.X] = (batch1 * p + batch2 * (1 - p)) / (p.pow(2) + (1 - p).pow(2)).sqrt()\n\n if not self.mixup_loss:\n target = self._to_one_hot(state[torchbearer.TARGET]).float()\n state[torchbearer.Y_TRUE] = lam * target + (1 - lam) * target[permutation]\n else:\n state[torchbearer.MIXUP_LAMBDA] = lam\n state[torchbearer.MIXUP_PERMUTATION] = permutation\n state[torchbearer.Y_TRUE] = (state[torchbearer.Y_TRUE], state[torchbearer.Y_TRUE][state[torchbearer.MIXUP_PERMUTATION]])\n\n def on_sample_validation(self, state):\n super(BCPlus, self).on_sample_validation(state)\n if not self.mixup_loss:\n state[torchbearer.TARGET] = self._to_one_hot(state[torchbearer.TARGET]).float()\n",
"import unittest\n\nfrom mock import Mock, call\n\nfrom torchbearer.metrics import RunningMean, Metric, RunningMetric, Mean, Std, Var\n\nimport torch\n\n\nclass TestVar(unittest.TestCase):\n def test_variance_dim(self):\n var = Var('test', dim=0)\n var.process(torch.Tensor([[1., 2.], [3., 4.]]))\n var.process(torch.Tensor([[4., 3.], [2., 1.]]))\n var.process(torch.Tensor([[1., 1.], [1., 1.]]))\n\n res = var.process_final()\n self.assertTrue(len(res) == 2)\n for m in res:\n self.assertTrue(abs(m - 1.6000) < 0.0001)\n\n\nclass TestStd(unittest.TestCase):\n def setUp(self):\n self._metric = Metric('test')\n self._metric.process = Mock()\n self._metric.process.side_effect = [torch.zeros(torch.Size([])),\n torch.FloatTensor([0.1, 0.2, 0.3]),\n torch.FloatTensor([0.4, 0.5, 0.6]),\n torch.FloatTensor([0.7, 0.8, 0.9]),\n torch.ones(torch.Size([]))]\n\n self._std = Std('test', unbiased=False)\n self._std.reset({})\n self._target = 0.31622776601684\n\n def test_train(self):\n self.setUp()\n self._std.train()\n for i in range(5):\n self._std.process(self._metric.process())\n result = self._std.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_validate(self):\n self.setUp()\n self._std.eval()\n for i in range(5):\n self._std.process(self._metric.process())\n result = self._std.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_precision_error(self):\n self.setUp()\n self._std.train()\n val = torch.tensor([0.55])\n for i in range(2):\n self._std.process(val)\n\n result = self._std.process_final({})\n self.assertEqual(0, result)\n\n def setUpMoreDims(self):\n self._metric = Metric('test')\n self._metric.process = Mock()\n self._metric.process.side_effect = [torch.zeros(torch.Size([])),\n torch.FloatTensor([[0.1, 0.2, 0.3], [1.1, 1.2, 1.3]]),\n torch.FloatTensor([[0.4, 0.5, 0.6], [1.4, 1.5, 1.6]]),\n torch.FloatTensor([[0.7, 0.8, 0.9], [1.7, 1.8, 1.9]]),\n torch.ones(torch.Size([]))]\n self._std = Std('test', unbiased=False)\n self._std.reset({})\n self._target = 0.57662804083742\n\n def test_more_dims(self):\n self.setUpMoreDims()\n for i in range(5):\n self._std.process(self._metric.process())\n result = self._std.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_std_dim(self):\n std = Std('test', dim=0)\n std.process(torch.Tensor([[1., 2.], [3., 4.]]))\n std.process(torch.Tensor([[4., 3.], [2., 1.]]))\n std.process(torch.Tensor([[1., 1.], [1., 1.]]))\n\n res = std.process_final()\n self.assertTrue(len(res) == 2)\n for m in res:\n self.assertTrue(abs(m - 1.2649) < 0.0001)\n\n\nclass TestMean(unittest.TestCase):\n def setUp(self):\n self._metric = Metric('test')\n self._metric.process = Mock()\n self._metric.process.side_effect = [torch.zeros(torch.Size([])),\n torch.FloatTensor([0.1, 0.2, 0.3]),\n torch.FloatTensor([0.4, 0.5, 0.6]),\n torch.FloatTensor([0.7, 0.8, 0.9]),\n torch.ones(torch.Size([]))]\n\n self._mean = Mean('test')\n self._mean.reset({})\n self._target = 0.5\n\n def test_train_dict(self):\n self.setUp()\n self._mean.train()\n for i in range(5):\n self._mean.process(self._metric.process())\n result = self._mean.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_validate_dict(self):\n self.setUp()\n self._mean.eval()\n for i in range(5):\n self._mean.process(self._metric.process())\n result = self._mean.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def setUpMoreDims(self):\n self._metric = Metric('test')\n 
self._metric.process = Mock()\n self._metric.process.side_effect = [torch.zeros(torch.Size([])),\n torch.FloatTensor([[0.1, 0.2, 0.3], [1.1, 1.2, 1.3]]),\n torch.FloatTensor([[0.4, 0.5, 0.6], [1.4, 1.5, 1.6]]),\n torch.FloatTensor([[0.7, 0.8, 0.9], [1.7, 1.8, 1.9]]),\n torch.ones(torch.Size([]))]\n self._mean = Mean('test')\n self._mean.reset({})\n self._target = 0.95\n\n def test_more_dims(self):\n self.setUpMoreDims()\n for i in range(5):\n self._mean.process(self._metric.process())\n result = self._mean.process_final({})\n self.assertAlmostEqual(self._target, result, places=5)\n\n def test_mean_dim(self):\n mean = Mean('test', dim=0)\n mean.process(torch.Tensor([[1., 2.], [3., 4.]]))\n mean.process(torch.Tensor([[4., 3.], [2., 1.]]))\n mean.process(torch.Tensor([[1., 1.], [1., 1.]]))\n\n res = mean.process_final()\n self.assertTrue(len(res) == 2)\n for m in res:\n self.assertTrue(abs(m - 2.0) < 0.0001)\n\n\nclass TestRunningMetric(unittest.TestCase):\n def setUp(self):\n self._metric = RunningMetric('test', batch_size=5, step_size=5)\n self._metric.reset({})\n self._metric._process_train = Mock(return_value=3)\n self._metric._step = Mock(return_value='output')\n\n def test_train_called_with_state(self):\n self._metric.train()\n self._metric.process({'test': -1})\n self._metric._process_train.assert_called_with({'test': -1})\n\n def test_cache_one_step(self):\n self._metric.train()\n for i in range(6):\n self._metric.process({})\n self._metric._step.assert_has_calls([call([3]), call([3, 3, 3, 3, 3])])\n\n def test_empty_methods(self):\n metric = RunningMetric('test')\n self.assertRaises(NotImplementedError, lambda: metric._step(['test']) is None)\n self.assertRaises(NotImplementedError, lambda: metric._process_train(['test']) is None)\n\n\nclass TestRunningMean(unittest.TestCase):\n def setUp(self):\n self._metric = Metric('test')\n self._mean = RunningMean('test')\n self._cache = [torch.Tensor([1.0]), torch.Tensor([1.5]), torch.Tensor([2.0])]\n self._target = 1.5\n\n def test_train(self):\n result = self._mean._process_train(torch.FloatTensor([1.0, 1.5, 2.0]))\n self.assertAlmostEqual(self._target, result, 3, 0.002)\n\n def test_step(self):\n result = self._mean._step(self._cache)\n self.assertEqual(self._target, result)\n\n def test_dims(self):\n mean = RunningMean('test', dim=0)\n cache = [mean._process_train(torch.Tensor([[1., 2.], [3., 4.]])),\n mean._process_train(torch.Tensor([[4., 3.], [2., 1.]])),\n mean._process_train(torch.Tensor([[1., 1.], [1., 1.]]))]\n\n res = mean._step(cache)\n self.assertTrue(len(res) == 2)\n for m in res:\n self.assertTrue(abs(m - 2.0) < 0.0001)\n"
] | [
[
"torch.norm"
],
[
"torch.zeros_like",
"torch.nn.functional.log_softmax"
],
[
"torch.Size",
"torch.FloatTensor",
"torch.tensor",
"torch.Tensor"
]
] |
datalayer-externals/papermill-scrapbook | [
"911220a26c7f6606f6370a75a4cdac4284675bdc"
] | [
"scrapbook/models.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nmodels.py\n\nProvides the various model wrapper objects for scrapbook\n\"\"\"\nfrom __future__ import unicode_literals\nimport os\nimport copy\nimport nbformat\nimport collections\nimport pandas as pd\n\nfrom six import string_types\nfrom collections import OrderedDict\nfrom IPython.display import display as ip_display, Markdown\n\n# We lean on papermill's readers to connect to remote stores\nfrom papermill.iorw import papermill_io\n\nfrom .scraps import Scrap, Scraps, payload_to_scrap, scrap_to_payload\nfrom .schemas import GLUE_PAYLOAD_PREFIX, RECORD_PAYLOAD_PREFIX\nfrom .encoders import registry as encoder_registry\nfrom .exceptions import ScrapbookException\nfrom .utils import kernel_required, deprecated\n\ntry:\n from urllib.parse import urlparse # Py3\nexcept ImportError:\n from urlparse import urlparse # Py2\n\n\ndef merge_dicts(dicts):\n iterdicts = iter(dicts)\n outcome = next(iterdicts).copy()\n for d in iterdicts:\n outcome.update(d)\n return outcome\n\n\nclass Notebook(object):\n \"\"\"\n Representation of a notebook. This model is quasi-compatible with the\n nbformat NotebookNode object in that it support access to the v4\n required fields from nbformat's json schema. For complete access to\n normal nbformat operations, use the `node` attribute of this model.\n\n Parameters\n ----------\n node_or_path : `nbformat.NotebookNode`, str\n a notebook object, or a path to a notebook object\n \"\"\"\n\n def __init__(self, node_or_path):\n if isinstance(node_or_path, string_types):\n path = urlparse(node_or_path).path\n if not os.path.splitext(path)[-1].endswith('ipynb'):\n raise Warning(\n \"Requires an '.ipynb' file extension. Provided path: '{}'\".format(\n node_or_path\n )\n )\n self.path = node_or_path\n self.node = nbformat.reads(papermill_io.read(node_or_path), as_version=4)\n else:\n self.path = \"\"\n self.node = node_or_path\n\n # Memoized traits\n self._scraps = None\n self._outputs = None\n\n def copy(self):\n cp = Notebook(self.node.copy())\n cp.path = self.path\n return cp\n\n # nbformat mirroring properties\n @property\n def metadata(self):\n return self.node.metadata\n\n @property\n def nbformat_minor(self):\n return self.node.nbformat_minor\n\n @property\n def nbformat(self):\n return self.node.nbformat\n\n @property\n def cells(self):\n return self.node.cells\n\n @property\n def filename(self):\n \"\"\"str: filename found a the specified path\"\"\"\n return os.path.basename(self.path)\n\n @property\n def directory(self):\n \"\"\"str: directory name found for a notebook (nb)\"\"\"\n return os.path.dirname(self.path)\n\n @property\n def parameters(self):\n \"\"\"dict: parameters stored in the notebook metadata\"\"\"\n return self.metadata.get(\"papermill\", {}).get(\"parameters\", {})\n\n def _extract_papermill_output_data(self, sig, payload):\n if sig.startswith(RECORD_PAYLOAD_PREFIX):\n # Fetch '+json' and strip the leading '+'\n encoder = sig.split(RECORD_PAYLOAD_PREFIX, 1)[1][1:]\n # First key is the only named payload\n for name, data in payload.items():\n return encoder_registry.decode(Scrap(name, data, encoder))\n\n def _extract_output_data_scraps(self, output):\n output_scraps = Scraps()\n for sig, payload in output.get(\"data\", {}).items():\n # Backwards compatibility for papermill\n scrap = self._extract_papermill_output_data(sig, payload)\n if scrap is None and sig.startswith(GLUE_PAYLOAD_PREFIX):\n scrap = encoder_registry.decode(payload_to_scrap(payload))\n if scrap:\n output_scraps[scrap.name] = scrap\n\n return 
output_scraps\n\n def _extract_output_displays(self, output):\n output_displays = OrderedDict()\n # Backwards compatibility for papermill\n metadata = output.get(\"metadata\", {})\n if \"papermill\" in metadata:\n output_name = output.metadata[\"papermill\"].get(\"name\")\n if output_name:\n output_displays[output_name] = output\n # Only grab outputs that are displays\n elif metadata.get(\"scrapbook\", {}).get(\"display\"):\n output_name = output.metadata[\"scrapbook\"].get(\"name\")\n if output_name:\n output_displays[output_name] = output\n\n return output_displays\n\n def _fetch_scraps(self):\n \"\"\"Returns a dictionary of the data recorded in a notebook.\"\"\"\n scraps = Scraps()\n\n for cell in self.cells:\n for output in cell.get(\"outputs\", []):\n output_data_scraps = self._extract_output_data_scraps(output)\n output_displays = self._extract_output_displays(output)\n\n # Combine displays with data while trying to preserve ordering\n output_scraps = Scraps(\n [\n # Hydrate with output_displays\n (\n scrap.name,\n Scrap(\n scrap.name,\n scrap.data,\n scrap.encoder,\n output_displays.get(scrap.name),\n ),\n )\n for scrap in output_data_scraps.values()\n ]\n )\n for name, display in output_displays.items():\n if name not in output_scraps:\n output_scraps[name] = Scrap(name, None, \"display\", display)\n scraps.update(output_scraps)\n\n return scraps\n\n @property\n def scraps(self):\n \"\"\"dict: a dictionary of data found in the notebook\"\"\"\n if self._scraps is None:\n self._scraps = self._fetch_scraps()\n return self._scraps\n\n @property\n def cell_timing(self):\n \"\"\"list: a list of cell execution timings in cell order\"\"\"\n return [\n # TODO: Other timing conventions?\n cell.metadata.get(\"papermill\", {}).get(\"duration\", 0.0)\n if cell.get(\"execution_count\")\n else None\n for cell in self.cells\n ]\n\n @property\n def execution_counts(self):\n \"\"\"list: a list of cell execution counts in cell order\"\"\"\n return [cell.get(\"execution_count\") for cell in self.cells]\n\n @property\n @deprecated('0.4.0', '`metrics`')\n def papermill_metrics(self):\n return self.metrics\n\n @property\n def metrics(self):\n \"\"\"pandas dataframe: dataframe of cell execution counts and times\"\"\"\n df = pd.DataFrame(columns=[\"filename\", \"cell\", \"value\", \"type\"])\n\n for i, cell in enumerate(self.cells):\n execution_count = cell.get(\"execution_count\")\n if not execution_count:\n continue\n name = \"Out [{}]\".format(str(execution_count))\n value = cell.metadata.get(\"papermill\", {}).get(\"duration\", 0.0)\n df.loc[i] = self.filename, name, value, \"time (s)\"\n return df\n\n @property\n def parameter_dataframe(self):\n \"\"\"pandas dataframe: dataframe of notebook parameters\"\"\"\n # Meant for backwards compatibility to papermill's dataframe method\n return pd.DataFrame(\n [\n [name, self.parameters[name], \"parameter\", self.filename]\n for name in sorted(self.parameters.keys())\n ],\n columns=[\"name\", \"value\", \"type\", \"filename\"],\n )\n\n @property\n def scrap_dataframe(self):\n \"\"\"pandas dataframe: dataframe of cell scraps\"\"\"\n df = self.scraps.dataframe\n df[\"filename\"] = self.filename\n return df\n\n @property\n @deprecated('1.0.0')\n def papermill_record_dataframe(self):\n \"\"\"pandas dataframe: dataframe of cell scraps\"\"\"\n # Meant for backwards compatibility to papermill's dataframe method\n return pd.DataFrame(\n [\n [name, self.scraps[name].data, \"record\", self.filename]\n for name in sorted(self.scraps.keys())\n if self.scraps[name].data is 
not None\n ],\n columns=[\"name\", \"value\", \"type\", \"filename\"],\n )\n\n @property\n @deprecated('1.0.0')\n def papermill_dataframe(self):\n \"\"\"pandas dataframe: dataframe of notebook parameters and cell scraps\"\"\"\n # Meant for backwards compatibility to papermill's dataframe method\n return self.parameter_dataframe.append(\n self.papermill_record_dataframe, ignore_index=True\n )\n\n def _strip_scrapbook_metadata(self, metadata):\n copied = copy.copy(metadata)\n # Strip old metadata name\n copied.pop(\"papermill\", None)\n copied.pop(\"scrapbook\", None)\n return copied\n\n @kernel_required\n def reglue(self, name, new_name=None, raise_on_missing=True, unattached=False):\n \"\"\"\n Display output from a named source of the notebook.\n\n Parameters\n ----------\n name : str\n name of scrap object\n new_name : str\n replacement name for scrap\n raise_error : bool\n indicator for if the resketch should print a message or error on missing snaps\n unattached : bool\n indicator for rendering without making the display recallable as scrapbook data\n \"\"\"\n # Avoid circular imports\n from .api import _prepare_ipy_data_format, _prepare_ipy_display_format\n\n if name not in self.scraps:\n if raise_on_missing:\n raise ScrapbookException(\n \"Scrap '{}' is not available in this notebook.\".format(name)\n )\n else:\n ip_display(\n \"No scrap found with name '{}' in this notebook\".format(name)\n )\n else:\n scrap = self.scraps[name]\n if new_name:\n scrap = scrap._replace(name=new_name)\n if scrap.data is not None:\n data, metadata = _prepare_ipy_data_format(\n scrap.name, scrap_to_payload(scrap), scrap.encoder\n )\n # Skip saving data for later regluing and remove 'scrapbook'\n # from keys, when unattached\n if unattached:\n metadata = self._strip_scrapbook_metadata(metadata)\n ip_display(data, metadata=metadata, raw=True)\n if scrap.display is not None:\n scrap_data = scrap.display.get(\"data\", {})\n scrap_metadata = self._strip_scrapbook_metadata(\n scrap.display.get(\"metadata\", {})\n )\n data, metadata = _prepare_ipy_display_format(\n scrap.name, scrap_data, scrap_metadata\n )\n if unattached:\n # Remove 'scrapbook' from keys if we want it unassociated\n metadata = self._strip_scrapbook_metadata(metadata)\n ip_display(data, metadata=metadata, raw=True)\n\n\nclass Scrapbook(collections.MutableMapping):\n \"\"\"\n A collection of notebooks represented as a dictionary of notebooks\n \"\"\"\n\n def __init__(self):\n self._notebooks = OrderedDict()\n\n def __setitem__(self, key, value):\n # If notebook is a path str then load the notebook.\n if isinstance(value, string_types):\n value = Notebook(value)\n self._notebooks.__setitem__(key, value)\n\n def __getitem__(self, key):\n return self._notebooks.__getitem__(key)\n\n def __delitem__(self, key):\n return self._notebooks.__delitem__(key)\n\n def __iter__(self):\n return self._notebooks.__iter__()\n\n def __len__(self):\n return self._notebooks.__len__()\n\n @property\n @deprecated('1.0.0')\n def papermill_dataframe(self):\n \"\"\"list: a list of data names from a collection of notebooks\"\"\"\n\n # Backwards compatible dataframe interface\n\n df_list = []\n for key in self._notebooks:\n nb = self._notebooks[key]\n df = nb.papermill_dataframe\n df[\"key\"] = key\n df_list.append(df)\n return pd.concat(df_list).reset_index(drop=True)\n\n @property\n @deprecated('0.4.0', 'metrics')\n def papermill_metrics(self):\n return self.metrics\n\n @property\n def metrics(self):\n \"\"\"list: a list of metrics from a collection of notebooks\"\"\"\n 
df_list = []\n for key in self._notebooks:\n nb = self._notebooks[key]\n df = nb.metrics\n df[\"key\"] = key\n df_list.append(df)\n return pd.concat(df_list).reset_index(drop=True)\n\n @property\n def notebooks(self):\n \"\"\"list: a sorted list of associated notebooks.\"\"\"\n return self.values()\n\n @property\n def notebook_scraps(self):\n \"\"\"dict: a dictionary of the notebook scraps by key.\"\"\"\n return OrderedDict([(key, nb.scraps) for key, nb in self._notebooks.items()])\n\n @property\n def scraps(self):\n \"\"\"dict: a dictionary of the merged notebook scraps.\"\"\"\n return Scraps(merge_dicts(nb.scraps for nb in self.notebooks))\n\n def scraps_report(\n self, scrap_names=None, notebook_names=None, include_data=False, headers=True\n ):\n \"\"\"\n Display scraps as markdown structed outputs.\n\n Parameters\n ----------\n scrap_names : str or iterable[str] (optional)\n the scraps to display as reported outputs\n notebook_names : str or iterable[str] (optional)\n notebook names to use in filtering on scraps to report\n include_data : bool (default: False)\n indicator that data-only scraps should be reported\n header : bool (default: True)\n indicator for if the scraps should render with a header\n \"\"\"\n\n def trim_repr(data):\n # Generate a small data representation for display purposes\n if not isinstance(data, string_types):\n data_str = repr(data)\n if len(data_str) > 102:\n data_str = data_str[:100] + \"...\"\n return data_str\n\n if isinstance(scrap_names, string_types):\n scrap_names = [scrap_names]\n scrap_names = set(scrap_names or [])\n\n if notebook_names is None:\n notebook_names = self._notebooks.keys()\n elif isinstance(notebook_names, string_types):\n notebook_names = [notebook_names]\n\n for i, nb_name in enumerate(notebook_names):\n notebook = self[nb_name]\n if headers:\n if i > 0:\n ip_display(Markdown(\"<hr>\")) # tag between outputs\n ip_display(Markdown(\"### {}\".format(nb_name)))\n\n for name in scrap_names or notebook.scraps.display_scraps.keys():\n if headers:\n ip_display(Markdown(\"#### {}\".format(name)))\n notebook.reglue(name, raise_on_missing=False, unattached=True)\n\n if include_data:\n for name, scrap in scrap_names or notebook.scraps.data_scraps.items():\n if scrap.display is None and scrap.data is not None:\n if headers:\n ip_display(Markdown(\"#### {}\".format(name)))\n ip_display(trim_repr(scrap.data))\n else:\n ip_display(\n \"{}: {}\".format(scrap.name, trim_repr(scrap.data))\n )\n"
] | [
[
"pandas.DataFrame",
"pandas.concat"
]
] |
PacktPublishing/Computer-Vision-YOLO-Custom-Object-Detection-with-Colab-GPU | [
"f90db3c5f3326d89282f249ede92234812c824a5"
] | [
"pretrained_yolo_video_nms.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: abhilash\n\"\"\"\n\nimport numpy as np\nimport cv2\n\n#get the webcam video stream\nfile_video_stream = cv2.VideoCapture('images/testing/video_sample2.mp4')\n\n#create a while loop \nwhile (file_video_stream.isOpened):\n #get the current frame from video stream\n ret,current_frame = file_video_stream.read()\n #use the video current frame instead of image\n img_to_detect = current_frame\n\n img_height = img_to_detect.shape[0]\n img_width = img_to_detect.shape[1]\n \n # convert to blob to pass into model\n img_blob = cv2.dnn.blobFromImage(img_to_detect, 0.003922, (416, 416), swapRB=True, crop=False)\n #recommended by yolo authors, scale factor is 0.003922=1/255, width,height of blob is 320,320\n #accepted sizes are 320×320,416×416,609×609. More size means more accuracy but less speed\n \n # set of 80 class labels \n class_labels = [\"person\",\"bicycle\",\"car\",\"motorcycle\",\"airplane\",\"bus\",\"train\",\"truck\",\"boat\",\n \"trafficlight\",\"firehydrant\",\"stopsign\",\"parkingmeter\",\"bench\",\"bird\",\"cat\",\n \"dog\",\"horse\",\"sheep\",\"cow\",\"elephant\",\"bear\",\"zebra\",\"giraffe\",\"backpack\",\n \"umbrella\",\"handbag\",\"tie\",\"suitcase\",\"frisbee\",\"skis\",\"snowboard\",\"sportsball\",\n \"kite\",\"baseballbat\",\"baseballglove\",\"skateboard\",\"surfboard\",\"tennisracket\",\n \"bottle\",\"wineglass\",\"cup\",\"fork\",\"knife\",\"spoon\",\"bowl\",\"banana\",\"apple\",\n \"sandwich\",\"orange\",\"broccoli\",\"carrot\",\"hotdog\",\"pizza\",\"donut\",\"cake\",\"chair\",\n \"sofa\",\"pottedplant\",\"bed\",\"diningtable\",\"toilet\",\"tvmonitor\",\"laptop\",\"mouse\",\n \"remote\",\"keyboard\",\"cellphone\",\"microwave\",\"oven\",\"toaster\",\"sink\",\"refrigerator\",\n \"book\",\"clock\",\"vase\",\"scissors\",\"teddybear\",\"hairdrier\",\"toothbrush\"]\n \n #Declare List of colors as an array\n #Green, Blue, Red, cyan, yellow, purple\n #Split based on ',' and for every split, change type to int\n #convert that to a numpy array to apply color mask to the image numpy array\n class_colors = [\"0,255,0\",\"0,0,255\",\"255,0,0\",\"255,255,0\",\"0,255,255\"]\n class_colors = [np.array(every_color.split(\",\")).astype(\"int\") for every_color in class_colors]\n class_colors = np.array(class_colors)\n class_colors = np.tile(class_colors,(16,1))\n \n # Loading pretrained model \n # input preprocessed blob into model and pass through the model\n # obtain the detection predictions by the model using forward() method\n yolo_model = cv2.dnn.readNetFromDarknet('model/yolov3.cfg','model/yolov3.weights')\n \n # Get all layers from the yolo network\n # Loop and find the last layer (output layer) of the yolo network \n yolo_layers = yolo_model.getLayerNames()\n yolo_output_layer = [yolo_layers[yolo_layer[0] - 1] for yolo_layer in yolo_model.getUnconnectedOutLayers()]\n \n # input preprocessed blob into model and pass through the model\n yolo_model.setInput(img_blob)\n # obtain the detection layers by forwarding through till the output layer\n obj_detection_layers = yolo_model.forward(yolo_output_layer)\n \n \n ############## NMS Change 1 ###############\n # initialization for non-max suppression (NMS)\n # declare list for [class id], [box center, width & height[], [confidences]\n class_ids_list = []\n boxes_list = []\n confidences_list = []\n ############## NMS Change 1 END ###########\n \n \n # loop over each of the layer outputs\n for object_detection_layer in obj_detection_layers:\n \t# loop over the detections\n for object_detection in 
object_detection_layer:\n \n # obj_detections[1 to 4] => will have the two center points, box width and box height\n # obj_detections[5] => will have scores for all objects within bounding box\n all_scores = object_detection[5:]\n predicted_class_id = np.argmax(all_scores)\n prediction_confidence = all_scores[predicted_class_id]\n \n # take only predictions with confidence more than 20%\n if prediction_confidence > 0.20:\n #get the predicted label\n predicted_class_label = class_labels[predicted_class_id]\n #obtain the bounding box co-oridnates for actual image from resized image size\n bounding_box = object_detection[0:4] * np.array([img_width, img_height, img_width, img_height])\n (box_center_x_pt, box_center_y_pt, box_width, box_height) = bounding_box.astype(\"int\")\n start_x_pt = int(box_center_x_pt - (box_width / 2))\n start_y_pt = int(box_center_y_pt - (box_height / 2))\n \n ############## NMS Change 2 ###############\n #save class id, start x, y, width & height, confidences in a list for nms processing\n #make sure to pass confidence as float and width and height as integers\n class_ids_list.append(predicted_class_id)\n confidences_list.append(float(prediction_confidence))\n boxes_list.append([start_x_pt, start_y_pt, int(box_width), int(box_height)])\n ############## NMS Change 2 END ###########\n \n ############## NMS Change 3 ###############\n # Applying the NMS will return only the selected max value ids while suppressing the non maximum (weak) overlapping bounding boxes \n # Non-Maxima Suppression confidence set as 0.5 & max_suppression threhold for NMS as 0.4 (adjust and try for better perfomance)\n max_value_ids = cv2.dnn.NMSBoxes(boxes_list, confidences_list, 0.5, 0.4)\n \n # loop through the final set of detections remaining after NMS and draw bounding box and write text\n for max_valueid in max_value_ids:\n max_class_id = max_valueid[0]\n box = boxes_list[max_class_id]\n start_x_pt = box[0]\n start_y_pt = box[1]\n box_width = box[2]\n box_height = box[3]\n \n #get the predicted class id and label\n predicted_class_id = class_ids_list[max_class_id]\n predicted_class_label = class_labels[predicted_class_id]\n prediction_confidence = confidences_list[max_class_id]\n ############## NMS Change 3 END ########### \n \n end_x_pt = start_x_pt + box_width\n end_y_pt = start_y_pt + box_height\n \n #get a random mask color from the numpy array of colors\n box_color = class_colors[predicted_class_id]\n \n #convert the color numpy array as a list and apply to text and box\n box_color = [int(c) for c in box_color]\n \n # print the prediction in console\n predicted_class_label = \"{}: {:.2f}%\".format(predicted_class_label, prediction_confidence * 100)\n print(\"predicted object {}\".format(predicted_class_label))\n \n # draw rectangle and text in the image\n cv2.rectangle(img_to_detect, (start_x_pt, start_y_pt), (end_x_pt, end_y_pt), box_color, 1)\n cv2.putText(img_to_detect, predicted_class_label, (start_x_pt, start_y_pt-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 1)\n \n cv2.imshow(\"Detection Output\", img_to_detect)\n \n #terminate while loop if 'q' key is pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n#releasing the stream and the camera\n#close all opencv windows\nfile_video_stream.release()\ncv2.destroyAllWindows()"
] | [
[
"numpy.array",
"numpy.tile",
"numpy.argmax"
]
] |
ishaiqbal/sqlalchemy-challenge- | [
"5b2b7bbb954e371bd1777b5cb04bfb22d7a5a25c"
] | [
"app.py"
] | [
"import numpy as np\nimport datetime as dt\nimport pandas as pd\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\n\nfrom flask import Flask, jsonify\n\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Session link from python to DB\nsession = Session(engine)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Flask Routes\n#################################################\n\n\[email protected](\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Welcome to the Hawaii Climate Analysis API!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/temp/start<br/>\"\n f\"/api/v1.0/temp/start/end\"\n )\n\n\[email protected](\"/api/v1.0/precipitation\")\ndef precipitation():\n session = Session(engine)\n\n data = session.query(Measurement.date, Measurement.prcp).\\\n order_by(Measurement.date).all()\n\n precip_dates = []\n\n for date, prcp in data:\n new_dict = {}\n new_dict[date] = prcp\n precip_dates.append(new_dict)\n\n session.close()\n\n return jsonify(precip_dates)\n\n\[email protected](\"/api/v1.0/stations\")\ndef stations():\n\n results = session.query(Station.station).all()\n stations = list(np.ravel(results))\n\n session.close()\n return jsonify(stations)\n\n\[email protected](\"/api/v1.0/tobs\")\ndef tobs():\n\n \n lastdate = session.query(Measurement.date).order_by(\n Measurement.date.desc()).first()\n\n last_date = dt.datetime.strptime(lastdate[0], '%Y-%m-%d')\n\n \n query_date = dt.date(last_date.year, last_date.month,\n last_date.day) - dt.timedelta(days=365)\n\n \n results = session.query(Measurement.date, Measurement.tobs).filter(\n Measurement.date >= query_date).all()\n\n all_tobs = []\n for row in results:\n tobs_dict = {}\n tobs_dict[\"date\"] = row.date\n tobs_dict[\"tobs\"] = row.tobs\n all_tobs.append(tobs_dict)\n\n session.close()\n return jsonify(all_tobs)\n\n\[email protected](\"/api/v1.0/temp/start\")\ndef stats():\n\n start_date = session.query(func.min(Measurement.date)).all()[0][0]\n\n sel = [func.min(Measurement.tobs),func.avg(Measurement.tobs),func.max(Measurement.tobs)]\n temp_lstuple = session.query(*sel).filter(Measurement.date >= start_date).all()\n\n session.close()\n\n temp_pram1_list = list(np.ravel(temp_lstuple))\n temp_list =[]\n for t in temp_lstuple:\n temp_dict = {}\n temp_dict[\"Min Temp\"] = temp_pram1_list[0]\n temp_dict[\"Avg Temp\"] = temp_pram1_list[1]\n temp_dict[\"Max Temp\"] = temp_pram1_list[2]\n temp_list.append(temp_dict)\n return jsonify(temp_list)\n\n\[email protected](\"/api/v1.0/temp/start/end\")\ndef tempstartend(start=None, end=None):\n\n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n\n temps_q = session.query(*sel).filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n\n temps = list(np.ravel(temps_q))\n\n return 
jsonify(temps)\n\nif __name__ == '__main__':\n app.run(debug=True)\n"
] | [
[
"numpy.ravel"
]
] |
ekatef/oemof-examples | [
"4805d5cef03141a917fd8a9e1141acfa8cc9d781"
] | [
"oemof_examples/tespy/heat_pump/heat_pump_water.py"
] | [
"# -*- coding: utf-8 -*-\nfrom tespy.networks import Network\nfrom tespy.components import (\n Sink, Source, Splitter, Compressor, Condenser, Pump, HeatExchangerSimple,\n Valve, Drum, HeatExchanger, CycleCloser\n)\nfrom tespy.connections import Connection, Ref\nfrom tespy.tools.characteristics import CharLine\nfrom tespy.tools.characteristics import load_default_char as ldc\nfrom tespy.tools import document_model\n\nimport numpy as np\nimport pandas as pd\n\n# %% network\n\nnw = Network(\n fluids=['water', 'NH3', 'air'], T_unit='C', p_unit='bar', h_unit='kJ / kg',\n m_unit='kg / s'\n)\n\n# %% components\n\n# sources & sinks\ncc = CycleCloser('coolant cycle closer')\ncc_cons = CycleCloser('consumer cycle closer')\namb = Source('ambient air')\namb_out1 = Sink('sink ambient 1')\namb_out2 = Sink('sink ambient 2')\n\n# ambient system\nsp = Splitter('splitter')\npu = Pump('pump')\n\n# consumer system\n\ncd = Condenser('condenser')\ndhp = Pump('district heating pump')\ncons = HeatExchangerSimple('consumer')\n\n# evaporator system\n\nves = Valve('valve')\ndr = Drum('drum')\nev = HeatExchanger('evaporator')\nsu = HeatExchanger('superheater')\nerp = Pump('evaporator reciculation pump')\n\n# compressor-system\n\ncp1 = Compressor('compressor 1')\ncp2 = Compressor('compressor 2')\nic = HeatExchanger('intercooler')\n\n# %% connections\n\n# consumer system\n\nc_in_cd = Connection(cc, 'out1', cd, 'in1')\n\ncb_dhp = Connection(cc_cons, 'out1', dhp, 'in1')\ndhp_cd = Connection(dhp, 'out1', cd, 'in2')\ncd_cons = Connection(cd, 'out2', cons, 'in1')\ncons_cf = Connection(cons, 'out1', cc_cons, 'in1')\n\nnw.add_conns(c_in_cd, cb_dhp, dhp_cd, cd_cons, cons_cf)\n\n# connection condenser - evaporator system\n\ncd_ves = Connection(cd, 'out1', ves, 'in1')\n\nnw.add_conns(cd_ves)\n\n# evaporator system\n\nves_dr = Connection(ves, 'out1', dr, 'in1')\ndr_erp = Connection(dr, 'out1', erp, 'in1')\nerp_ev = Connection(erp, 'out1', ev, 'in2')\nev_dr = Connection(ev, 'out2', dr, 'in2')\ndr_su = Connection(dr, 'out2', su, 'in2')\n\nnw.add_conns(ves_dr, dr_erp, erp_ev, ev_dr, dr_su)\n\namb_p = Connection(amb, 'out1', pu, 'in1')\np_sp = Connection(pu, 'out1', sp, 'in1')\nsp_su = Connection(sp, 'out1', su, 'in1')\nsu_ev = Connection(su, 'out1', ev, 'in1')\nev_amb_out = Connection(ev, 'out1', amb_out1, 'in1')\n\nnw.add_conns(amb_p, p_sp, sp_su, su_ev, ev_amb_out)\n\n# connection evaporator system - compressor system\n\nsu_cp1 = Connection(su, 'out2', cp1, 'in1')\n\nnw.add_conns(su_cp1)\n\n# compressor-system\n\ncp1_he = Connection(cp1, 'out1', ic, 'in1')\nhe_cp2 = Connection(ic, 'out1', cp2, 'in1')\ncp2_c_out = Connection(cp2, 'out1', cc, 'in1')\n\nsp_ic = Connection(sp, 'out2', ic, 'in2')\nic_out = Connection(ic, 'out2', amb_out2, 'in1')\n\nnw.add_conns(cp1_he, he_cp2, sp_ic, ic_out, cp2_c_out)\n\n# %% component parametrization\n\n# condenser system\n\ncd.set_attr(pr1=0.99, pr2=0.99, ttd_u=5, design=['pr2', 'ttd_u'],\n offdesign=['zeta2', 'kA_char'])\ndhp.set_attr(eta_s=0.8, design=['eta_s'], offdesign=['eta_s_char'])\ncons.set_attr(pr=0.99, design=['pr'], offdesign=['zeta'])\n\n# water pump\n\npu.set_attr(eta_s=0.75, design=['eta_s'], offdesign=['eta_s_char'])\n\n# evaporator system\n\nkA_char1 = ldc('heat exchanger', 'kA_char1', 'DEFAULT', CharLine)\nkA_char2 = ldc('heat exchanger', 'kA_char2', 'EVAPORATING FLUID', CharLine)\n\nev.set_attr(pr1=0.98, pr2=0.99, ttd_l=5,\n kA_char1=kA_char1, kA_char2=kA_char2,\n design=['pr1', 'ttd_l'], offdesign=['zeta1', 'kA_char'])\nsu.set_attr(pr1=0.98, pr2=0.99, ttd_u=2, design=['pr1', 
'pr2', 'ttd_u'],\n offdesign=['zeta1', 'zeta2', 'kA_char'])\nerp.set_attr(eta_s=0.8, design=['eta_s'], offdesign=['eta_s_char'])\n\n# compressor system\n\ncp1.set_attr(eta_s=0.85, design=['eta_s'], offdesign=['eta_s_char'])\ncp2.set_attr(eta_s=0.9, pr=3, design=['eta_s'], offdesign=['eta_s_char'])\nic.set_attr(pr1=0.99, pr2=0.98, design=['pr1', 'pr2'],\n offdesign=['zeta1', 'zeta2', 'kA_char'])\n\n# %% connection parametrization\n\n# condenser system\n\nc_in_cd.set_attr(fluid={'air': 0, 'NH3': 1, 'water': 0})\ncb_dhp.set_attr(T=60, p=10, fluid={'air': 0, 'NH3': 0, 'water': 1})\ncd_cons.set_attr(T=90)\n\n# evaporator system cold side\n\nerp_ev.set_attr(m=Ref(ves_dr, 1.25, 0), p0=5)\nsu_cp1.set_attr(p0=5, state='g')\n\n# evaporator system hot side\n\n# pumping at constant rate in partload\namb_p.set_attr(T=12, p=2, fluid={'air': 0, 'NH3': 0, 'water': 1},\n offdesign=['v'])\nsp_su.set_attr(offdesign=['v'])\nev_amb_out.set_attr(p=2, T=9, design=['T'])\n\n# compressor-system\n\nhe_cp2.set_attr(Td_bp=5, p0=20, design=['Td_bp'])\nic_out.set_attr(T=30, design=['T'])\n\n# %% key paramter\n\ncons.set_attr(Q=-200e3)\n\n# %% Calculation\n\nnw.solve('design')\nnw.print_results()\nnw.save('heat_pump_water')\ndocument_model(nw, filename='report_water_design.tex')\n\n# offdesign test\nnw.solve('offdesign', design_path='heat_pump_water')\ndocument_model(nw, filename='report_water_offdesign.tex')\n\nT_range = [6, 12, 18, 24, 30]\nQ_range = np.array([100e3, 120e3, 140e3, 160e3, 180e3, 200e3, 220e3])\ndf = pd.DataFrame(columns=Q_range / -cons.Q.val)\n\nfor T in T_range:\n amb_p.set_attr(T=T)\n eps = []\n\n for Q in Q_range:\n cons.set_attr(Q=-Q)\n nw.solve('offdesign', design_path='heat_pump_water')\n\n if nw.lin_dep:\n eps += [np.nan]\n else:\n eps += [\n abs(cd.Q.val) / (cp1.P.val + cp2.P.val + erp.P.val + pu.P.val)\n ]\n\n df.loc[T] = eps\n\ndf.to_csv('COP_water.csv')\n"
] | [
[
"numpy.array",
"pandas.DataFrame"
]
] |
fitbenchmarking/fitbenchmarking | [
"ea398efa61f071dc64fe7c3b484d5bb4e1897856"
] | [
"examples/benchmark_problems/scripts/generate_simulated_mantid.py"
] | [
"\"\"\"\nThis script is used to generate simulated count data based on a Mantid\nscript.\n\"\"\"\n\nimport os\n\nimport numpy\n\n\ndef VariableStatsData(N, A0, omega, phi, sigma, bg):\n x = numpy.linspace(start=0.0, stop=32.0, num=2001)\n y = (1+A0*numpy.cos(omega*x+phi)*numpy.exp(-(sigma*x)**2)) * \\\n numpy.exp(-x/2.197)+bg\n NN = N/numpy.sum(y) # normalisation so whole spectrum has ~N counts\n return (x, numpy.random.poisson(y*NN))\n\n\ndef write_data(x, y, part=0):\n path = f'{os.path.dirname(__file__)}/../data_files'\n part_str = part if part != 0 else \"\"\n with open(f'{path}/simulated_mantid{part_str}.txt', 'w') as f:\n f.write('# X Y\\n')\n lines = [[x[i], y[i]]\n # if y[i] != 0 # Uncomment to replace 0s with 1s\n # else [x[i], 1]\n for i in range(len(x))\n # if y[i] != 0 # Uncomment to ignore 0 values\n ]\n f.writelines([f'{i} {j}\\n' for i, j in lines])\n\n\ndef write_problem(N, part=0):\n path = f'{os.path.dirname(__file__)}/..'\n part_str = part if part != 0 else \"\"\n with open(f'{path}/simulated_mantid{part_str}.txt', 'w') as f:\n f.write('# FitBenchmark Problem\\n')\n f.write(\"software = 'Mantid'\\n\")\n f.write(f\"name = 'Simulated poisson (Mantid) {part_str}'\\n\")\n f.write(\"description = 'A simulated dataset for testing poisson cost\"\n \"functions, based on a simple simulation from Mantid.'\\n\")\n f.write(f\"input_file = 'simulated_mantid{part_str}.txt'\\n\")\n f.write(\"function = 'name=UserFunction,\"\n \"Formula=N*((1+A*cos(omega*x+phi)*exp(-(sigma*x)^2))*\"\n \"exp(-x/2.197)+bg),\"\n f\"N={0.007*N},\"\n \"A=0.3,\"\n \"omega=0.9,\"\n \"phi=0.2,\"\n \"sigma=0.12,\"\n \"bg=0.001'\\n\")\n\n\nif __name__ == '__main__':\n chunks = [1] #,8,16,20,32,40,50,100]\n num = 1000\n N0 = 4e5\n for i, part in enumerate(chunks):\n args = {'N': 1000/part,\n 'A0': 0.25,\n 'omega': 1.0,\n 'phi': 0.1,\n 'sigma': 0.1,\n 'bg': 1.E-4}\n x, y = VariableStatsData(**args)\n write_data(x, y, part=i)\n write_problem(N=args['N'], part=i)\n"
] | [
[
"numpy.sum",
"numpy.random.poisson",
"numpy.exp",
"numpy.cos",
"numpy.linspace"
]
] |
Zerwer/EEGMachineLearning | [
"d0dfcf617b22317a88018a86545c4f7e37a290b9"
] | [
"data/live_predict.py"
] | [
"# Unsure majority of time but more correct then wrong when thinking of\n# Requires more data for training\nfrom data import *\nfrom tkinter import *\nfrom keras.models import load_model\nimport numpy as np\nimport threading\nimport time\n\n# Time variables\nstart_wait = 10000\nwait = 2100\n\n# Set dimensions\nw = 900\nh = 556\n\nroot = Tk()\nroot.geometry(str(w)+'x'+str(h))\nroot.title('Predictor')\n\ngraphing_area = Canvas(root, width=w, height=h)\ngraphing_area.pack()\n\n# Import model to be used\nsaved_model = load_model('model.h5')\n\n# Begin data thread\nthread = threading.Thread(target=data_loop, args=[False, False, False, 1, False])\nthread.start()\n\n\n# Predicts the input values and returns predicted letter\ndef predict(values, model):\n processed_data = np.expand_dims(np.array([np.abs(np.fft.rfft(np.array(values)))/85000]), 3)\n prediction = model.predict(processed_data)\n print(prediction[0][0])\n if prediction[0][0] < 0.1:\n return 'B'\n elif prediction[0][0] > 0.9:\n return 'A'\n else:\n return '?'\n\n\ndef display_prediction(canvas, frame, model):\n prediction = predict(last_values[-1500:], model)\n\n canvas.delete('all')\n canvas.create_text(w / 2, h / 2, font=\"Arial \" + str(int(round(h / 3, 0))), text='Collecting...', anchor='center')\n time.sleep(1)\n canvas.delete('all')\n canvas.create_text(w / 2, h / 2, font=\"Arial \" + str(int(round(h / 3, 0))), text=prediction, anchor='center')\n\n root.after(wait, display_prediction, canvas, frame, model)\n\n\nroot.after(start_wait, display_prediction, graphing_area, root, saved_model)\nroot.mainloop()\n"
] | [
[
"numpy.array"
]
] |
st2yang/garage | [
"50186a9630df038aeba36d6b06b006ab32ed48f5"
] | [
"tests/garage/sampler/test_sampler.py"
] | [
"from dowel import logger\nimport numpy as np\n\nfrom garage.sampler.utils import truncate_paths\n\nfrom tests.fixtures.logger import NullOutput\n\n\nclass TestSampler:\n\n def setup_method(self):\n logger.add_output(NullOutput())\n\n def teardown_method(self):\n logger.remove_all()\n\n def test_truncate_paths(self):\n paths = [\n dict(\n observations=np.zeros((100, 1)),\n actions=np.zeros((100, 1)),\n rewards=np.zeros(100),\n env_infos=dict(),\n agent_infos=dict(lala=np.zeros(100)),\n ),\n dict(\n observations=np.zeros((50, 1)),\n actions=np.zeros((50, 1)),\n rewards=np.zeros(50),\n env_infos=dict(),\n agent_infos=dict(lala=np.zeros(50)),\n ),\n ]\n\n truncated = truncate_paths(paths, 130)\n assert len(truncated) == 2\n assert len(truncated[-1]['observations']) == 30\n assert len(truncated[0]['observations']) == 100\n # make sure not to change the original one\n assert len(paths) == 2\n assert len(paths[-1]['observations']) == 50\n"
] | [
[
"numpy.zeros"
]
] |
lorentzenchr/scipy | [
"393a05ee927883ad6316b7092c851afea8f16816"
] | [
"scipy/signal/tests/test_savitzky_golay.py"
] | [
"import numpy as np\nfrom numpy.testing import (assert_allclose, assert_equal,\n assert_almost_equal, assert_array_equal,\n assert_array_almost_equal)\n\nfrom scipy.ndimage import convolve1d\n\nfrom scipy.signal import savgol_coeffs, savgol_filter\nfrom scipy.signal._savitzky_golay import _polyder\n\n\ndef check_polyder(p, m, expected):\n dp = _polyder(p, m)\n assert_array_equal(dp, expected)\n\n\ndef test_polyder():\n cases = [\n ([5], 0, [5]),\n ([5], 1, [0]),\n ([3, 2, 1], 0, [3, 2, 1]),\n ([3, 2, 1], 1, [6, 2]),\n ([3, 2, 1], 2, [6]),\n ([3, 2, 1], 3, [0]),\n ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),\n ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),\n ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),\n ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),\n ]\n for p, m, expected in cases:\n check_polyder(np.array(p).T, m, np.array(expected).T)\n\n\n#--------------------------------------------------------------------\n# savgol_coeffs tests\n#--------------------------------------------------------------------\n\ndef alt_sg_coeffs(window_length, polyorder, pos):\n \"\"\"This is an alternative implementation of the SG coefficients.\n\n It uses numpy.polyfit and numpy.polyval. The results should be\n equivalent to those of savgol_coeffs(), but this implementation\n is slower.\n\n window_length should be odd.\n\n \"\"\"\n if pos is None:\n pos = window_length // 2\n t = np.arange(window_length)\n unit = (t == pos).astype(int)\n h = np.polyval(np.polyfit(t, unit, polyorder), t)\n return h\n\n\ndef test_sg_coeffs_trivial():\n # Test a trivial case of savgol_coeffs: polyorder = window_length - 1\n h = savgol_coeffs(1, 0)\n assert_allclose(h, [1])\n\n h = savgol_coeffs(3, 2)\n assert_allclose(h, [0, 1, 0], atol=1e-10)\n\n h = savgol_coeffs(5, 4)\n assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)\n\n h = savgol_coeffs(5, 4, pos=1)\n assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)\n\n h = savgol_coeffs(5, 4, pos=1, use='dot')\n assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)\n\n\ndef compare_coeffs_to_alt(window_length, order):\n # For the given window_length and order, compare the results\n # of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.\n # Also include pos=None.\n for pos in [None] + list(range(window_length)):\n h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')\n h2 = alt_sg_coeffs(window_length, order, pos=pos)\n assert_allclose(h1, h2, atol=1e-10,\n err_msg=(\"window_length = %d, order = %d, pos = %s\" %\n (window_length, order, pos)))\n\n\ndef test_sg_coeffs_compare():\n # Compare savgol_coeffs() to alt_sg_coeffs().\n for window_length in range(1, 8, 2):\n for order in range(window_length):\n compare_coeffs_to_alt(window_length, order)\n\n\ndef test_sg_coeffs_exact():\n polyorder = 4\n window_length = 9\n halflen = window_length // 2\n\n x = np.linspace(0, 21, 43)\n delta = x[1] - x[0]\n\n # The data is a cubic polynomial. We'll use an order 4\n # SG filter, so the filtered values should equal the input data\n # (except within half window_length of the edges).\n y = 0.5 * x ** 3 - x\n h = savgol_coeffs(window_length, polyorder)\n y0 = convolve1d(y, h)\n assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])\n\n # Check the same input, but use deriv=1. dy is the exact result.\n dy = 1.5 * x ** 2 - 1\n h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)\n y1 = convolve1d(y, h)\n assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])\n\n # Check the same input, but use deriv=2. 
d2y is the exact result.\n d2y = 3.0 * x\n h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)\n y2 = convolve1d(y, h)\n assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])\n\n\ndef test_sg_coeffs_deriv():\n # The data in `x` is a sampled parabola, so using savgol_coeffs with an\n # order 2 or higher polynomial should give exact results.\n i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])\n x = i ** 2 / 4\n dx = i / 2\n d2x = np.full_like(i, 0.5)\n for pos in range(x.size):\n coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')\n assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)\n coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)\n assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)\n coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)\n assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)\n\n\ndef test_sg_coeffs_deriv_gt_polyorder():\n \"\"\"\n If deriv > polyorder, the coefficients should be all 0.\n This is a regression test for a bug where, e.g.,\n savgol_coeffs(5, polyorder=1, deriv=2)\n raised an error.\n \"\"\"\n coeffs = savgol_coeffs(5, polyorder=1, deriv=2)\n assert_array_equal(coeffs, np.zeros(5))\n coeffs = savgol_coeffs(7, polyorder=4, deriv=6)\n assert_array_equal(coeffs, np.zeros(7))\n\n\ndef test_sg_coeffs_large():\n # Test that for large values of window_length and polyorder the array of\n # coefficients returned is symmetric. The aim is to ensure that\n # no potential numeric overflow occurs.\n coeffs0 = savgol_coeffs(31, 9)\n assert_array_almost_equal(coeffs0, coeffs0[::-1])\n coeffs1 = savgol_coeffs(31, 9, deriv=1)\n assert_array_almost_equal(coeffs1, -coeffs1[::-1])\n\n# --------------------------------------------------------------------\n# savgol_coeffs tests for even window length\n# --------------------------------------------------------------------\n\n\ndef test_sg_coeffs_even_window_length():\n # Simple case - deriv=0, polyorder=0, 1\n window_lengths = [4, 6, 8, 10, 12, 14, 16]\n for length in window_lengths:\n h_p_d = savgol_coeffs(length, 0, 0)\n assert_allclose(h_p_d, 1/length)\n\n # Verify with closed forms\n # deriv=1, polyorder=1, 2\n def h_p_d_closed_form_1(k, m):\n return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1))\n\n # deriv=2, polyorder=2\n def h_p_d_closed_form_2(k, m):\n numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2)\n denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1)\n return numer/denom\n\n for length in window_lengths:\n m = length//2\n expected_output = [h_p_d_closed_form_1(k, m)\n for k in range(-m + 1, m + 1)][::-1]\n actual_output = savgol_coeffs(length, 1, 1)\n assert_allclose(expected_output, actual_output)\n actual_output = savgol_coeffs(length, 2, 1)\n assert_allclose(expected_output, actual_output)\n\n expected_output = [h_p_d_closed_form_2(k, m)\n for k in range(-m + 1, m + 1)][::-1]\n actual_output = savgol_coeffs(length, 2, 2)\n assert_allclose(expected_output, actual_output)\n actual_output = savgol_coeffs(length, 3, 2)\n assert_allclose(expected_output, actual_output)\n\n#--------------------------------------------------------------------\n# savgol_filter tests\n#--------------------------------------------------------------------\n\n\ndef test_sg_filter_trivial():\n \"\"\" Test some trivial edge cases for savgol_filter().\"\"\"\n x = np.array([1.0])\n y = savgol_filter(x, 1, 0)\n assert_equal(y, [1.0])\n\n # Input is a single value. With a window length of 3 and polyorder 1,\n # the value in y is from the straight-line fit of (-1,0), (0,3) and\n # (1, 0) at 0. 
This is just the average of the three values, hence 1.0.\n x = np.array([3.0])\n y = savgol_filter(x, 3, 1, mode='constant')\n assert_almost_equal(y, [1.0], decimal=15)\n\n x = np.array([3.0])\n y = savgol_filter(x, 3, 1, mode='nearest')\n assert_almost_equal(y, [3.0], decimal=15)\n\n x = np.array([1.0] * 3)\n y = savgol_filter(x, 3, 1, mode='wrap')\n assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)\n\n\ndef test_sg_filter_basic():\n # Some basic test cases for savgol_filter().\n x = np.array([1.0, 2.0, 1.0])\n y = savgol_filter(x, 3, 1, mode='constant')\n assert_allclose(y, [1.0, 4.0 / 3, 1.0])\n\n y = savgol_filter(x, 3, 1, mode='mirror')\n assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])\n\n y = savgol_filter(x, 3, 1, mode='wrap')\n assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])\n\n\ndef test_sg_filter_2d():\n x = np.array([[1.0, 2.0, 1.0],\n [2.0, 4.0, 2.0]])\n expected = np.array([[1.0, 4.0 / 3, 1.0],\n [2.0, 8.0 / 3, 2.0]])\n y = savgol_filter(x, 3, 1, mode='constant')\n assert_allclose(y, expected)\n\n y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)\n assert_allclose(y, expected.T)\n\n\ndef test_sg_filter_interp_edges():\n # Another test with low degree polynomial data, for which we can easily\n # give the exact results. In this test, we use mode='interp', so\n # savgol_filter should match the exact solution for the entire data set,\n # including the edges.\n t = np.linspace(-5, 5, 21)\n delta = t[1] - t[0]\n # Polynomial test data.\n x = np.array([t,\n 3 * t ** 2,\n t ** 3 - t])\n dx = np.array([np.ones_like(t),\n 6 * t,\n 3 * t ** 2 - 1.0])\n d2x = np.array([np.zeros_like(t),\n np.full_like(t, 6),\n 6 * t])\n\n window_length = 7\n\n y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')\n assert_allclose(y, x, atol=1e-12)\n\n y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',\n deriv=1, delta=delta)\n assert_allclose(y1, dx, atol=1e-12)\n\n y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',\n deriv=2, delta=delta)\n assert_allclose(y2, d2x, atol=1e-12)\n\n # Transpose everything, and test again with axis=0.\n\n x = x.T\n dx = dx.T\n d2x = d2x.T\n\n y = savgol_filter(x, window_length, 3, axis=0, mode='interp')\n assert_allclose(y, x, atol=1e-12)\n\n y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',\n deriv=1, delta=delta)\n assert_allclose(y1, dx, atol=1e-12)\n\n y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',\n deriv=2, delta=delta)\n assert_allclose(y2, d2x, atol=1e-12)\n\n\ndef test_sg_filter_interp_edges_3d():\n # Test mode='interp' with a 3-D array.\n t = np.linspace(-5, 5, 21)\n delta = t[1] - t[0]\n x1 = np.array([t, -t])\n x2 = np.array([t ** 2, 3 * t ** 2 + 5])\n x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])\n dx1 = np.array([np.ones_like(t), -np.ones_like(t)])\n dx2 = np.array([2 * t, 6 * t])\n dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])\n\n # z has shape (3, 2, 21)\n z = np.array([x1, x2, x3])\n dz = np.array([dx1, dx2, dx3])\n\n y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)\n assert_allclose(y, z, atol=1e-10)\n\n dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)\n assert_allclose(dy, dz, atol=1e-10)\n\n # z has shape (3, 21, 2)\n z = np.array([x1.T, x2.T, x3.T])\n dz = np.array([dx1.T, dx2.T, dx3.T])\n\n y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)\n assert_allclose(y, z, atol=1e-10)\n\n dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)\n assert_allclose(dy, dz, atol=1e-10)\n\n # z has shape (21, 3, 
2)\n z = z.swapaxes(0, 1).copy()\n dz = dz.swapaxes(0, 1).copy()\n\n y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)\n assert_allclose(y, z, atol=1e-10)\n\n dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)\n assert_allclose(dy, dz, atol=1e-10)\n"
] | [
[
"scipy.signal.savgol_filter",
"numpy.full_like",
"numpy.testing.assert_almost_equal",
"numpy.zeros_like",
"scipy.signal.savgol_coeffs",
"numpy.zeros",
"scipy.ndimage.convolve1d",
"numpy.testing.assert_equal",
"numpy.ones_like",
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"scipy.signal._savitzky_golay._polyder",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.polyfit",
"numpy.linspace"
]
] |
DelphianCalamity/PrivateKube | [
"14f575e77021ab7baca30f4061140ec83bdc96a7"
] | [
"evaluation/macrobenchmark/workload/models/classification.py"
] | [
"import sys, os, shutil\nimport h5py\nimport time\nimport io\nimport random\nimport tempfile\nfrom tqdm import tqdm\nfrom absl import app, flags, logging\nfrom ray.util.multiprocessing import Pool\nimport gcsfs\nimport numpy as np\nfrom pathlib import Path\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import SGDClassifier\n\n\nimport torchtext\nimport torch\n\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport torch.nn as nn\nfrom transformers import BertTokenizer, BertModel, BertForSequenceClassification\nimport opacus\n\nfrom privatekube.experiments.datasets import (\n EventLevelDataset,\n split_review_batch,\n UserTimeLevelDataset,\n select_blocks_by_timeframe,\n)\nfrom privatekube.experiments.utils import (\n build_flags,\n flags_to_dict,\n load_yaml,\n results_to_dict,\n save_yaml,\n save_model,\n binary_accuracy,\n multiclass_accuracy,\n epoch_time,\n)\nfrom privatekube.privacy.text import build_public_vocab\nfrom privatekube.privacy.rdp import (\n compute_noise_from_target_epsilon,\n ALPHAS,\n compute_rdp_sgm,\n)\n\nimport models\n\n\nDEFAULT_DATA_PATH = Path(__file__).resolve().parent.parent.parent.joinpath(\"data\")\n\n# Define default args\ndataset_args = {\n \"n_blocks\": 200,\n \"max_text_len\": 140,\n \"vocab_size\": 10_000,\n \"n_blocks_test\": 200,\n}\n\ninput_path_args = {\n \"dataset_dir\": \"\",\n \"dataset_monofile\": \"\",\n \"block_counts\": str(DEFAULT_DATA_PATH.joinpath(\"block_counts.yaml\")),\n \"emb_path\": str(DEFAULT_DATA_PATH.joinpath(\".vector_cache\")),\n}\n\nmodel_args = {\n \"task\": \"product\",\n \"model\": \"bow\",\n \"embedding_dim\": 100,\n \"hidden_dim_1\": 240,\n \"hidden_dim_2\": 195,\n \"hidden_dim\": 100,\n \"dropout\": 0.25,\n}\n\ntraining_args = {\n \"device\": \"cuda\",\n \"learning_rate\": 0.01,\n \"dp\": 0,\n \"dp_eval\": 0,\n \"user_level\": 0,\n \"epsilon\": 5.0,\n \"delta\": 1e-5,\n \"n_epochs\": 15,\n \"batch_size\": 64,\n \"virtual_batch_multiplier\": 2,\n \"adaptive_batch_size\": 1,\n \"noise\": -1.0,\n \"timeframe_days\": 0,\n \"learning_rate_scheduler\": 1,\n \"dynamic_clipping\": 0,\n \"max_grad_norm\": 1.0,\n \"per_layer_clipping\": 0,\n \"n_workers\": 6,\n \"non_dp_batch_size\": 256,\n}\n\noutput_args = {\n \"log_path\": \"\",\n \"model_path\": \"\",\n \"metrics_path\": \"\",\n}\n\nbuild_flags(dataset_args, model_args, training_args, input_path_args, output_args)\nFLAGS = flags.FLAGS\n\n\nnp.random.seed(0)\n\n\ndef build_split_dataset():\n\n block_dir = tempfile.mkdtemp()\n test_block_dir = tempfile.mkdtemp()\n\n if FLAGS.dataset_dir[0:5] == \"gs://\":\n os.system(\n \"gcloud auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS\"\n )\n fs = gcsfs.GCSFileSystem(\n project=os.get_env(\"GCP_PROJECT\"), token=\"google_default\"\n ) # Get the local Gcloud token\n logging.info(\"Listing bucket files.\")\n all_blocks = list(\n map(\n lambda blob: os.path.basename(blob[\"name\"]),\n fs.listdir(FLAGS.dataset_dir),\n )\n )\n logging.info(f\"Got {len(all_blocks)} blocks.\")\n logging.warning(f\"The evaluation set is not fixed.\")\n elif FLAGS.dataset_dir == \"\":\n logging.info(\"Listing the block names.\")\n all_blocks = 
list(load_yaml(FLAGS.block_counts).keys())\n else:\n all_blocks = os.listdir(FLAGS.dataset_dir)\n\n logging.info(f\"Selecting {FLAGS.n_blocks_test} test blocks (fixed randomness).\")\n test_blocks = np.random.choice(all_blocks, FLAGS.n_blocks_test, replace=False)\n\n for tb in test_blocks:\n all_blocks.remove(tb)\n\n # Use every user to the maximum.\n def sort_by_user(block_name):\n if block_name.endswith(\".h5\"):\n block_name = block_name[: -len(\".h5\")]\n name = block_name.split(\"-\")\n user_slice = int(name[1])\n return user_slice\n\n logging.info(\n f\"Selecting as few users as possible.\\n Pseudorandom and deterministic (hashed user ids).\"\n )\n selected_blocks = sorted(all_blocks, key=sort_by_user)[0 : FLAGS.n_blocks]\n\n if FLAGS.dataset_dir[0:5] == \"gs://\":\n pool = Pool()\n\n bucket_path = FLAGS.dataset_dir\n\n def download_datasource(block_name):\n block_path = os.path.join(bucket_path, block_name)\n dest = os.path.join(block_dir, block_name)\n os.system(f\"gsutil cp {block_path} {dest}\")\n return\n\n logging.warning(\"Downloading the blocks in parallel.\")\n b = pool.map(download_datasource, selected_blocks)\n pool.close()\n pool.join()\n block_names = None\n test_block_names = None\n elif FLAGS.dataset_dir == \"\":\n block_dir = None\n test_block_dir = None\n block_names = selected_blocks\n test_block_names = test_blocks\n\n else:\n for b in selected_blocks:\n os.symlink(os.path.join(FLAGS.dataset_dir, b), os.path.join(block_dir, b))\n for b in test_blocks:\n os.symlink(\n os.path.join(FLAGS.dataset_dir, b), os.path.join(test_block_dir, b)\n )\n block_names = None\n test_block_names = None\n\n # Store for the logs\n FLAGS.dataset_dir = block_dir\n if not FLAGS.dataset_monofile:\n if FLAGS.model == \"bert\":\n from_h5 = DEFAULT_DATA_PATH.joinpath(\"reviews.h5\")\n else:\n from_h5 = DEFAULT_DATA_PATH.joinpath(\"reviews_custom_vocab.h5\")\n else:\n from_h5 = FLAGS.dataset_monofile\n\n if FLAGS.dp and FLAGS.user_level:\n train_data = UserTimeLevelDataset(\n blocks_dir=block_dir,\n timeframe=FLAGS.timeframe_days * 86400,\n from_h5=from_h5,\n block_names=block_names,\n )\n else:\n train_data = EventLevelDataset(\n blocks_dir=block_dir,\n from_h5=from_h5,\n block_names=block_names,\n )\n\n test_data = EventLevelDataset(\n blocks_dir=test_block_dir,\n from_h5=from_h5,\n block_names=test_block_names,\n )\n test_data, valid_data = test_data.split([0.75, 0.25])\n logging.info(f\"Test size: {len(test_data)}\\n Valid size: {len(valid_data)}\")\n\n # Values from the preprocessing\n # (max text len doesn't matter here)\n text_field = torchtext.data.Field(\n batch_first=True,\n use_vocab=True,\n init_token=\"<bos>\",\n eos_token=\"<eos>\",\n pad_token=\"<pad>\",\n unk_token=\"<unk>\",\n include_lengths=True,\n )\n build_public_vocab(\n text_field,\n max_size=FLAGS.vocab_size - 4,\n vectors=f\"glove.6B.{FLAGS.embedding_dim}d\",\n unk_init=torch.Tensor.normal_,\n vectors_cache=FLAGS.emb_path,\n )\n\n return train_data, test_data, valid_data, text_field\n\n\ndef compute_optimal_batch_size(real_batch_size, dataset_len):\n logging.info(\n f\"Computing the optimal batch size. 
Dataset {dataset_len}, real batch {real_batch_size}\"\n )\n # Under approximate\n optimal_batch_size = int(np.sqrt(dataset_len))\n if optimal_batch_size <= real_batch_size:\n return optimal_batch_size, 0\n else:\n return (real_batch_size, optimal_batch_size // real_batch_size)\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef build_model(text_field):\n INPUT_DIM = len(text_field.vocab)\n word_embeddings = text_field.vocab.vectors\n PAD_IDX = text_field.vocab.stoi[text_field.pad_token]\n UNK_IDX = text_field.vocab.stoi[text_field.unk_token]\n\n if FLAGS.task == \"sentiment\":\n output_dim = 1\n elif FLAGS.task == \"product\":\n output_dim = 11\n\n if FLAGS.model == \"lstm\":\n model = models.LSTMClassifier(\n batch_size=FLAGS.batch_size,\n output_size=output_dim,\n hidden_size=FLAGS.hidden_dim,\n vocab_size=INPUT_DIM,\n embedding_length=FLAGS.embedding_dim,\n weights=word_embeddings,\n dropout=FLAGS.dropout,\n dp=FLAGS.dp,\n )\n elif FLAGS.model == \"bow\":\n model = models.NBOW(\n input_dim=word_embeddings.shape[0],\n emb_dim=FLAGS.embedding_dim,\n output_dim=output_dim,\n pad_idx=PAD_IDX,\n word_embeddings=word_embeddings,\n )\n elif FLAGS.model == \"feedforward\":\n model = models.FeedforwardModel(\n vocab_size=INPUT_DIM,\n embedding_dim=FLAGS.embedding_dim,\n pad_idx=PAD_IDX,\n H_1=FLAGS.hidden_dim_1,\n H_2=FLAGS.hidden_dim_2,\n D_out=output_dim,\n word_embeddings=word_embeddings,\n )\n elif FLAGS.model == \"bert\":\n # The dataset has been preprocessed with the bert tokenizer, so the indices should be correct\n logging.info(f\"Pad and unk index {PAD_IDX, UNK_IDX}\")\n model = models.FineTunedBert.build_new(output_dim=output_dim)\n logging.info(\n f\"Model {FLAGS.model} has {count_parameters(model)} trainable parameters.\"\n )\n # Bert has its own pretrained embeddings\n return model\n\n pretrained_embeddings = text_field.vocab.vectors\n\n model.embedding.weight.data.copy_(pretrained_embeddings)\n model.embedding.weight.data[UNK_IDX] = torch.zeros(FLAGS.embedding_dim)\n model.embedding.weight.data[PAD_IDX] = torch.zeros(FLAGS.embedding_dim)\n\n logging.info(\n f\"Model {FLAGS.model} has {count_parameters(model)} trainable parameters.\"\n )\n\n return model\n\n\ndef train(model, iterator, optimizer, criterion, accuracy_fn):\n\n epoch_loss = 0\n epoch_acc = 0\n\n model.train()\n optimizer.zero_grad()\n\n for i, batch in enumerate(tqdm(iterator)):\n\n # batch = batch.to(FLAGS.device)\n\n if FLAGS.task == \"sentiment\":\n data, label = split_review_batch(\n batch,\n label_feature=\"binary_rating\",\n max_text_len=FLAGS.max_text_len,\n include_len=True,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n text_lengths, text = data\n elif FLAGS.task == \"product\":\n text, label = split_review_batch(\n batch,\n label_feature=\"category\",\n max_text_len=FLAGS.max_text_len,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n\n text = text.to(device=FLAGS.device, dtype=torch.long)\n label = (\n label.to(device=FLAGS.device, dtype=torch.long)\n if FLAGS.task == \"product\"\n else label.to(device=FLAGS.device, dtype=torch.float)\n )\n\n if FLAGS.model == \"lstm\":\n hidden = model.init_hidden(batch_size=len(batch))\n if isinstance(hidden, tuple):\n hidden = (\n hidden[0].to(FLAGS.device),\n hidden[1].to(FLAGS.device),\n )\n else:\n hidden = hidden.to(FLAGS.device)\n outputs = model(text, hidden)\n elif FLAGS.model == \"bert\":\n PAD_IDX = 0\n inputs = {\n \"input_ids\": text,\n 
\"labels\": label,\n \"attention_mask\": torch.where(\n text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)\n ),\n }\n # logging.info(f\"Inputs {inputs}\")\n # The model outputs loss, logits\n outputs = model(**inputs)[1]\n # logging.info(f\"Outputs {outputs}\")\n else:\n outputs = model(text)\n # logging.info(f\"Outputs {outputs}\")\n if FLAGS.task == \"sentiment\":\n outputs = outputs.squeeze(1)\n\n loss = criterion(outputs, label)\n acc = accuracy_fn(outputs.detach(), label)\n\n loss.backward()\n\n if FLAGS.dp and FLAGS.virtual_batch_multiplier > 1:\n # NOTE: step is not called at every minibatch, so the RDP accountant need to know this\n\n if (i + 1) % FLAGS.virtual_batch_multiplier == 0 or (i + 1) == len(\n iterator\n ):\n # For the (virtual_batch_multiplier)th batch, call a clip-noise-step\n optimizer.step()\n optimizer.zero_grad()\n else:\n # For the first (virtual_batch_multiplier - 1) batches, just accumulate the gradients\n optimizer.virtual_step()\n else:\n # Regular optimizer step (either non-DP or DP with no virtual step)\n optimizer.step()\n optimizer.zero_grad()\n\n epoch_loss += loss.item()\n # epoch_loss += loss.detach().item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef evaluate(model, iterator, criterion, accuracy_fn):\n\n epoch_loss = 0\n epoch_acc = 0\n\n model.eval()\n\n with torch.no_grad():\n\n for batch in iterator:\n\n # batch = batch.to(FLAGS.device)\n if FLAGS.task == \"sentiment\":\n data, label = split_review_batch(\n batch,\n label_feature=\"binary_rating\",\n max_text_len=FLAGS.max_text_len,\n include_len=True,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n text_lengths, text = data\n elif FLAGS.task == \"product\":\n text, label = split_review_batch(\n batch,\n label_feature=\"category\",\n max_text_len=FLAGS.max_text_len,\n vocab_size=FLAGS.vocab_size,\n custom_vocab=(FLAGS.model != \"bert\"),\n )\n\n text = text.to(device=FLAGS.device, dtype=torch.long)\n label = (\n label.to(device=FLAGS.device, dtype=torch.long)\n if FLAGS.task == \"product\"\n else label.to(device=FLAGS.device, dtype=torch.float)\n )\n\n if FLAGS.model == \"lstm\":\n hidden = model.init_hidden(batch_size=len(batch))\n if isinstance(hidden, tuple):\n hidden = (\n hidden[0].to(FLAGS.device),\n hidden[1].to(FLAGS.device),\n )\n else:\n hidden = hidden.to(FLAGS.device)\n outputs = model(text, hidden)\n elif FLAGS.model == \"bert\":\n PAD_IDX = 0\n inputs = {\n \"input_ids\": text,\n \"labels\": label,\n \"attention_mask\": torch.where(\n text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)\n ),\n }\n outputs = model(**inputs)[1]\n else:\n outputs = model(text)\n if FLAGS.task == \"sentiment\":\n outputs = outputs.squeeze(1)\n\n # print(f\"Training. 
Outputs: {outputs}, labels: {batch.label}\")\n loss = criterion(outputs, label)\n acc = accuracy_fn(outputs, label)\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\ndef train_validate(\n train_data, valid_data, model, optimizer, criterion, accuracy_fn, scheduler\n):\n validation_accuracy_epochs = []\n validation_loss_epochs = []\n training_loss_epochs = []\n training_accuracy_epochs = []\n\n logging.info(f\"n workers: {FLAGS.n_workers}\")\n train_iterator = torch.utils.data.DataLoader(\n train_data,\n batch_size=FLAGS.batch_size,\n shuffle=True,\n num_workers=FLAGS.n_workers,\n drop_last=True,\n )\n\n valid_iterator = torch.utils.data.DataLoader(\n valid_data,\n batch_size=FLAGS.batch_size,\n shuffle=True,\n num_workers=FLAGS.n_workers,\n drop_last=False,\n )\n\n criterion = criterion.to(FLAGS.device)\n\n best_valid_loss = float(\"inf\")\n\n for epoch in range(FLAGS.n_epochs):\n\n start_time = time.time()\n logging.info(f\"Starting epoch {epoch + 1}.\")\n train_loss, train_acc = train(\n model, train_iterator, optimizer, criterion, accuracy_fn\n )\n valid_loss, valid_acc = evaluate(model, valid_iterator, criterion, accuracy_fn)\n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), \"tut2-model.pt\")\n\n logging.info(f\"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s\")\n logging.info(\n f\"\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%\"\n )\n scheduler.step(train_loss)\n logging.info(\n f\"\\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%\"\n )\n\n validation_accuracy_epochs.append(valid_acc)\n validation_loss_epochs.append(valid_loss)\n training_loss_epochs.append(train_loss)\n training_accuracy_epochs.append(train_acc)\n\n return (\n training_loss_epochs,\n training_accuracy_epochs,\n validation_loss_epochs,\n validation_accuracy_epochs,\n )\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef main(argv):\n start_time = time.time()\n\n # Convert flags for the epsilon = -1 shortcut\n if FLAGS.dp and FLAGS.epsilon < 0 and FLAGS.noise < 0:\n FLAGS.dp = False\n\n # No multiprocessing for large datasets (save RAM)\n if FLAGS.n_blocks > 50_000:\n logging.info(f\"Large dataset, we use a single thread for the loader.\")\n FLAGS.n_workers = 0\n\n # Build the dataset, either event level or user level\n train_data, test_data, valid_data, text_field = build_split_dataset()\n logging.info(\n f\"Number of samples for training: {len(train_data)}, validation: {len(valid_data)} and testing: {len(test_data)}\"\n )\n\n # Adapt the batch size and the virtual step size, unless it has been specified manually\n if FLAGS.dp and FLAGS.adaptive_batch_size and FLAGS.virtual_batch_multiplier <= 0:\n FLAGS.batch_size, FLAGS.virtual_batch_multiplier = compute_optimal_batch_size(\n FLAGS.batch_size, len(train_data)\n )\n logging.info(\n f\"Using real batch {FLAGS.batch_size} with multiplier {FLAGS.virtual_batch_multiplier}\"\n )\n if not FLAGS.dp:\n FLAGS.batch_size = FLAGS.non_dp_batch_size\n\n # Prepare the model and optimizer\n model = build_model(text_field).to(FLAGS.device)\n\n logging.info(f\"Number of trainable parameters: {count_parameters(model)}\")\n\n # optimizer = optim.Adam(model.parameters())\n optimizer = optim.AdamW(model.parameters(), lr=FLAGS.learning_rate, eps=1e-8)\n\n scheduler = 
ReduceLROnPlateau(optimizer, mode=\"min\", patience=3)\n\n # train_it = torch.utils.data.DataLoader(\n # train_data,\n # batch_size=2048,\n # shuffle=False,\n # num_workers=FLAGS.n_workers,\n # drop_last=False,\n # )\n # counts = {}\n # for i in range(11):\n # counts[i] = 0\n # for b in train_it:\n # for cat in b[:, 3]:\n # counts[int(cat)] += 1\n # s = sum(counts.values())\n # for cat, count in counts.items():\n # counts[cat] = count / s\n # logging.info(counts)\n\n if FLAGS.task == \"sentiment\":\n criterion = nn.BCEWithLogitsLoss().to(FLAGS.device)\n accuracy_fn = binary_accuracy\n\n # automotive: 0.03036145803296712\n # books: 0.41258122723567553\n # cds: 0.012897189083383703\n # clothing: 0.2025265712144095\n # games: 0.031613111956201506\n # groceries: 0.01949595483554337\n # home: 0.119920985593197\n # movies: 0.0484712255807162\n # pets: 0.03665525816121956\n # sports: 0.04961580907019007\n # tools: 0.035861209236496445\n\n elif FLAGS.task == \"product\":\n # criterion = nn.CrossEntropyLoss(\n # weight=torch.Tensor(\n # [0.05, 0.035, 0.03, 0.035, 0.05, 0.02, 0.12, 0.01, 0.03, 0.20, 0.41]\n # )\n # )\n criterion = nn.CrossEntropyLoss()\n accuracy_fn = multiclass_accuracy\n\n # Plug Opacus if DP training is activated\n if FLAGS.dp:\n if FLAGS.noise >= 0:\n logging.info(f\"User-provided noise: {FLAGS.noise}.\")\n else:\n logging.info(\"Computing noise for the given parameters.\")\n FLAGS.noise = compute_noise_from_target_epsilon(\n target_epsilon=FLAGS.epsilon,\n target_delta=FLAGS.delta,\n epochs=FLAGS.n_epochs,\n batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier\n if FLAGS.virtual_batch_multiplier > 0\n else FLAGS.batch_size,\n dataset_size=len(train_data),\n alphas=ALPHAS,\n )\n logging.info(f\"Noise computed from RDP budget: {FLAGS.noise}.\")\n\n # NOTE: when user-level DP is activated, the training dataset __len__ method returns\n # the number of users, and the DataLoader calls the batch-of-user method that overrides\n # the regular __getitem__ method\n\n # WARNING: fishy non-DP adaptive clipping\n privacy_engine = opacus.PrivacyEngine(\n module=model,\n batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier\n if FLAGS.virtual_batch_multiplier > 0\n else FLAGS.batch_size,\n sample_size=len(train_data),\n alphas=ALPHAS,\n noise_multiplier=FLAGS.noise,\n max_grad_norm=FLAGS.max_grad_norm,\n experimental=bool(FLAGS.dynamic_clipping),\n clipping_method=FLAGS.dynamic_clipping,\n clip_per_layer=bool(FLAGS.per_layer_clipping),\n )\n privacy_engine.attach(optimizer)\n\n # Do the actual training\n t = time.time()\n (\n training_loss_epochs,\n training_accuracy_epochs,\n validation_loss_epochs,\n validation_accuracy_epochs,\n ) = train_validate(\n train_data, valid_data, model, optimizer, criterion, accuracy_fn, scheduler\n )\n training_time = time.time() - t\n\n if FLAGS.dp:\n epsilon_consumed, best_alpha = optimizer.privacy_engine.get_privacy_spent(\n FLAGS.delta\n )\n epsilon_consumed = float(epsilon_consumed)\n best_alpha = float(best_alpha)\n logging.info(f\"Best alpha: {best_alpha}\")\n rdp_epsilons_consumed = (\n optimizer.privacy_engine.get_renyi_divergence()\n * optimizer.privacy_engine.steps\n ).tolist()\n\n logging.info(f\"RDP budget consumed: {rdp_epsilons_consumed} for orders.\")\n\n # Identical to planned budget when we don't have early stopping\n # rdp_epsilon_planned = compute_rdp_sgm(\n # epochs=FLAGS.n_epochs,\n # batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier\n # if FLAGS.virtual_batch_multiplier > 0\n # else FLAGS.batch_size,\n # 
dataset_size=len(train_data),\n # noise=FLAGS.noise,\n # alphas=ALPHAS,\n # )\n # logging.info(f\"Planned RDP budget: {rdp_epsilon_planned}\")\n else:\n epsilon_consumed = None\n rdp_epsilons_consumed = None\n best_alpha = None\n\n # Evaluate the model (non-DP evaluation here)\n testing_size = len(test_data)\n test_iterator = torch.utils.data.DataLoader(\n test_data,\n batch_size=FLAGS.batch_size,\n shuffle=True,\n num_workers=FLAGS.n_workers,\n drop_last=False,\n )\n final_loss, final_accuracy = evaluate(model, test_iterator, criterion, accuracy_fn)\n\n # Collect the metrics and the logs\n logs = {\n \"training_time\": training_time,\n \"total_time\": time.time() - start_time,\n \"test_size\": testing_size,\n \"n_trainable_parameters\": count_parameters(model),\n }\n\n # Update the logs with the training data\n if isinstance(train_data, UserTimeLevelDataset):\n logs[\"train_size\"] = train_data.get_n_events()\n logs[\"n_train_users\"] = len(train_data)\n else:\n logs[\"train_size\"] = len(train_data)\n\n logs.update(\n flags_to_dict(dataset_args, model_args, training_args)\n ) # Dump the configuration flags\n metrics = {\n \"accuracy\": final_accuracy,\n \"training_loss_epochs\": training_loss_epochs,\n \"training_accuracy_epochs\": training_accuracy_epochs,\n \"validation_loss_epochs\": validation_loss_epochs,\n \"validation_accuracy_epochs\": validation_accuracy_epochs,\n \"loss\": final_loss,\n \"epsilon\": epsilon_consumed,\n \"target_epsilon\": FLAGS.epsilon,\n \"alphas\": ALPHAS,\n \"rdp_epsilons\": rdp_epsilons_consumed,\n \"best_alpha\": best_alpha,\n # \"dataset_files\": os.listdir(FLAGS.dataset_dir),\n }\n\n # Save or logging.info the outputs\n # Useless to separate for our experiments\n if FLAGS.metrics_path != \"\":\n save_yaml(FLAGS.metrics_path, metrics)\n logging.info(f\"Saved metrics: {FLAGS.metrics_path}\")\n else:\n logging.info(\"Metrics not saved but concatenated to the logs.\")\n logs.update(metrics)\n\n if FLAGS.log_path != \"\":\n save_yaml(FLAGS.log_path, logs)\n logging.info(f\"Saved logs: {FLAGS.log_path}\")\n\n if FLAGS.model_path != \"\":\n save_model(FLAGS.model_path, model)\n logging.info(f\"Saved model: {FLAGS.model_path}\")\n\n logging.info(logs)\n logging.info(metrics)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
] | [
[
"numpy.sqrt",
"torch.utils.data.DataLoader",
"torch.ones_like",
"torch.zeros_like",
"torch.no_grad",
"numpy.random.seed",
"numpy.random.choice",
"torch.nn.CrossEntropyLoss",
"torch.nn.BCEWithLogitsLoss",
"torch.zeros",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
]
] |
LEGOS-CTOH/xarray | [
"d543d09aaa7fdfc4f5f92edcd4e3c0af1207c95b"
] | [
"xarray/core/nanops.py"
] | [
"import numpy as np\n\nfrom . import dtypes, nputils, utils\nfrom .duck_array_ops import _dask_or_eager_func, count, fillna, isnull, where_method\nfrom .pycompat import dask_array_type\n\ntry:\n import dask.array as dask_array\nexcept ImportError:\n dask_array = None\n\n\ndef _replace_nan(a, val):\n \"\"\"\n replace nan in a by val, and returns the replaced array and the nan\n position\n \"\"\"\n mask = isnull(a)\n return where_method(val, mask, a), mask\n\n\ndef _maybe_null_out(result, axis, mask, min_count=1):\n \"\"\"\n xarray version of pandas.core.nanops._maybe_null_out\n \"\"\"\n if hasattr(axis, \"__len__\"): # if tuple or list\n raise ValueError(\n \"min_count is not available for reduction with more than one dimensions.\"\n )\n\n if axis is not None and getattr(result, \"ndim\", False):\n null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0\n if null_mask.any():\n dtype, fill_value = dtypes.maybe_promote(result.dtype)\n result = result.astype(dtype)\n result[null_mask] = fill_value\n\n elif getattr(result, \"dtype\", None) not in dtypes.NAT_TYPES:\n null_mask = mask.size - mask.sum()\n if null_mask < min_count:\n result = np.nan\n\n return result\n\n\ndef _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs):\n \"\"\" In house nanargmin, nanargmax for object arrays. Always return integer\n type\n \"\"\"\n valid_count = count(value, axis=axis)\n value = fillna(value, fill_value)\n data = _dask_or_eager_func(func)(value, axis=axis, **kwargs)\n\n # TODO This will evaluate dask arrays and might be costly.\n if (valid_count == 0).any():\n raise ValueError(\"All-NaN slice encountered\")\n\n return data\n\n\ndef _nan_minmax_object(func, fill_value, value, axis=None, **kwargs):\n \"\"\" In house nanmin and nanmax for object array \"\"\"\n valid_count = count(value, axis=axis)\n filled_value = fillna(value, fill_value)\n data = getattr(np, func)(filled_value, axis=axis, **kwargs)\n if not hasattr(data, \"dtype\"): # scalar case\n data = fill_value if valid_count == 0 else data\n # we've computed a single min, max value of type object.\n # don't let np.array turn a tuple back into an array\n return utils.to_0d_object_array(data)\n return where_method(data, valid_count != 0)\n\n\ndef nanmin(a, axis=None, out=None):\n if a.dtype.kind == \"O\":\n return _nan_minmax_object(\"min\", dtypes.get_pos_infinity(a.dtype), a, axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanmin(a, axis=axis)\n\n\ndef nanmax(a, axis=None, out=None):\n if a.dtype.kind == \"O\":\n return _nan_minmax_object(\"max\", dtypes.get_neg_infinity(a.dtype), a, axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanmax(a, axis=axis)\n\n\ndef nanargmin(a, axis=None):\n if a.dtype.kind == \"O\":\n fill_value = dtypes.get_pos_infinity(a.dtype)\n return _nan_argminmax_object(\"argmin\", fill_value, a, axis=axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanargmin(a, axis=axis)\n\n\ndef nanargmax(a, axis=None):\n if a.dtype.kind == \"O\":\n fill_value = dtypes.get_neg_infinity(a.dtype)\n return _nan_argminmax_object(\"argmax\", fill_value, a, axis=axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanargmax(a, axis=axis)\n\n\ndef nansum(a, axis=None, dtype=None, out=None, min_count=None):\n a, mask = _replace_nan(a, 0)\n result = _dask_or_eager_func(\"sum\")(a, axis=axis, dtype=dtype)\n if min_count is not None:\n return 
_maybe_null_out(result, axis, mask, min_count)\n else:\n return result\n\n\ndef _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs):\n \"\"\" In house nanmean. ddof argument will be used in _nanvar method \"\"\"\n from .duck_array_ops import count, fillna, _dask_or_eager_func, where_method\n\n valid_count = count(value, axis=axis)\n value = fillna(value, 0)\n # As dtype inference is impossible for object dtype, we assume float\n # https://github.com/dask/dask/issues/3162\n if dtype is None and value.dtype.kind == \"O\":\n dtype = value.dtype if value.dtype.kind in [\"cf\"] else float\n\n data = _dask_or_eager_func(\"sum\")(value, axis=axis, dtype=dtype, **kwargs)\n data = data / (valid_count - ddof)\n return where_method(data, valid_count != 0)\n\n\ndef nanmean(a, axis=None, dtype=None, out=None):\n if a.dtype.kind == \"O\":\n return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype)\n\n if isinstance(a, dask_array_type):\n return dask_array.nanmean(a, axis=axis, dtype=dtype)\n\n return np.nanmean(a, axis=axis, dtype=dtype)\n\n\ndef nanmedian(a, axis=None, out=None):\n return _dask_or_eager_func(\"nanmedian\", eager_module=nputils)(a, axis=axis)\n\n\ndef _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs):\n value_mean = _nanmean_ddof_object(\n ddof=0, value=value, axis=axis, keepdims=True, **kwargs\n )\n squared = (value.astype(value_mean.dtype) - value_mean) ** 2\n return _nanmean_ddof_object(ddof, squared, axis=axis, keepdims=keepdims, **kwargs)\n\n\ndef nanvar(a, axis=None, dtype=None, out=None, ddof=0):\n if a.dtype.kind == \"O\":\n return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof)\n\n return _dask_or_eager_func(\"nanvar\", eager_module=nputils)(\n a, axis=axis, dtype=dtype, ddof=ddof\n )\n\n\ndef nanstd(a, axis=None, dtype=None, out=None, ddof=0):\n return _dask_or_eager_func(\"nanstd\", eager_module=nputils)(\n a, axis=axis, dtype=dtype, ddof=ddof\n )\n\n\ndef nanprod(a, axis=None, dtype=None, out=None, min_count=None):\n a, mask = _replace_nan(a, 1)\n result = _dask_or_eager_func(\"nanprod\")(a, axis=axis, dtype=dtype, out=out)\n if min_count is not None:\n return _maybe_null_out(result, axis, mask, min_count)\n else:\n return result\n\n\ndef nancumsum(a, axis=None, dtype=None, out=None):\n return _dask_or_eager_func(\"nancumsum\", eager_module=nputils)(\n a, axis=axis, dtype=dtype\n )\n\n\ndef nancumprod(a, axis=None, dtype=None, out=None):\n return _dask_or_eager_func(\"nancumprod\", eager_module=nputils)(\n a, axis=axis, dtype=dtype\n )\n"
] | [
[
"numpy.nanmean"
]
] |
thilinicooray/mac-network-pytorch | [
"0e4bf3f7f301570b652490f697758361c866f3c1"
] | [
"main_verbq_working.py"
] | [
"import torch\n#from imsitu_encoder_verbq import imsitu_encoder\nfrom imsitu_encoder_roleqverbq_embdhz import imsitu_encoder\nfrom imsitu_loader import imsitu_loader_roleq_updated\nfrom imsitu_scorer_log import imsitu_scorer\nimport json\nimport model_verbq_working\nimport os\nimport utils\nimport time\nimport random\n#from torchviz import make_dot\n#from graphviz import Digraph\n\n\ndef train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, max_epoch, model_dir, encoder, gpu_mode, clip_norm, lr_max, model_name, args,eval_frequency=4):\n model.train()\n train_loss = 0\n total_steps = 0\n print_freq = 400\n dev_score_list = []\n time_all = time.time()\n\n if model.gpu_mode >= 0 :\n ngpus = 2\n device_array = [i for i in range(0,ngpus)]\n\n pmodel = torch.nn.DataParallel(model, device_ids=device_array)\n else:\n pmodel = model\n #pmodel = model\n\n '''if scheduler.get_lr()[0] < lr_max:\n scheduler.step()'''\n\n top1 = imsitu_scorer(encoder, 1, 3)\n top5 = imsitu_scorer(encoder, 5, 3)\n\n '''print('init param data check :')\n for f in model.parameters():\n if f.requires_grad:\n print(f.data.size())'''\n\n\n for epoch in range(max_epoch):\n\n #print('current sample : ', i, img.size(), verb.size(), roles.size(), labels.size())\n #sizes batch_size*3*height*width, batch*504*1, batch*6*190*1, batch*3*6*lebale_count*1\n mx = len(train_loader)\n for i, (id, img, verb, labels) in enumerate(train_loader):\n #print(\"epoch{}-{}/{} batches\\r\".format(epoch,i+1,mx)) ,\n t0 = time.time()\n t1 = time.time()\n total_steps += 1\n\n if gpu_mode >= 0:\n img = torch.autograd.Variable(img.cuda())\n verb = torch.autograd.Variable(verb.cuda())\n labels = torch.autograd.Variable(labels.cuda())\n else:\n img = torch.autograd.Variable(img)\n verb = torch.autograd.Variable(verb)\n labels = torch.autograd.Variable(labels)\n\n\n\n '''print('all inputs')\n print(img)\n print('=========================================================================')\n print(verb)\n print('=========================================================================')\n print(roles)\n print('=========================================================================')\n print(labels)'''\n\n verb_predict, loss = pmodel(img, verb, labels)\n #verb_predict, rol1pred, role_predict = pmodel.forward_eval5(img)\n #print (\"forward time = {}\".format(time.time() - t1))\n t1 = time.time()\n\n '''g = make_dot(verb_predict, model.state_dict())\n g.view()'''\n\n #loss = model.calculate_loss(verb_predict, verb)\n #loss = model.calculate_eval_loss_new(verb_predict, verb, rol1pred, labels, args)\n #loss = loss_ * random.random() #try random loss\n #print (\"loss time = {}\".format(time.time() - t1))\n t1 = time.time()\n #print('current loss = ', loss)\n if gpu_mode >= 0 :\n #loss.backward(torch.ones([2,1]).to(torch.device('cuda')))\n loss.mean().backward()\n else:\n loss.backward()\n #loss.backward()\n #print (\"backward time = {}\".format(time.time() - t1))\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)\n\n\n '''for param in filter(lambda p: p.requires_grad,model.parameters()):\n print(param.grad.data.sum())'''\n\n #start debugger\n #import pdb; pdb.set_trace()\n\n\n optimizer.step()\n\n '''print('grad check after:')\n for f in model.conv.parameters():\n print('data is')\n print(f.data [0][0])\n #print('grad is')\n #print(f.grad[0][0].item())\n break'''\n\n optimizer.zero_grad()\n\n\n\n train_loss += float(loss.mean())\n\n #top1.add_point_eval5(verb_predict, verb, role_predict, labels)\n 
#top5.add_point_eval5(verb_predict, verb, role_predict, labels)\n\n top1.add_point_verb_only_eval(id, verb_predict, verb)\n top5.add_point_verb_only_eval(id, verb_predict, verb)\n\n\n if total_steps % print_freq == 0:\n top1_a = top1.get_average_results()\n top5_a = top5.get_average_results()\n print (\"{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}\"\n .format(total_steps-1,epoch,i, utils.format_dict(top1_a, \"{:.2f}\", \"1-\"),\n utils.format_dict(top5_a,\"{:.2f}\",\"5-\"), loss.mean().item(),\n train_loss / ((total_steps-1)%eval_frequency) ))\n\n\n if total_steps % eval_frequency == 0:\n top1, top5, val_loss = eval(model, dev_loader, encoder, gpu_mode)\n model.train()\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"]\n avg_score /= 8\n\n print ('Dev {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))\n #print('Dev loss :', val_loss)\n\n dev_score_list.append(avg_score)\n max_score = max(dev_score_list)\n\n if max_score == dev_score_list[-1]:\n torch.save(model.state_dict(), model_dir + \"/{}_verbq_iter0_change.model\".format( model_name))\n print ('New best model saved! {0}'.format(max_score))\n\n #eval on the trainset\n\n '''top1, top5, val_loss = eval(model, traindev_loader, encoder, gpu_mode)\n model.train()\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"] + top5_avg[\"value*\"] + top5_avg[\"value-all*\"]\n avg_score /= 8\n\n print ('TRAINDEV {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))'''\n\n print('current train loss', train_loss)\n train_loss = 0\n top1 = imsitu_scorer(encoder, 1, 3)\n top5 = imsitu_scorer(encoder, 5, 3)\n\n del verb_predict, loss, img, verb, labels\n #break\n print('Epoch ', epoch, ' completed!')\n scheduler.step()\n #break\n\ndef eval(model, dev_loader, encoder, gpu_mode, write_to_file = False):\n model.eval()\n val_loss = 0\n\n print ('evaluating model...')\n top1 = imsitu_scorer(encoder, 1, 3, write_to_file)\n top5 = imsitu_scorer(encoder, 5, 3)\n with torch.no_grad():\n mx = len(dev_loader)\n for i, (img_id, img, verb, labels) in enumerate(dev_loader):\n #print(\"{}/{} batches\\r\".format(i+1,mx)) ,\n '''im_data = torch.squeeze(im_data,0)\n im_info = torch.squeeze(im_info,0)\n gt_boxes = torch.squeeze(gt_boxes,0)\n num_boxes = torch.squeeze(num_boxes,0)\n verb = torch.squeeze(verb,0)\n roles = torch.squeeze(roles,0)\n labels = torch.squeeze(labels,0)'''\n\n if gpu_mode >= 0:\n img = torch.autograd.Variable(img.cuda())\n verb = torch.autograd.Variable(verb.cuda())\n labels = torch.autograd.Variable(labels.cuda())\n else:\n img = torch.autograd.Variable(img)\n verb = torch.autograd.Variable(verb)\n labels = torch.autograd.Variable(labels)\n\n verb_predict, _= model(img, verb, labels)\n '''loss = model.calculate_eval_loss(verb_predict, verb, role_predict, labels)\n val_loss += loss.item()'''\n top1.add_point_verb_only_eval(img_id, verb_predict, verb)\n top5.add_point_verb_only_eval(img_id, verb_predict, verb)\n\n del img, verb, labels\n break\n\n #return top1, top5, 
val_loss/mx\n\n return top1, top5, 0\n\ndef main():\n\n import argparse\n parser = argparse.ArgumentParser(description=\"imsitu VSRL. Training, evaluation and prediction.\")\n parser.add_argument(\"--gpuid\", default=-1, help=\"put GPU id > -1 in GPU mode\", type=int)\n #parser.add_argument(\"--command\", choices = [\"train\", \"eval\", \"resume\", 'predict'], required = True)\n parser.add_argument('--resume_training', action='store_true', help='Resume training from the model [resume_model]')\n parser.add_argument('--resume_model', type=str, default='', help='The model we resume')\n parser.add_argument('--verb_module', type=str, default='', help='pretrained verb module')\n parser.add_argument('--role_module', type=str, default='', help='pretrained role module')\n parser.add_argument('--train_role', action='store_true', help='cnn fix, verb fix, role train from the scratch')\n parser.add_argument('--finetune_verb', action='store_true', help='cnn fix, verb finetune, role train from the scratch')\n parser.add_argument('--finetune_cnn', action='store_true', help='cnn finetune, verb finetune, role train from the scratch')\n parser.add_argument('--output_dir', type=str, default='./trained_models', help='Location to output the model')\n parser.add_argument('--evaluate', action='store_true', help='Only use the testing mode')\n parser.add_argument('--test', action='store_true', help='Only use the testing mode')\n parser.add_argument('--dataset_folder', type=str, default='./imSitu', help='Location of annotations')\n parser.add_argument('--imgset_dir', type=str, default='./resized_256', help='Location of original images')\n parser.add_argument('--frcnn_feat_dir', type=str, help='Location of output from detectron')\n #todo: train role module separately with gt verbs\n\n args = parser.parse_args()\n\n batch_size = 640\n #lr = 5e-6\n lr = 0.0001\n lr_max = 5e-4\n lr_gamma = 0.1\n lr_step = 15\n clip_norm = 0.5\n weight_decay = 1e-4\n n_epoch = 500\n n_worker = 3\n\n #dataset_folder = 'imSitu'\n #imgset_folder = 'resized_256'\n dataset_folder = args.dataset_folder\n imgset_folder = args.imgset_dir\n\n print('model spec :, top down att with role q ')\n\n train_set = json.load(open(dataset_folder + \"/updated_train_new.json\"))\n imsitu_roleq = json.load(open(\"imsitu_data/imsitu_questions_prev.json\"))\n verb_templates = json.load(open(\"imsitu_data/verb_questions_template_new.json\"))\n encoder = imsitu_encoder(train_set, imsitu_roleq, verb_templates)\n\n model = model_verbq_working.BaseModel(encoder, args.gpuid)\n\n # To group up the features\n #cnn_features, role_features = utils.group_features_noun(model)\n cnn_features, role_features = utils.group_features_noun(model)\n\n train_set = imsitu_loader_roleq_updated(imgset_folder, train_set, encoder, model.train_preprocess())\n\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True, num_workers=n_worker)\n\n dev_set = json.load(open(dataset_folder +\"/dev.json\"))\n dev_set = imsitu_loader_roleq_updated(imgset_folder, dev_set, encoder, model.dev_preprocess())\n dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=4, shuffle=True, num_workers=n_worker)\n\n test_set = json.load(open(dataset_folder +\"/test.json\"))\n test_set = imsitu_loader_roleq_updated(imgset_folder, test_set, encoder, model.dev_preprocess())\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=True, num_workers=n_worker)\n\n traindev_set = json.load(open(dataset_folder +\"/dev.json\"))\n traindev_set = 
imsitu_loader_roleq_updated(imgset_folder, traindev_set, encoder, model.dev_preprocess())\n traindev_loader = torch.utils.data.DataLoader(traindev_set, batch_size=8, shuffle=True, num_workers=n_worker)\n\n\n #utils.load_net(args.verb_module, [model.verb_module])\n #utils.load_net(args.role_module, [model.role_module])\n model_name = 'train_full'\n\n\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n\n torch.manual_seed(1234)\n if args.gpuid >= 0:\n #print('GPU enabled')\n model.cuda()\n torch.cuda.manual_seed(1234)\n torch.backends.cudnn.deterministic = True\n\n optimizer = torch.optim.Adamax([\n {'params': cnn_features, 'lr': 5e-5},\n {'params': role_features}\n ], lr=1e-3)\n\n #optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_step, gamma=lr_gamma)\n #gradient clipping, grad check\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)\n\n if args.evaluate:\n top1, top5, val_loss = eval(model, dev_loader, encoder, args.gpuid, write_to_file = True)\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"] + top5_avg[\"value*\"] + top5_avg[\"value-all*\"]\n avg_score /= 8\n\n print ('Dev average :{:.2f} {} {}'.format( avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))\n\n #write results to csv file\n role_dict = top1.role_dict\n fail_val_all = top1.value_all_dict\n pass_val_dict = top1.vall_all_correct\n\n with open('role_pred_data.json', 'w') as fp:\n json.dump(role_dict, fp, indent=4)\n\n with open('fail_val_all.json', 'w') as fp:\n json.dump(fail_val_all, fp, indent=4)\n\n with open('pass_val_all.json', 'w') as fp:\n json.dump(pass_val_dict, fp, indent=4)\n\n print('Writing predictions to file completed !')\n\n elif args.test:\n top1, top5, val_loss = eval(model, test_loader, encoder, args.gpuid, write_to_file = True)\n\n top1_avg = top1.get_average_results()\n top5_avg = top5.get_average_results()\n\n avg_score = top1_avg[\"verb\"] + top1_avg[\"value\"] + top1_avg[\"value-all\"] + top5_avg[\"verb\"] + \\\n top5_avg[\"value\"] + top5_avg[\"value-all\"] + top5_avg[\"value*\"] + top5_avg[\"value-all*\"]\n avg_score /= 8\n\n print ('Test average :{:.2f} {} {}'.format( avg_score*100,\n utils.format_dict(top1_avg,'{:.2f}', '1-'),\n utils.format_dict(top5_avg, '{:.2f}', '5-')))\n\n\n else:\n\n print('Model training started!')\n train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, n_epoch, args.output_dir, encoder, args.gpuid, clip_norm, lr_max, model_name, args)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.no_grad",
"torch.autograd.Variable",
"torch.nn.DataParallel",
"torch.optim.Adamax"
]
] |
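The row above records a PyTorch imSitu training script together with the torch calls it uses (DataLoader, DataParallel, Adamax, ExponentialLR, manual_seed, no_grad, autograd.Variable). Below is a minimal, hedged sketch — not the repository's code — showing how those listed APIs typically fit together in a training/evaluation loop; the model, dataset, and hyperparameters are placeholders, and tensors are used directly since `torch.autograd.Variable` is legacy.

```python
import torch
import torch.nn as nn

# Illustrative only: placeholder model/data wired with the torch APIs listed above.
torch.manual_seed(1234)                       # torch.manual_seed
use_gpu = torch.cuda.is_available()
if use_gpu:
    torch.cuda.manual_seed(1234)              # torch.cuda.manual_seed

model = nn.Linear(10, 2)                      # stand-in for the imSitu model
if use_gpu:
    model = nn.DataParallel(model.cuda())     # torch.nn.DataParallel

optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3)                # torch.optim.Adamax
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)   # ExponentialLR

data = torch.utils.data.TensorDataset(torch.randn(64, 10),
                                      torch.randint(0, 2, (64,)))
loader = torch.utils.data.DataLoader(data, batch_size=8, shuffle=True)     # DataLoader

criterion = nn.CrossEntropyLoss()
for x, y in loader:
    if use_gpu:
        x, y = x.cuda(), y.cuda()
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)  # gradient clipping, as in the script
    optimizer.step()
scheduler.step()

model.eval()
with torch.no_grad():                         # torch.no_grad for evaluation
    sample = torch.randn(4, 10).cuda() if use_gpu else torch.randn(4, 10)
    _ = model(sample)
```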
GabrielWen/spartan | [
"ce3bf7f2bb551d7f996a1884acef819b620cc854"
] | [
"spartan/examples/ssvd/qr.py"
] | [
"import spartan\nfrom spartan import expr, core\nimport numpy as np\nfrom sys import stderr\n\ndef qr(Y):\n ''' Compute the thin qr factorization of a matrix.\n Factor the matrix Y as QR, where Q is orthonormal and R is\n upper-triangular.\n\n Parameters\n ----------\n Y: Spartan array of shape (M, K).\n\n Notes\n ----------\n Y'Y must fit in memory. Y is a Spartan array of shape (M, K).\n Since this QR decomposition is mainly used in Stochastic SVD,\n K will be the rank of the matrix of shape (M, N) and the assumption\n is that the rank K should be far less than M or N.\n\n Returns\n -------\n Q : Spartan array of shape (M, K).\n R : Numpy array of shape (K, K).\n '''\n # Since the K should be far less than M. So the matrix multiplication\n # should be the bottleneck instead of local cholesky decomposition and\n # finding inverse of R. So we just parallelize the matrix mulitplication.\n # If K is really large, we may consider using our Spartan cholesky\n # decomposition, but for now, we use numpy version, it works fine.\n\n # YTY = Y'Y. YTY has shape of (K, K).\n YTY = expr.dot(expr.transpose(Y), Y).optimized().glom()\n\n # Do cholesky decomposition and get R.\n R = np.linalg.cholesky(YTY).T\n\n # Find the inverse of R\n inv_R = np.linalg.inv(R)\n\n # Q = Y * inv(R)\n Q = expr.dot(Y, inv_R).optimized().evaluate()\n\n return Q, R\n"
] | [
[
"numpy.linalg.inv",
"numpy.linalg.cholesky"
]
] |
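The qr.py row above computes a thin QR factorization by taking the Cholesky factor of the small K x K Gram matrix Y'Y and then forming Q = Y R^{-1}. As a sanity check of that idea, here is a plain-NumPy sketch (without Spartan's distributed `expr` arrays); the matrix sizes and random data are illustrative assumptions.

```python
import numpy as np

# If Y = QR with R upper-triangular, then Y'Y = R'R, so R is the transpose of the
# Cholesky factor of the K x K Gram matrix and Q = Y @ inv(R).
rng = np.random.default_rng(0)
M, K = 1000, 5                      # assumes K << M, as in the docstring above
Y = rng.standard_normal((M, K))

YTY = Y.T @ Y                       # K x K Gram matrix, assumed to fit in memory
R = np.linalg.cholesky(YTY).T       # upper-triangular, so that Y'Y = R'R
Q = Y @ np.linalg.inv(R)            # thin orthonormal factor

assert np.allclose(Q.T @ Q, np.eye(K))   # Q is orthonormal
assert np.allclose(Q @ R, Y)             # QR reproduces Y
```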
GrantRoss-Tenki/Malawi-CQC-CSC-OSU-Work | [
"a720e0451579945ba10eafdafe2e0d59a86d5cfb"
] | [
"Graphing_Summaries.py"
] | [
"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n#from pylab import plot, show, xlim,figure,hold, ylim,legend, boxplot, setup, axes\r\n\r\nimport seaborn as sns\r\n\r\n# Is this a personal or work computer\r\n# Are you graphing for hood or no hood\r\n\r\nComputer = 'personal' #or 'personal' or 'work'\r\nHood_or_no = 'no_hood' # 'no_hood' or 'hood'\r\n#what household do you want to remove make sure it is in ascending order\r\n# if there is nothing, then put a placeholder of 1045 or higher\r\nHousehold_removal = [1045]\r\n#Household_removal = Household_removal.sort(reverse=False)\r\nHousehold_removal_NO_Hood_fuel_day_adult = [1045]\r\nHousehold_removal_Hood_fuel_day_adult = [2020]\r\n\r\nHousehold_removal_NO_Hood_PM = [1045]\r\nHousehold_removal_Hood_PM = [2020]\r\n\r\n\r\npd.set_option('display.max_rows', 500)\r\npd.set_option('display.max_columns', 500)\r\npd.set_option('display.width', 1000)\r\n\r\nif Hood_or_no == 'hood':\r\n C_Place_holder = 2001\r\nelse:\r\n C_Place_holder = 1001\r\n \r\nif Computer == 'personal' and Hood_or_no == 'no_hood':\r\n # 1N\r\n datafile_path_day_1N =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_Summary_Day_1_exact.csv\"\r\n Day_1N = pd.read_csv(datafile_path_day_1N, skiprows=2)\r\n datafile_path_event_1N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_Summary_Event_1_exact.csv\"\r\n Event_1N = pd.read_csv(datafile_path_event_1N, skiprows=2)\r\n # there is no second exact in phase 1N\r\n #1N Survey\r\n datafile_path_survey_1N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv\"\r\n Filter_1n_survey = pd.read_csv(datafile_path_survey_1N, skiprows=0)\r\n #print(Filter_1n_survey.iloc[0:40, :])\r\n Survey_1N = Filter_1n_survey.iloc[0:40,:]\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_1N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_1N_24hr = pd.read_csv(data_file_path_24_PM_1N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_1N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_1N_24hr = pd.read_csv(data_file_path_24_Fuel_1N, skiprows=0)\r\n \r\n #2N\r\n datafile_path_day_2N =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Summary_Day_1_exact.csv\"\r\n Day_2N = pd.read_csv(datafile_path_day_2N, skiprows=2)\r\n datafile_path_event_2N_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Summary_Event_1_exact.csv\"\r\n Event_2N_1 = pd.read_csv(datafile_path_event_2N_1, skiprows=2)\r\n #2N second Exact\r\n datafile_path_event_2N_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Summary_Event_2_exact.csv\"\r\n Event_2N_2 = pd.read_csv(datafile_path_event_2N_2, skiprows=2)\r\n #2N Survey\r\n datafile_path_survey_2N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Survey_summary_.csv\"\r\n Survey_2N = pd.read_csv(datafile_path_survey_2N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_2N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_2N_24hr = pd.read_csv(data_file_path_24_PM_2N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_2N = 
\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_2N_24hr = pd.read_csv(data_file_path_24_Fuel_2N, skiprows=0)\r\n \r\n #3N\r\n datafile_path_day_3N =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Summary_Day_1_exact.csv\"\r\n Day_3N = pd.read_csv(datafile_path_day_3N, skiprows=2)\r\n datafile_path_event_3N_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Summary_Event_1_exact.csv\"\r\n Event_3N_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)\r\n #3N second Exact\r\n datafile_path_event_3N_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Summary_Event_2_exact.csv\"\r\n Event_3N_2 = pd.read_csv(datafile_path_event_3N_2, skiprows=2)\r\n #3N Survey \r\n datafile_path_survey_3N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Survey_summary_.csv\"\r\n Survey_3N = pd.read_csv(datafile_path_survey_3N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_3N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_3N_24hr = pd.read_csv(data_file_path_24_PM_3N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_3N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_3N_24hr = pd.read_csv(data_file_path_24_Fuel_3N, skiprows=0)\r\n \r\n #4N\r\n datafile_path_day_4N =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Summary_Day_1_exact.csv\"\r\n Day_4N = pd.read_csv(datafile_path_day_4N, skiprows=2)\r\n datafile_path_event_4N_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Summary_Event_1_exact.csv\"\r\n Event_4N_1 = pd.read_csv(datafile_path_event_4N_1, skiprows=2)\r\n #4N second Exact\r\n datafile_path_event_4N_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Summary_Event_2_exact.csv\"\r\n Event_4N_2 = pd.read_csv(datafile_path_event_4N_2, skiprows=2)\r\n #4N Survey \r\n datafile_path_survey_4N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Survey_summary_.csv\"\r\n Survey_4N = pd.read_csv(datafile_path_survey_4N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_4N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_4N_24hr = pd.read_csv(data_file_path_24_PM_4N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_4N = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_4N_24hr = pd.read_csv(data_file_path_24_Fuel_4N, skiprows=0)\r\n \r\nelif Computer == 'personal' and Hood_or_no == 'hood':\r\n #1H\r\n datafile_path_day_1H =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_Summary_Day_1_exact.csv\"\r\n Day_1H = pd.read_csv(datafile_path_day_1H, skiprows=2)\r\n datafile_path_event_1H =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_Summary_Event_1_exact.csv\"\r\n Event_1H = pd.read_csv(datafile_path_event_1H, skiprows=2)\r\n #there is no second exact in phase 1H\r\n #1H Survey (row 40 or so afterward is Hood portion column 1 is houshold number)\r\n datafile_path_survey_1H = 
\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv\"\r\n Survey_1H = pd.read_csv(datafile_path_survey_1H, skiprows=40)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_1H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_1H_24hr = pd.read_csv(data_file_path_24_PM_1H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_1H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_1H_24hr = pd.read_csv(data_file_path_24_fuel_1H, skiprows=0)\r\n \r\n \r\n #2H\r\n datafile_path_day_2H =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Summary_Day_1_exact.csv\"\r\n Day_2H = pd.read_csv(datafile_path_day_2H, skiprows=2)\r\n datafile_path_event_2H_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Summary_Event_1_exact.csv\"\r\n Event_2H_1 = pd.read_csv(datafile_path_event_2H_1, skiprows=2)\r\n #2H second Exact\r\n datafile_path_event_2H_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Summary_Event_2_exact.csv\"\r\n Event_2H_2 = pd.read_csv(datafile_path_event_2H_2, skiprows=2)\r\n #2H survey \r\n datafile_path_survey_2H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Survey_summary_.csv\"\r\n Survey_2H = pd.read_csv(datafile_path_survey_2H, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_2H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_2H_24hr = pd.read_csv(data_file_path_24_PM_2H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_2H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_2H_24hr = pd.read_csv(data_file_path_24_fuel_2H, skiprows=0)\r\n \r\n #3H\r\n datafile_path_day_3H =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Summary_Day_1_exact.csv\"\r\n Day_3H = pd.read_csv(datafile_path_day_3H, skiprows=2)\r\n datafile_path_event_3N_1 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Summary_Event_1_exact.csv\"\r\n Event_3H_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)\r\n #3H second Exact\r\n datafile_path_event_3H_2 =\"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Summary_Event_2_exact.csv\"\r\n Event_3H_2 = pd.read_csv(datafile_path_event_3H_2, skiprows=2)\r\n #3H survey \r\n datafile_path_survey_3H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Survey_summary_.csv\"\r\n Survey_3H = pd.read_csv(datafile_path_survey_3H, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_3H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_3H_24hr = pd.read_csv(data_file_path_24_PM_3H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_3H = \"C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_3H_24hr = pd.read_csv(data_file_path_24_fuel_3H, skiprows=0)\r\n \r\n #work uses box information and not local data\r\nelif Computer == 'work' and Hood_or_no == 'no_hood':\r\n # 1N for box file 
system\r\n datafile_path_day_1N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_Summary_Day_1_exact.csv\"\r\n Day_1N = pd.read_csv(datafile_path_day_1N, skiprows=2)\r\n datafile_path_event_1N =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_Summary_Event_1_exact.csv\"\r\n Event_1N = pd.read_csv(datafile_path_event_1N, skiprows=2)\r\n # there is no second exact in phase 1N\r\n #1N Survey \r\n datafile_path_survey_1N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv\"\r\n Filter_1n_survey = pd.read_csv(datafile_path_survey_1N, skiprows=0)\r\n #print(Filter_1n_survey.iloc[0:40, :])\r\n Survey_1N = Filter_1n_survey.iloc[0:40,:]\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_1N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_1N_24hr = pd.read_csv(data_file_path_24_PM_1N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_1N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_1N_24hr = pd.read_csv(data_file_path_24_Fuel_1N, skiprows=0)\r\n \r\n #2N\r\n datafile_path_day_2N =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Summary_Day_1_exact.csv\"\r\n Day_2N = pd.read_csv(datafile_path_day_2N, skiprows=2)\r\n datafile_path_event_2N_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Summary_Event_1_exact.csv\"\r\n Event_2N_1 = pd.read_csv(datafile_path_event_2N_1, skiprows=2)\r\n #2N second Exact\r\n datafile_path_event_2N_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Summary_Event_2_exact.csv\"\r\n Event_2N_2 = pd.read_csv(datafile_path_event_2N_2, skiprows=2)\r\n #2N Survey\r\n datafile_path_survey_2N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Survey_summary_.csv\"\r\n Survey_2N = pd.read_csv(datafile_path_survey_2N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_2N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_2N_24hr = pd.read_csv(data_file_path_24_PM_2N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_2N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_2N_24hr = pd.read_csv(data_file_path_24_Fuel_2N, skiprows=0)\r\n \r\n #3N\r\n datafile_path_day_3N =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Summary_Day_1_exact.csv\"\r\n Day_3N = pd.read_csv(datafile_path_day_3N, skiprows=2)\r\n datafile_path_event_3N_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Summary_Event_1_exact.csv\"\r\n Event_3N_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)\r\n #3N second Exact\r\n datafile_path_event_3N_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Summary_Event_2_exact.csv\"\r\n Event_3N_2 = pd.read_csv(datafile_path_event_3N_2, skiprows=2)\r\n #3N survey\r\n datafile_path_survey_3N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Survey_summary_.csv\"\r\n Survey_3N = pd.read_csv(datafile_path_survey_3N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_3N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_3N_24hr = pd.read_csv(data_file_path_24_PM_3N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_3N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_3N_24hr = pd.read_csv(data_file_path_24_Fuel_3N, skiprows=0)\r\n \r\n #4N\r\n 
datafile_path_day_4N =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Summary_Day_1_exact.csv\"\r\n Day_4N = pd.read_csv(datafile_path_day_4N, skiprows=2)\r\n datafile_path_event_4N_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Summary_Event_1_exact.csv\"\r\n Event_4N_1 = pd.read_csv(datafile_path_event_4N_1, skiprows=2)\r\n #4N second Exact\r\n datafile_path_event_4N_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Summary_Event_2_exact.csv\"\r\n Event_4N_2 = pd.read_csv(datafile_path_event_4N_2, skiprows=2)\r\n #4N Survey \r\n datafile_path_survey_4N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Survey_summary_.csv\"\r\n Survey_4N = pd.read_csv(datafile_path_survey_4N, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_4N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_4N_24hr = pd.read_csv(data_file_path_24_PM_4N, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_Fuel_4N = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_4N_24hr = pd.read_csv(data_file_path_24_Fuel_4N, skiprows=0)\r\n \r\nelse:\r\n #1H\r\n datafile_path_day_1H =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_Summary_Day_1_exact.csv\"\r\n Day_1H = pd.read_csv(datafile_path_day_1H, skiprows=2)\r\n datafile_path_event_1H =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_Summary_Event_1_exact.csv\"\r\n Event_1H = pd.read_csv(datafile_path_event_1H, skiprows=2)\r\n #there is no second exact in phase 1H\r\n #1H Survey (row 40 or so afterward is Hood portion column 1 is houshold number)\r\n datafile_path_survey_1H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv\"\r\n Survey_1H = pd.read_csv(datafile_path_survey_1H, skiprows=40)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_1H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_1H_24hr = pd.read_csv(data_file_path_24_PM_1H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_1H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_1H_24hr = pd.read_csv(data_file_path_24_fuel_1H, skiprows=0)\r\n \r\n #2H\r\n datafile_path_day_2H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Summary_Day_1_exact.csv\"\r\n Day_2H = pd.read_csv(datafile_path_day_2H, skiprows=2)\r\n datafile_path_event_2H_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Summary_Event_1_exact.csv\"\r\n Event_2H_1 = pd.read_csv(datafile_path_event_2H_1, skiprows=2)\r\n #2H second Exact\r\n datafile_path_event_2H_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Summary_Event_2_exact.csv\"\r\n Event_2H_2 = pd.read_csv(datafile_path_event_2H_2, skiprows=2)\r\n #2H survey \r\n datafile_path_survey_2H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Survey_summary_.csv\"\r\n Survey_2H = pd.read_csv(datafile_path_survey_2H, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_2H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_2H_24hr = pd.read_csv(data_file_path_24_PM_2H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_2H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_2H_24hr = pd.read_csv(data_file_path_24_fuel_2H, skiprows=0)\r\n \r\n #3H\r\n datafile_path_day_3H = 
\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Summary_Day_1_exact.csv\"\r\n Day_3H = pd.read_csv(datafile_path_day_3H, skiprows=2)\r\n datafile_path_event_3N_1 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Summary_Event_1_exact.csv\"\r\n Event_3H_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)\r\n #3H second Exact\r\n datafile_path_event_3H_2 =\"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Summary_Event_2_exact.csv\"\r\n Event_3H_2 = pd.read_csv(datafile_path_event_3H_2, skiprows=2)\r\n #3H survey \r\n datafile_path_survey_3H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Survey_summary_.csv\"\r\n Survey_3H = pd.read_csv(datafile_path_survey_3H, skiprows=0)\r\n #24 hour Kitchen pm breakdown\r\n data_file_path_24_PM_3H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_24_hour_Kitchen_PM.csv\"\r\n Kit_PM_3H_24hr = pd.read_csv(data_file_path_24_PM_3H, skiprows=0)\r\n #24 hour Fuel Removal breakdown\r\n data_file_path_24_fuel_3H = \"C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_24_hour_Fuel_removal.csv\"\r\n Fuel_remove_3H_24hr = pd.read_csv(data_file_path_24_fuel_3H, skiprows=0)\r\n \r\n#time to start ploting fun things \r\n#1st starting with the fuel per day per adult histogram and box plot\r\nNO_hood_counter = np.arange(0,39)\r\nhood_counter = np.arange(0,14)\r\n#what household do you want to remove from the graphs (1046 is a dummy spacer)\r\n\r\n\r\nprint('---------------Fuel per Day per Adult No-Hood Phase---------------------')\r\nif Hood_or_no == 'no_hood':\r\n Fuel_per_day_per_adult_1N = []\r\n f_d_a_1N = []\r\n Fuel_per_day_per_adult_2N = []\r\n f_d_a_2N = []\r\n Fuel_per_day_per_adult_3N = []\r\n f_d_a_3N = []\r\n Fuel_per_day_per_adult_4N = []\r\n f_d_a_4N =[]\r\n count_t = 0\r\n count_f = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_NO_Hood_fuel_day_adult[count_f] - C_Place_holder):\r\n count_f = count_f + 1\r\n if count_f == len(Household_removal_NO_Hood_fuel_day_adult):\r\n count_f = 0\r\n continue\r\n if Fuel_remove_1N_24hr.iloc[c,6]!= -1.00:\r\n Fuel_per_day_per_adult_1N.append(Fuel_remove_1N_24hr.iloc[c,6]/Survey_1N.iloc[c,7])\r\n f_d_a_1N.append(Day_1N.iloc[c,0])\r\n if Fuel_remove_2N_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_2N.append(Fuel_remove_2N_24hr.iloc[c,6] / Survey_2N.iloc[c, 7])\r\n f_d_a_2N.append(Day_2N.iloc[c,0])\r\n if Fuel_remove_3N_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_3N.append(Fuel_remove_3N_24hr.iloc[c,6]/ Survey_3N.iloc[c, 7])\r\n f_d_a_3N.append(Day_3N.iloc[c, 0])\r\n if Fuel_remove_4N_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_4N.append(Fuel_remove_4N_24hr.iloc[c,6] / Survey_4N.iloc[c, 7])\r\n f_d_a_4N.append(Day_3N.iloc[c, 0])\r\n # percentage Change of Fuel per day between the phases\r\n Fuel_per_day_per_adult_2N_1N = []\r\n f_d_a_2N_1N = []\r\n Fuel_per_day_per_adult_3N_1N = []\r\n f_d_a_3N_1N = []\r\n Fuel_per_day_per_adult_4N_1N = []\r\n f_d_a_4N_1N = []\r\n \r\n Fuel_per_day_per_adult_3N_2N = []\r\n f_d_a_3N_2N = []\r\n Fuel_per_day_per_adult_4N_3N = []\r\n f_d_a_4N_3N = []\r\n Fuel_per_day_per_adult_4N_2N = []\r\n f_d_a_4N_2N = []\r\n\r\n count_t = 0\r\n count_f = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == 
(Household_removal_NO_Hood_fuel_day_adult[count_f] - C_Place_holder):\r\n count_f = count_f + 1\r\n if count_f == len(Household_removal_NO_Hood_fuel_day_adult):\r\n count_f = 0\r\n continue\r\n if (len(Fuel_per_day_per_adult_2N)-1) >= c and (len(Fuel_per_day_per_adult_1N)-1) >= c:\r\n if Day_1N.iloc[c,13] > 0 and Day_2N.iloc[c,13] > 0 and Day_1N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n Fuel_per_day_per_adult_2N_1N.append(Fuel_per_day_per_adult_2N[c]/Fuel_per_day_per_adult_1N[c])\r\n f_d_a_2N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_3N)-1) >= c and (len(Fuel_per_day_per_adult_1N)-1) >= c:\r\n if Day_3N.iloc[c,13] > 0 and Day_1N.iloc[c,13] > 0 and Day_3N.iloc[c,0] == Day_1N.iloc[c,0]:\r\n Fuel_per_day_per_adult_3N_1N.append(Fuel_per_day_per_adult_3N[c]/Fuel_per_day_per_adult_1N[c])\r\n f_d_a_3N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_4N)-1) >= c and (len(Fuel_per_day_per_adult_1N)-1) >= c:\r\n if Day_4N.iloc[c,13] > 0 and Day_1N.iloc[c,13] > 0 and Day_4N.iloc[c,0] == Day_1N.iloc[c,0]:\r\n Fuel_per_day_per_adult_4N_1N.append(Fuel_per_day_per_adult_4N[c]/Fuel_per_day_per_adult_1N[c])\r\n f_d_a_4N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_3N)-1) >= c and (len(Fuel_per_day_per_adult_2N)-1) >= c:\r\n if Day_3N.iloc[c,13] > 0 and Day_2N.iloc[c,13] > 0 and Day_3N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n Fuel_per_day_per_adult_3N_2N.append(Fuel_per_day_per_adult_3N[c]/Fuel_per_day_per_adult_2N[c])\r\n f_d_a_3N_2N.append(Day_2N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_4N)-1) >= c and (len(Fuel_per_day_per_adult_3N)-1) >= c:\r\n if Day_4N.iloc[c,13] > 0 and Day_3N.iloc[c,13] > 0 and Day_4N.iloc[c,0] == Day_3N.iloc[c,0]:\r\n Fuel_per_day_per_adult_4N_3N.append(Fuel_per_day_per_adult_4N[c]/Fuel_per_day_per_adult_3N[c])\r\n f_d_a_4N_3N.append(Day_3N.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_4N)-1) >= c and (len(Fuel_per_day_per_adult_2N)-1) >= c:\r\n if Day_4N.iloc[c,13] > 0 and Day_2N.iloc[c,13] > 0 and Day_4N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n Fuel_per_day_per_adult_4N_2N.append(Fuel_per_day_per_adult_4N[c]/Fuel_per_day_per_adult_2N[c])\r\n f_d_a_4N_2N.append(Day_4N.iloc[c,0])\r\n \r\n \r\n \r\n # now for box plotting for Fuel per day beteen Phases\r\n #1N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_1N, ax=ax_box, color='b')\r\n sns.distplot(Fuel_per_day_per_adult_1N, ax=ax_hist, color='b')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('1N Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #2N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_2N, ax=ax_box, color='g')\r\n sns.distplot(Fuel_per_day_per_adult_2N, ax=ax_hist, color='g')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('2N Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #3N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_3N, ax=ax_box, color='r')\r\n sns.distplot(Fuel_per_day_per_adult_3N, ax=ax_hist, color='r')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('3N Fuel per Day per 
Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #4N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_4N, ax=ax_box, color='y')\r\n sns.distplot(Fuel_per_day_per_adult_4N, ax=ax_hist, color='y')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('4N Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #Plotting on the same graph\r\n fig, ax = plt.subplots()\r\n plt.title('No-Hood Fuel per Day per Adult')\r\n #plt.hold(True)\r\n #1N\r\n quant_1_1N = np.percentile(Fuel_per_day_per_adult_1N, [25,50,75])\r\n Top_lim_1_1N = quant_1_1N[2] + 1.5*(quant_1_1N[2] - quant_1_1N[0])\r\n Low_lim_1_1N = quant_1_1N[0] - 1.5*(quant_1_1N[2] - quant_1_1N[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_1N, positions = [1], widths = 0.6)\r\n Fuel_D_A_1N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_1N):\r\n if a > Top_lim_1_1N or a < Low_lim_1_1N:\r\n Fuel_D_A_1N_outlier.append(f_d_a_1N[v])\r\n plt.text(1,a,f_d_a_1N[v])\r\n plt.text(1,0.1,'1N',color='b')\r\n\r\n #2N \r\n quant_1_2N = np.percentile(Fuel_per_day_per_adult_2N, [25,50,75])\r\n Top_lim_1_2N = quant_1_2N[2] + 1.5*(quant_1_2N[2] - quant_1_2N[0])\r\n Low_lim_1_2N = quant_1_2N[0] - 1.5*(quant_1_2N[2] - quant_1_2N[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_2N,positions = [2], widths = 0.6)\r\n Fuel_D_A_2N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_2N):\r\n if a > Top_lim_1_2N or a < Low_lim_1_2N:\r\n Fuel_D_A_2N_outlier.append(f_d_a_2N[v])\r\n plt.text(2,a,f_d_a_2N[v])\r\n plt.text(2,0.1,'2N', color= 'g')\r\n #3N\r\n quant_1_3N = np.percentile(Fuel_per_day_per_adult_3N, [25,50,75])\r\n Top_lim_1_3N = quant_1_3N[2] + 1.5*(quant_1_3N[2] - quant_1_3N[0])\r\n Low_lim_1_3N = quant_1_3N[0] - 1.5*(quant_1_3N[2] - quant_1_3N[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_3N,positions = [3], widths = 0.6)\r\n count = 0\r\n Fuel_D_A_3N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3N):\r\n if a > Top_lim_1_3N or a < Low_lim_1_3N:\r\n Fuel_D_A_3N_outlier.append(f_d_a_3N[v])\r\n count = count + 1\r\n if count == 2:\r\n plt.text(3,a,f_d_a_3N[v],ha='left',va='bottom')\r\n elif count != 2:\r\n plt.text(3,a,f_d_a_3N[v],ha='right',va='bottom')\r\n plt.text(3,0.1,'3N', color='r') \r\n \r\n #4N\r\n quant_1_4N = np.percentile(Fuel_per_day_per_adult_4N, [25,50,75])\r\n Top_lim_1_4N = quant_1_4N[2] + 1.5*(quant_1_4N[2] - quant_1_4N[0])\r\n Low_lim_1_4N = quant_1_4N[0] - 1.5*(quant_1_4N[2] - quant_1_4N[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_4N,positions = [4], widths = 0.6)\r\n Fuel_D_A_4N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_4N):\r\n if a > Top_lim_1_4N or a < Low_lim_1_4N:\r\n Fuel_D_A_4N_outlier.append(f_d_a_4N[v])\r\n plt.text(4,a,f_d_a_4N[v])\r\n plt.text(4,0.1,'4N', color='y') \r\n \r\n plt.xlim(0,5)\r\n plt.ylim(0,2.3)\r\n print('Fuel/Day/Adult 1N had these values as outliers ', Fuel_D_A_1N_outlier)\r\n print('Fuel/Day/Adult 2N had these values as outliers ', Fuel_D_A_2N_outlier)\r\n print('Fuel/Day/Adult 3N had these values as outliers ', Fuel_D_A_3N_outlier)\r\n print('Fuel/Day/Adult 4N had these values as outliers ', Fuel_D_A_4N_outlier)\r\n plt.show()\r\n\r\n\r\n\r\n # % change of fuel per day per adult between each phase\r\n fig_2, ax2 = plt.subplots()\r\n plt.title('% No_hood Change from Fuel per Day per Adult' )\r\n #plt.hold(True)\r\n #2N to 
1N\r\n quant_1_2N_1N = np.percentile(Fuel_per_day_per_adult_2N_1N, [25,50,75])\r\n Top_lim_1_2N_1N = quant_1_2N_1N[2] + 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])\r\n Low_lim_1_2N_1N = quant_1_2N_1N[0] - 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_2N_1N, positions=[1], widths= 0.6)\r\n Fuel_D_A_2N_1N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_2N_1N):\r\n if a > Top_lim_1_2N_1N or a < Low_lim_1_2N_1N:\r\n Fuel_D_A_2N_1N_outlier.append(f_d_a_2N_1N[v])\r\n plt.text(1, a, f_d_a_2N_1N[v])\r\n plt.text(0.5, 0, '2N / 1N', color= 'g')\r\n \r\n #3N to 1N\r\n quant_1_3N_1N = np.percentile(Fuel_per_day_per_adult_3N_1N, [25,50,75])\r\n Top_lim_1_3N_1N = quant_1_3N_1N[2] + 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])\r\n Low_lim_1_3N_1N = quant_1_3N_1N[0] - 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3N_1N, positions=[2], widths= 0.6)\r\n Fuel_D_A_3N_1N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3N_1N):\r\n if a > Top_lim_1_3N_1N or a < Low_lim_1_3N_1N:\r\n Fuel_D_A_3N_1N_outlier.append(f_d_a_3N_1N[v])\r\n plt.text(2, a, f_d_a_3N_1N[v])\r\n plt.text(1.5, 0, '3N / 1N', color= 'r')\r\n \r\n #4N to 1N\r\n quant_1_4N_1N = np.percentile(Fuel_per_day_per_adult_4N_1N, [25,50,75])\r\n Top_lim_1_4N_1N = quant_1_4N_1N[2] + 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])\r\n Low_lim_1_4N_1N = quant_1_4N_1N[0] - 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_4N_1N, positions=[3], widths= 0.6)\r\n Fuel_D_A_4N_1N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_4N_1N):\r\n if a > Top_lim_1_4N_1N or a < Low_lim_1_4N_1N:\r\n Fuel_D_A_4N_1N_outlier.append(f_d_a_4N_1N[v])\r\n plt.text(3, a, f_d_a_4N_1N[v])\r\n plt.text(2.5, 0, '4N / 1N', color= 'y')\r\n \r\n #3N to 2N\r\n quant_1_3N_2N = np.percentile(Fuel_per_day_per_adult_3N_2N, [25,50,75])\r\n Top_lim_1_3N_2N = quant_1_3N_2N[2] + 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])\r\n Low_lim_1_3N_2N = quant_1_3N_2N[0] - 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3N_2N, positions=[4], widths= 0.6)\r\n Fuel_D_A_3N_2N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3N_2N):\r\n if a > Top_lim_1_3N_2N or a < Low_lim_1_3N_2N:\r\n Fuel_D_A_3N_2N_outlier.append(f_d_a_3N_2N[v])\r\n plt.text(4, a, f_d_a_3N_2N[v])\r\n plt.text(3.5, 0, '3N / 2N', color= 'm')\r\n \r\n #4N to 3N\r\n quant_1_4N_3N = np.percentile(Fuel_per_day_per_adult_4N_3N, [25,50,75])\r\n Top_lim_1_4N_3N = quant_1_4N_3N[2] + 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])\r\n Low_lim_1_4N_3N = quant_1_4N_3N[0] - 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_4N_3N, positions=[5], widths= 0.6)\r\n Fuel_D_A_4N_3N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_4N_3N):\r\n if a > Top_lim_1_4N_3N or a < Low_lim_1_4N_3N:\r\n Fuel_D_A_4N_3N_outlier.append(f_d_a_4N_3N[v])\r\n plt.text(5, a, f_d_a_4N_3N[v])\r\n plt.text(4.5, 0, '4N / 3N', color= 'k')\r\n \r\n #4N to 2N\r\n quant_1_4N_2N = np.percentile(Fuel_per_day_per_adult_4N_2N, [25,50,75])\r\n Top_lim_1_4N_2N = quant_1_4N_2N[2] + 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])\r\n Low_lim_1_4N_2N = quant_1_4N_2N[0] - 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_4N_2N, positions=[6], widths= 0.6)\r\n Fuel_D_A_4N_2N_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_4N_2N):\r\n if a > Top_lim_1_4N_2N or a < Low_lim_1_4N_2N:\r\n 
Fuel_D_A_4N_2N_outlier.append(f_d_a_4N_2N[v])\r\n plt.text(6, a, f_d_a_4N_2N[v])\r\n plt.text(5.5, 0, '4N / 2N', color= 'tab:orange')\r\n \r\n \r\n plt.xlim(0,7)\r\n plt.ylim(-0.5,4)\r\n print('Fuel/Day/Adult 2N/1N had these values as outliers ', Fuel_D_A_2N_1N_outlier)\r\n print('Fuel/Day/Adult 3N/1N had these values as outliers ', Fuel_D_A_3N_1N_outlier)\r\n print('Fuel/Day/Adult 4N/1N had these values as outliers ', Fuel_D_A_4N_1N_outlier)\r\n print('Fuel/Day/Adult 3N/2N had these values as outliers ', Fuel_D_A_3N_2N_outlier)\r\n print('Fuel/Day/Adult 4N/3N had these values as outliers ', Fuel_D_A_4N_3N_outlier)\r\n print('Fuel/Day/Adult 4N/2N had these values as outliers ', Fuel_D_A_4N_2N_outlier)\r\n plt.show()\r\n #adding averages to the tables\r\n quant_1_1N = np.append(quant_1_1N, np.average(Fuel_per_day_per_adult_1N))\r\n quant_1_2N = np.append(quant_1_2N, np.average(Fuel_per_day_per_adult_2N))\r\n quant_1_3N = np.append(quant_1_3N, np.average(Fuel_per_day_per_adult_3N))\r\n quant_1_4N = np.append(quant_1_4N, np.average(Fuel_per_day_per_adult_4N))\r\n \r\n D_50_quant_phase_f_d_a = {'Percentile %': ['25','50','75', 'Avg'], '1N': quant_1_1N, '2N': quant_1_2N,'3N' : quant_1_3N,'4N': quant_1_4N}\r\n F_D_A_50_phase_no_hood = pd.DataFrame(data=D_50_quant_phase_f_d_a, columns=['Percentile %','1N', '2N', '3N','4N'])\r\n \r\n quant_1_2N_1N = np.append(quant_1_2N_1N , np.average(Fuel_per_day_per_adult_2N_1N))\r\n quant_1_3N_1N = np.append(quant_1_3N_1N , np.average(Fuel_per_day_per_adult_3N_1N))\r\n quant_1_4N_1N = np.append(quant_1_4N_1N , np.average(Fuel_per_day_per_adult_4N_1N))\r\n quant_1_3N_2N = np.append(quant_1_3N_2N , np.average(Fuel_per_day_per_adult_3N_2N))\r\n quant_1_4N_3N = np.append(quant_1_4N_3N , np.average(Fuel_per_day_per_adult_4N_3N))\r\n quant_1_4N_2N = np.append(quant_1_4N_2N , np.average(Fuel_per_day_per_adult_4N_2N))\r\n \r\n D_50_quant_percent_f_d_a ={'Percentile %': ['25','50','75', 'Avg'],'2N / 1N': quant_1_2N_1N,'3N / 1N': quant_1_3N_1N,'4N / 1N': quant_1_4N_1N,\r\n '3N / 2N': quant_1_3N_2N,'4N / 3N': quant_1_4N_3N,'4N / 2N': quant_1_4N_2N}\r\n F_D_A_50_percent_change_no_hood = pd.DataFrame(data=D_50_quant_percent_f_d_a, columns=['Percentile %','2N / 1N','3N / 1N', '4N / 1N'\r\n ,'3N / 2N','4N / 3N','4N / 2N'])\r\n print(F_D_A_50_phase_no_hood)\r\n print(F_D_A_50_percent_change_no_hood)\r\n# add more\r\nprint ('-------------------Fuel per Day per Adult Hood Phase -------------------')\r\n\r\nif Hood_or_no == 'hood':\r\n Fuel_per_day_per_adult_1H = []\r\n f_d_a_1H = []\r\n Fuel_per_day_per_adult_2H = []\r\n f_d_a_2H = []\r\n Fuel_per_day_per_adult_3H = []\r\n f_d_a_3H = []\r\n \r\n count_t = 0\r\n count_f = 0\r\n for c in hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_Hood_fuel_day_adult[count_f] - C_Place_holder):\r\n count_f = count_f + 1\r\n if count_f == len(Household_removal_Hood_fuel_day_adult):\r\n count_f = 0\r\n continue\r\n if Fuel_remove_1H_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_1H.append(Fuel_remove_1H_24hr.iloc[c,6]/Survey_1H.iloc[c,7])\r\n f_d_a_1H.append(Day_1H.iloc[c,0])\r\n \r\n if Fuel_remove_2H_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_2H.append(Fuel_remove_2H_24hr.iloc[c,6] / Survey_2H.iloc[c, 7])\r\n f_d_a_2H.append(Day_2H.iloc[c,0])\r\n \r\n if Fuel_remove_3H_24hr.iloc[c,6] != -1.00:\r\n Fuel_per_day_per_adult_3H.append(Fuel_remove_3H_24hr.iloc[c,6]/ Survey_3H.iloc[c, 
7])\r\n f_d_a_3H.append(Day_3H.iloc[c, 0])\r\n \r\n # percentage Change of Fuel per day between the phases\r\n Fuel_per_day_per_adult_2H_1H = []\r\n f_d_a_2H_1H = []\r\n Fuel_per_day_per_adult_3H_1H = []\r\n f_d_a_3H_1H = []\r\n Fuel_per_day_per_adult_3H_2H = []\r\n f_d_a_3H_2H = []\r\n \r\n count_t = 0\r\n count_f = 0\r\n for c in hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_Hood_fuel_day_adult[count_f] - C_Place_holder):\r\n count_f = count_f + 1\r\n if count_f == len(Household_removal_Hood_fuel_day_adult):\r\n count_f = 0\r\n continue\r\n if (len(Fuel_per_day_per_adult_2H)-1) >= c and (len(Fuel_per_day_per_adult_1H)-1) >= c:\r\n if Day_1H.iloc[c,13] > 0 and Day_2H.iloc[c,13] > 0 and Day_1H.iloc[c,0] == Day_2H.iloc[c,0]:\r\n Fuel_per_day_per_adult_2H_1H.append(Fuel_per_day_per_adult_2H[c]/Fuel_per_day_per_adult_1H[c])\r\n f_d_a_2H_1H.append(Day_1H.iloc[c,0])\r\n if (len(Fuel_per_day_per_adult_3H)-1) >= c and (len(Fuel_per_day_per_adult_1H)-1) >= c:\r\n if Day_3H.iloc[c,13] > 0 and Day_1H.iloc[c,13] > 0 and Day_3H.iloc[c,0] == Day_1H.iloc[c,0]:\r\n Fuel_per_day_per_adult_3H_1H.append(Fuel_per_day_per_adult_3H[c]/Fuel_per_day_per_adult_1H[c])\r\n f_d_a_3H_1H.append(Day_1H.iloc[c,0]) \r\n if (len(Fuel_per_day_per_adult_3H)-1) >= c and (len(Fuel_per_day_per_adult_2H)-1) >= c:\r\n if Day_3H.iloc[c,13] > 0 and Day_2H.iloc[c,13] > 0 and Day_3H.iloc[c,0] == Day_2H.iloc[c,0]:\r\n Fuel_per_day_per_adult_3H_2H.append(Fuel_per_day_per_adult_3H[c]/Fuel_per_day_per_adult_2H[c])\r\n f_d_a_3H_2H.append(Day_1H.iloc[c,0])\r\n \r\n # now for plotting\r\n #1H\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_1H, ax=ax_box, color='b')\r\n sns.distplot(Fuel_per_day_per_adult_1H, ax=ax_hist, color='b')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('1H Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #2H\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_2H, ax=ax_box, color='g')\r\n sns.distplot(Fuel_per_day_per_adult_2H, ax=ax_hist, color='g')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('2H Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #3H\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Fuel_per_day_per_adult_3H, ax=ax_box, color='r')\r\n sns.distplot(Fuel_per_day_per_adult_3H, ax=ax_hist, color='r')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('3H Fuel per Day per Adult')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n fig_2, ax_2 = plt.subplots()\r\n plt.title('Hood Fuel per Day per Adult')\r\n #plt.hold(True)\r\n \r\n quant_1_1H = np.percentile(Fuel_per_day_per_adult_1H, [25,50,75])\r\n Top_lim_1_1H = quant_1_1H[2] + 1.5*(quant_1_1H[2] - quant_1_1H[0])\r\n Low_lim_1_1H = quant_1_1H[0] - 1.5*(quant_1_1H[2] - quant_1_1H[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_1H, positions = [1], widths = 0.6)\r\n Fuel_D_A_1H_outlier = []\r\n for v,a in 
enumerate(Fuel_per_day_per_adult_1H):\r\n if a > Top_lim_1_1H or a < Low_lim_1_1H:\r\n Fuel_D_A_1H_outlier.append(f_d_a_1H[v])\r\n plt.text(1,a,f_d_a_1H[v])\r\n plt.text(1,0,'1H',color='b')\r\n \r\n \r\n quant_1_2H = np.percentile(Fuel_per_day_per_adult_2H, [25,50,75])\r\n Top_lim_1_2H = quant_1_2H[2] + 1.5*(quant_1_2H[2] - quant_1_2H[0])\r\n Low_lim_1_2H = quant_1_2H[0] - 1.5*(quant_1_2H[2] - quant_1_2H[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_2H,positions = [2], widths = 0.6)\r\n count = 0\r\n Fuel_D_A_2H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_2H):\r\n if a > Top_lim_1_2H or a < Low_lim_1_2H:\r\n Fuel_D_A_2H_outlier.append(f_d_a_2H[v])\r\n count = count + 1\r\n if count == 1:\r\n plt.text(2,a,f_d_a_2H[v],ha='left',va='bottom')\r\n elif count !=1:\r\n plt.text(2,a,f_d_a_2H[v],ha='right',va='bottom')\r\n plt.text(2,0,'2H', color= 'g')\r\n \r\n quant_1_3H = np.percentile(Fuel_per_day_per_adult_3H, [25,50,75])\r\n Top_lim_1_3H = quant_1_3H[2] + 1.5*(quant_1_3H[2] - quant_1_3H[0])\r\n Low_lim_1_3H = quant_1_3H[0] - 1.5*(quant_1_3H[2] - quant_1_3H[0])\r\n \r\n bp_1 = plt.boxplot(Fuel_per_day_per_adult_3H,positions = [3], widths = 0.6)\r\n count = 0\r\n Fuel_D_A_3H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3H):\r\n if a > Top_lim_1_3H or a < Low_lim_1_3H:\r\n Fuel_D_A_3H_outlier.append(f_d_a_3H[v])\r\n count = count + 1\r\n if count == 3:\r\n plt.text(3,a,f_d_a_3H[v],ha='left',va='bottom')\r\n elif count != 1:\r\n plt.text(3,a,f_d_a_3H[v],ha='right',va='bottom')\r\n plt.text(3,0,'3H', color='r') \r\n \r\n \r\n plt.xlim(-0,4)\r\n plt.ylim(-0.25,2.5)\r\n print('Fuel/Day/Adult 1H had these values as outliers ', Fuel_D_A_1H_outlier)\r\n print('Fuel/Day/Adult 2H had these values as outliers ', Fuel_D_A_2H_outlier)\r\n print('Fuel/Day/Adult 3H had these values as outliers ', Fuel_D_A_3H_outlier)\r\n plt.show()\r\n \r\n \r\n #% change of fuel perday per adult between each phase \r\n fig_2, ax2 = plt.subplots()\r\n plt.title('% No_hood Change from Fuel per Day per Adult' )\r\n #plt.hold(True)\r\n #2H to 1H\r\n quant_1_2H_1H = np.percentile(Fuel_per_day_per_adult_2H_1H, [25,50,75])\r\n Top_lim_1_2H_1H = quant_1_2H_1H[2] + 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])\r\n Low_lim_1_2H_1H = quant_1_2H_1H[0] - 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_2H_1H, positions=[1], widths= 0.6)\r\n Fuel_D_A_2H_1H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_2H_1H):\r\n if a > Top_lim_1_2H_1H or a < Low_lim_1_2H_1H:\r\n Fuel_D_A_2H_1H_outlier.append(f_d_a_2H_1H[v])\r\n plt.text(1, a, f_d_a_2H_1H[v])\r\n plt.text(0.75, -0.25, '2H / 1H', color= 'g')\r\n \r\n #3H to 1H\r\n quant_1_3H_1H = np.percentile(Fuel_per_day_per_adult_3H_1H, [25,50,75])\r\n Top_lim_1_3H_1H = quant_1_3H_1H[2] + 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])\r\n Low_lim_1_3H_1H = quant_1_3H_1H[0] - 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3H_1H, positions=[2], widths= 0.6)\r\n Fuel_D_A_3H_1H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3H_1H):\r\n if a > Top_lim_1_3H_1H or a < Low_lim_1_3H_1H:\r\n Fuel_D_A_3H_1H_outlier.append(f_d_a_3H_1H[v])\r\n plt.text(2, a, f_d_a_3H_1H[v])\r\n plt.text(1.75, -0.25, '3H / 1H', color= 'r')\r\n \r\n #3H to 2H\r\n quant_1_3H_2H = np.percentile(Fuel_per_day_per_adult_3H_2H, [25,50,75])\r\n Top_lim_1_3H_2H = quant_1_3H_2H[2] + 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])\r\n Low_lim_1_3H_2H = quant_1_3H_2H[0] - 
1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])\r\n\r\n bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3H_2H, positions=[3], widths= 0.6)\r\n Fuel_D_A_3H_2H_outlier = []\r\n for v,a in enumerate(Fuel_per_day_per_adult_3H_2H):\r\n if a > Top_lim_1_3H_2H or a < Low_lim_1_3H_2H:\r\n Fuel_D_A_3H_2H_outlier.append(f_d_a_3H_2H[v])\r\n plt.text(3, a, f_d_a_3H_2H[v])\r\n plt.text(2.75, -0.25, '2H / 1H', color= 'm')\r\n \r\n plt.xlim(-0,4)\r\n plt.ylim(-0.25,6)\r\n print('Fuel/Day/Adult 2H/1H had these values as outliers ', Fuel_D_A_2H_1H_outlier)\r\n print('Fuel/Day/Adult 3H/1H had these values as outliers ', Fuel_D_A_3H_1H_outlier)\r\n print('Fuel/Day/Adult 3H/2H had these values as outliers ', Fuel_D_A_3H_2H_outlier)\r\n plt.show()\r\n \r\n quant_1_1H = np.append(quant_1_1H, np.average(Fuel_per_day_per_adult_1H))\r\n quant_1_2H = np.append(quant_1_2H, np.average(Fuel_per_day_per_adult_2H))\r\n quant_1_3H = np.append(quant_1_3H, np.average(Fuel_per_day_per_adult_3H))\r\n\r\n \r\n D_50_quant_phase_f_d_a_hood = {'Percentile %': ['25','50','75', 'Avg'], '1H': quant_1_1H, '2H': quant_1_2H,'3H' : quant_1_3H}\r\n F_D_A_50_phase_hood = pd.DataFrame(data=D_50_quant_phase_f_d_a_hood, columns=['Percentile %','1H', '2H','3H'] )\r\n \r\n quant_1_2H_1H = np.append(quant_1_2H_1H , np.average(Fuel_per_day_per_adult_2H_1H))\r\n quant_1_3H_1H = np.append(quant_1_3H_1H , np.average(Fuel_per_day_per_adult_3H_1H))\r\n quant_1_3H_2H = np.append(quant_1_3H_2H , np.average(Fuel_per_day_per_adult_3H_2H))\r\n \r\n D_50_quant_percent_f_d_a_hood ={'Percentile %': ['25','50','75', 'Avg'],'2H / 1H': quant_1_2H_1H,'3H / 1H': quant_1_3H_1H,'3H / 2H': quant_1_3H_2H}\r\n F_D_A_50_percent_change_hood = pd.DataFrame(data=D_50_quant_percent_f_d_a_hood, columns=['Percentile %','2H / 1H','3H / 1H','3H / 2H'])\r\n\r\n print(F_D_A_50_phase_hood)\r\n print(F_D_A_50_percent_change_hood)\r\nprint('----------------------- Kitchen PM per Day -----------------------------')\r\nif Hood_or_no == 'no_hood':\r\n Kit_PM_per_day_1N = []\r\n K_PM_D_1N = []\r\n Kit_PM_per_day_2N = []\r\n K_PM_D_2N = []\r\n Kit_PM_per_day_3N = []\r\n K_PM_D_3N = []\r\n Kit_PM_per_day_4N = []\r\n K_PM_D_4N = []\r\n count_t = 0\r\n count_pm = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_NO_Hood_PM[count_pm] - C_Place_holder):\r\n count_pm = count_pm + 1\r\n if count_pm == len(Household_removal_NO_Hood_PM):\r\n count_pm = 0\r\n continue\r\n # if Day_1N.iloc[c,7] != -1.00:\r\n # Kit_PM_per_day_1N.append(Day_1N.iloc[c,7]/Day_1N.iloc[c,1])\r\n # K_PM_D_1N.append(Day_1N.iloc[c,0])\r\n if Kit_PM_1N_24hr.iloc[c,6] != -1.00:\r\n Kit_PM_per_day_1N.append(Kit_PM_1N_24hr.iloc[c,6])\r\n K_PM_D_1N.append(Kit_PM_1N_24hr.iloc[c, 0])\r\n #if Day_2N.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_2N.append(Day_2N.iloc[c,7]/Day_2N.iloc[c,1])\r\n # K_PM_D_2N.append(Day_2N.iloc[c,0])\r\n if Kit_PM_2N_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_2N.append(Kit_PM_2N_24hr.iloc[c, 6])\r\n K_PM_D_2N.append(Kit_PM_2N_24hr.iloc[c, 0])\r\n # if Day_3N.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_3N.append(Day_3N.iloc[c,7]/Day_3N.iloc[c,1])\r\n # K_PM_D_3N.append(Day_3N.iloc[c, 0])\r\n if Kit_PM_3N_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_3N.append(Kit_PM_3N_24hr.iloc[c, 6])\r\n K_PM_D_3N.append(Kit_PM_3N_24hr.iloc[c, 0])\r\n # if Day_4N.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_4N.append(Day_4N.iloc[c,7]/Day_4N.iloc[c,1])\r\n # 
K_PM_D_4N.append(Day_4N.iloc[c, 0])\r\n if Kit_PM_4N_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_4N.append(Kit_PM_4N_24hr.iloc[c, 6])\r\n K_PM_D_4N.append(Kit_PM_4N_24hr.iloc[c, 0])\r\n \r\n # percentages Between Phases of kitchen PM per day\r\n Kit_per_day_2N_1N = []\r\n K_PM_D_2N_1N = []\r\n Kit_per_day_3N_1N = []\r\n K_PM_D_3N_1N = []\r\n Kit_per_day_4N_1N = []\r\n K_PM_D_4N_1N = []\r\n \r\n Kit_per_day_3N_2N = []\r\n K_PM_D_3N_2N = []\r\n Kit_per_day_4N_3N = []\r\n K_PM_D_4N_3N = []\r\n Kit_per_day_4N_2N = []\r\n K_PM_D_4N_2N = []\r\n\r\n count_t = 0\r\n count_pm = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_NO_Hood_PM[count_pm] - C_Place_holder):\r\n count_pm = count_pm + 1\r\n if count_pm == len(Household_removal_NO_Hood_PM):\r\n count_pm = 0\r\n continue\r\n if (len(Kit_PM_per_day_2N)-1) >= c and (len(Kit_PM_per_day_1N)-1) >= c:\r\n #if Day_1N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_1N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n if Kit_PM_1N_24hr.iloc[c,6] > 0 and Kit_PM_2N_24hr.iloc[c,6] > 0 and Kit_PM_1N_24hr.iloc[c,0] == Kit_PM_2N_24hr.iloc[c,0]:\r\n Kit_per_day_2N_1N.append(Kit_PM_per_day_2N[c]/Kit_PM_per_day_1N[c])\r\n K_PM_D_2N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Kit_PM_per_day_3N)-1) >= c and (len(Kit_PM_per_day_1N)-1) >= c:\r\n #if Day_3N.iloc[c,7] > 0 and Day_1N.iloc[c,7] > 0 and Day_3N.iloc[c,0] == Day_1N.iloc[c,0]:\r\n if Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_1N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \\\r\n Kit_PM_1N_24hr.iloc[c, 0]:\r\n Kit_per_day_3N_1N.append(Kit_PM_per_day_3N[c]/Kit_PM_per_day_1N[c])\r\n K_PM_D_3N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_1N)-1) >= c:\r\n #if Day_4N.iloc[c,7] > 0 and Day_1N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_1N.iloc[c,0]:\r\n if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_1N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 0] == \\\r\n Kit_PM_1N_24hr.iloc[c, 0]:\r\n Kit_per_day_4N_1N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_1N[c])\r\n K_PM_D_4N_1N.append(Day_1N.iloc[c,0])\r\n if (len(Kit_PM_per_day_3N)-1) >= c and (len(Kit_PM_per_day_2N)-1) >= c:\r\n #if Day_3N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_3N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n if Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_2N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \\\r\n Kit_PM_2N_24hr.iloc[c, 0]:\r\n Kit_per_day_3N_2N.append(Kit_PM_per_day_3N[c]/Kit_PM_per_day_2N[c])\r\n K_PM_D_3N_2N.append(Day_2N.iloc[c,0])\r\n if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_3N)-1) >= c:\r\n #if Day_4N.iloc[c,7] > 0 and Day_3N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_3N.iloc[c,0]:\r\n if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \\\r\n Kit_PM_4N_24hr.iloc[c, 0]:\r\n Kit_per_day_4N_3N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_3N[c])\r\n K_PM_D_4N_3N.append(Day_3N.iloc[c,0])\r\n if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_2N)-1) >= c:\r\n #if Day_4N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_2N.iloc[c,0]:\r\n if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 0] == \\\r\n Kit_PM_2N_24hr.iloc[c, 0]:\r\n Kit_per_day_4N_2N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_2N[c])\r\n K_PM_D_4N_2N.append(Day_4N.iloc[c,0])\r\n \r\n # now for box plotting for Kitchen PM per day percent 
changes\r\n \r\n #2N to 1N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_2N_1N, ax=ax_box, color='g')\r\n sns.distplot(Kit_per_day_2N_1N, ax=ax_hist, color='g')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 2N/1N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #3N to 1N \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_3N_1N, ax=ax_box, color='r')\r\n sns.distplot(Kit_per_day_3N_1N, ax=ax_hist, color='r')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 3N/1N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #4N to 1N\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_4N_1N, ax=ax_box, color='y')\r\n sns.distplot(Kit_per_day_4N_1N, ax=ax_hist, color='y')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 4N/1N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #3N to 2N \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_3N_2N, ax=ax_box, color='m')\r\n sns.distplot(Kit_per_day_3N_2N, ax=ax_hist, color='m')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 3N/2N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #4N to 3N \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_4N_3N, ax=ax_box, color='k')\r\n sns.distplot(Kit_per_day_4N_3N, ax=ax_hist, color='k')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 4N/3N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n #4N to 2N \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_4N_2N, ax=ax_box, color='tab:orange')\r\n sns.distplot(Kit_per_day_4N_2N, ax=ax_hist, color='tab:orange')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 4N/2N (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n \r\n #Plotting on the same graph\r\n fig, ax = plt.subplots()\r\n plt.title('No-Hood Kitchen PM per day')\r\n #plt.hold()\r\n #1N\r\n quant_1_1N = np.percentile(Kit_PM_per_day_1N, [25,50,75])\r\n Top_lim_1_1N = quant_1_1N[2] + 1.5*(quant_1_1N[2] - quant_1_1N[0])\r\n Low_lim_1_1N = quant_1_1N[0] - 1.5*(quant_1_1N[2] - quant_1_1N[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_1N, positions = [1], widths = 0.6)\r\n kitchen_pm_1N_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_1N):\r\n if a > Top_lim_1_1N or a < Low_lim_1_1N:\r\n kitchen_pm_1N_outlier.append(K_PM_D_1N[v])\r\n plt.text(1,a,K_PM_D_1N[v])\r\n plt.text(1,0.1,'1N',color='b')\r\n\r\n #2N \r\n quant_1_2N = np.percentile(Kit_PM_per_day_2N, [25,50,75])\r\n Top_lim_1_2N = quant_1_2N[2] + 1.5*(quant_1_2N[2] - quant_1_2N[0])\r\n Low_lim_1_2N = quant_1_2N[0] - 
1.5*(quant_1_2N[2] - quant_1_2N[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_2N,positions = [2], widths = 0.6)\r\n kitchen_pm_2N_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_2N):\r\n if a > Top_lim_1_2N or a < Low_lim_1_2N:\r\n kitchen_pm_2N_outlier.append(K_PM_D_2N[v])\r\n plt.text(2,a,K_PM_D_2N[v])\r\n plt.text(2,0.1,'2N', color= 'g')\r\n #3N\r\n quant_1_3N = np.percentile(Kit_PM_per_day_3N, [25,50,75])\r\n Top_lim_1_3N = quant_1_3N[2] + 1.5*(quant_1_3N[2] - quant_1_3N[0])\r\n Low_lim_1_3N = quant_1_3N[0] - 1.5*(quant_1_3N[2] - quant_1_3N[0])\r\n kitchen_pm_3N_outlier = []\r\n bp_1 = plt.boxplot(Kit_PM_per_day_3N,positions = [3], widths = 0.6)\r\n count = 0\r\n for v,a in enumerate(Kit_PM_per_day_3N):\r\n if a > Top_lim_1_3N or a < Low_lim_1_3N:\r\n kitchen_pm_3N_outlier.append(K_PM_D_3N[v])\r\n count = count + 1\r\n if count == (3):\r\n plt.text(3,a,K_PM_D_3N[v],ha='left', va='bottom')\r\n if count == (1):\r\n plt.text(3,a,K_PM_D_3N[v],ha='left', va='top')\r\n else:\r\n plt.text(3,a,K_PM_D_3N[v],ha='right', va='bottom')\r\n plt.text(3,0.1,'3N', color='r') \r\n \r\n #4N\r\n quant_1_4N = np.percentile(Kit_PM_per_day_4N, [25,50,75])\r\n Top_lim_1_4N = quant_1_4N[2] + 1.5*(quant_1_4N[2] - quant_1_4N[0])\r\n Low_lim_1_4N = quant_1_4N[0] - 1.5*(quant_1_4N[2] - quant_1_4N[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_4N,positions = [4], widths = 0.6)\r\n kitchen_pm_4N_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_4N):\r\n if a > Top_lim_1_4N or a < Low_lim_1_4N:\r\n kitchen_pm_4N_outlier.append(K_PM_D_4N[v])\r\n plt.text(4,a,K_PM_D_4N[v])\r\n plt.text(4,0.1,'4N', color='y') \r\n \r\n plt.xlim(0,5)\r\n plt.ylim(0,1200)\r\n print('Kitchen PM 1N had these values as outliers ', kitchen_pm_1N_outlier)\r\n print('Kitchen PM 2N had these values as outliers ', kitchen_pm_2N_outlier)\r\n print('Kitchen PM 3N had these values as outliers ', kitchen_pm_3N_outlier)\r\n print('Kitchen PM 4N had these values as outliers ', kitchen_pm_4N_outlier)\r\n plt.show()\r\n\r\n\r\n # % change of PM per day \r\n\r\n fig_2, ax2 = plt.subplots()\r\n plt.title('% No_hood PM per Day Change' )\r\n #plt.hold(True)\r\n #2N to 1N\r\n quant_1_2N_1N = np.percentile(Kit_per_day_2N_1N, [25,50,75])\r\n Top_lim_1_2N_1N = quant_1_2N_1N[2] + 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])\r\n Low_lim_1_2N_1N = quant_1_2N_1N[0] - 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_2N_1N, positions=[1], widths= 0.6)\r\n kitchen_pm_2N_1N_outlier = []\r\n for v,a in enumerate(Kit_per_day_2N_1N):\r\n if a > Top_lim_1_2N_1N or a < Low_lim_1_2N_1N:\r\n kitchen_pm_2N_1N_outlier.append(K_PM_D_2N_1N[v])\r\n plt.text(1, a, K_PM_D_2N_1N[v])\r\n plt.text(0.5, -0.25, '2N / 1N', color= 'g')\r\n \r\n #3N to 1N\r\n quant_1_3N_1N = np.percentile(Kit_per_day_3N_1N, [25,50,75])\r\n Top_lim_1_3N_1N = quant_1_3N_1N[2] + 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])\r\n Low_lim_1_3N_1N = quant_1_3N_1N[0] - 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_3N_1N, positions=[2], widths= 0.6)\r\n kitchen_pm_3N_1N_outlier = []\r\n for v,a in enumerate(Kit_per_day_3N_1N):\r\n if a > Top_lim_1_3N_1N or a < Low_lim_1_3N_1N:\r\n kitchen_pm_3N_1N_outlier.append(K_PM_D_3N_1N[v])\r\n plt.text(2, a, K_PM_D_3N_1N[v])\r\n plt.text(1.5, -0.25, '3N / 1N', color= 'r')\r\n \r\n #4N to 1N\r\n quant_1_4N_1N = np.percentile(Kit_per_day_4N_1N, [25,50,75])\r\n Top_lim_1_4N_1N = quant_1_4N_1N[2] + 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])\r\n Low_lim_1_4N_1N = quant_1_4N_1N[0] - 
1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_4N_1N, positions=[3], widths= 0.6)\r\n kitchen_pm_4N_1N_outlier = []\r\n for v,a in enumerate(Kit_per_day_4N_1N):\r\n if a > Top_lim_1_4N_1N or a < Low_lim_1_4N_1N:\r\n kitchen_pm_4N_1N_outlier.append(K_PM_D_4N_1N[v])\r\n plt.text(3, a, K_PM_D_4N_1N[v])\r\n plt.text(2.5, -0.25, '4N / 1N', color= 'y')\r\n \r\n #3N to 2N\r\n quant_1_3N_2N = np.percentile(Kit_per_day_3N_2N, [25,50,75])\r\n Top_lim_1_3N_2N = quant_1_3N_2N[2] + 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])\r\n Low_lim_1_3N_2N = quant_1_3N_2N[0] - 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_3N_2N, positions=[4], widths= 0.6)\r\n kitchen_pm_3N_2N_outlier = []\r\n for v,a in enumerate(Kit_per_day_3N_2N):\r\n if a > Top_lim_1_3N_2N or a < Low_lim_1_3N_2N:\r\n kitchen_pm_3N_2N_outlier.append(K_PM_D_3N_2N[v])\r\n plt.text(4, a, K_PM_D_3N_2N[v])\r\n plt.text(3.5, -0.25, '3N / 2N', color= 'm')\r\n \r\n #4N to 3N\r\n quant_1_4N_3N = np.percentile(Kit_per_day_4N_3N, [25,50,75])\r\n Top_lim_1_4N_3N = quant_1_4N_3N[2] + 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])\r\n Low_lim_1_4N_3N = quant_1_4N_3N[0] - 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_4N_3N, positions=[5], widths= 0.6)\r\n kitchen_pm_4N_3N_outlier = []\r\n for v,a in enumerate(Kit_per_day_4N_3N):\r\n if a > Top_lim_1_4N_3N or a < Low_lim_1_4N_3N:\r\n kitchen_pm_4N_3N_outlier.append(K_PM_D_4N_3N[v])\r\n plt.text(5, a, K_PM_D_4N_3N[v])\r\n plt.text(4.5, -0.25, '4N / 3N', color= 'k')\r\n \r\n #4N to 2N\r\n quant_1_4N_2N = np.percentile(Kit_per_day_4N_2N, [25,50,75])\r\n Top_lim_1_4N_2N = quant_1_4N_2N[2] + 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])\r\n Low_lim_1_4N_2N = quant_1_4N_2N[0] - 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_4N_2N, positions=[6], widths= 0.6)\r\n kitchen_pm_4N_2N_outlier = []\r\n for v,a in enumerate(Kit_per_day_4N_2N):\r\n if a > Top_lim_1_4N_2N or a < Low_lim_1_4N_2N:\r\n kitchen_pm_4N_2N_outlier.append(K_PM_D_4N_2N[v])\r\n plt.text(6, a, K_PM_D_4N_2N[v])\r\n plt.text(5.5, -0.25, '4N / 2N', color= 'tab:orange')\r\n \r\n\r\n plt.xlim(0,7)\r\n plt.ylim(-0.5,5)\r\n\r\n print('Kitchen PM 2N/1N had these values as outliers ', kitchen_pm_2N_1N_outlier)\r\n print('Kitchen PM 3N/1N had these values as outliers ', kitchen_pm_3N_1N_outlier)\r\n print('Kitchen PM 4N/1N had these values as outliers ', kitchen_pm_4N_1N_outlier)\r\n print('Kitchen PM 3N/2N had these values as outliers ', kitchen_pm_3N_2N_outlier)\r\n print('Kitchen PM 4N/3N had these values as outliers ', kitchen_pm_4N_3N_outlier)\r\n print('Kitchen PM 4N/2N had these values as outliers ', kitchen_pm_4N_2N_outlier)\r\n plt.show()\r\n \r\n #adding averages to the tables\r\n quant_1_1N = np.append(quant_1_1N, np.average(Kit_PM_per_day_1N))\r\n quant_1_2N = np.append(quant_1_2N, np.average(Kit_PM_per_day_2N))\r\n quant_1_3N = np.append(quant_1_3N, np.average(Kit_PM_per_day_3N))\r\n quant_1_4N = np.append(quant_1_4N, np.average(Kit_PM_per_day_4N))\r\n \r\n D_50_quant_phase_PM_d = {'Percentile %': ['25','50','75', 'Avg'], '1N': quant_1_1N, '2N': quant_1_2N,'3N' : quant_1_3N,'4N': quant_1_4N}\r\n PM_D_50_phase_no_hood = pd.DataFrame(data=D_50_quant_phase_PM_d,columns=['Percentile %','1N', '2N', '3N','4N'])\r\n \r\n quant_1_2N_1N = np.append(quant_1_2N_1N , np.average(Kit_per_day_2N_1N))\r\n quant_1_3N_1N = np.append(quant_1_3N_1N , np.average(Kit_per_day_3N_1N))\r\n quant_1_4N_1N = np.append(quant_1_4N_1N , 
np.average(Kit_per_day_4N_1N))\r\n quant_1_3N_2N = np.append(quant_1_3N_2N , np.average(Kit_per_day_3N_2N))\r\n quant_1_4N_3N = np.append(quant_1_4N_3N , np.average(Kit_per_day_4N_3N))\r\n quant_1_4N_2N = np.append(quant_1_4N_2N , np.average(Kit_per_day_4N_2N))\r\n \r\n D_50_quant_percent_PM_d ={'Percentile %': ['25','50','75', 'Avg'],'2N / 1N': quant_1_2N_1N,'3N / 1N': quant_1_3N_1N,'4N / 1N': quant_1_4N_1N,\r\n '3N / 2N': quant_1_3N_2N,'4N / 3N': quant_1_4N_3N,'4N / 2N': quant_1_4N_2N}\r\n PM_D_50_percent_change_no_hood = pd.DataFrame(data=D_50_quant_percent_PM_d, columns=['Percentile %','2N / 1N','3N / 1N', '4N / 1N'\r\n ,'3N / 2N','4N / 3N','4N / 2N'])\r\n\r\n \r\n print(PM_D_50_phase_no_hood)\r\n print(PM_D_50_percent_change_no_hood)\r\n \r\n# hood Pm per day\r\nif Hood_or_no == 'hood':\r\n Kit_PM_per_day_1H = []\r\n K_PM_D_1H = []\r\n Kit_PM_per_day_2H = []\r\n K_PM_D_2H = []\r\n Kit_PM_per_day_3H = []\r\n K_PM_D_3H = []\r\n\r\n count_t = 0\r\n count_pm = 0\r\n for c in hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_Hood_PM[count_pm] - C_Place_holder):\r\n count_pm = count_pm + 1\r\n if count_pm == len(Household_removal_Hood_PM):\r\n count_pm = 0\r\n continue\r\n # if Day_1H.iloc[c,7] != -1.00:\r\n # Kit_PM_per_day_1H.append(Day_1H.iloc[c,7]/Day_1H.iloc[c,1])\r\n # K_PM_D_1H.append(Day_1H.iloc[c,0])\r\n if Kit_PM_1H_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_1H.append(Kit_PM_1H_24hr.iloc[c,6])\r\n K_PM_D_1H.append(Kit_PM_1H_24hr.iloc[c,0])\r\n # if Day_2H.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_2H.append(Day_2H.iloc[c,7]/Day_2H.iloc[c,1])\r\n # K_PM_D_2H.append(Day_2H.iloc[c,0])\r\n if Kit_PM_2H_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_2H.append(Kit_PM_2H_24hr.iloc[c,6])\r\n K_PM_D_2H.append(Kit_PM_2H_24hr.iloc[c,0])\r\n # if Day_3H.iloc[c, 7] != -1.00:\r\n # Kit_PM_per_day_3H.append(Day_3H.iloc[c,7]/Day_3H.iloc[c,1])\r\n # K_PM_D_3H.append(Day_3H.iloc[c, 0])\r\n if Kit_PM_3H_24hr.iloc[c, 6] != -1.00:\r\n Kit_PM_per_day_3H.append(Kit_PM_3H_24hr.iloc[c,6])\r\n K_PM_D_3H.append(Kit_PM_3H_24hr.iloc[c,0])\r\n \r\n \r\n # percentages Between Phases of kitchen PM per day\r\n Kit_per_day_2H_1H = []\r\n K_PM_D_2H_1H = []\r\n Kit_per_day_3H_1H = []\r\n K_PM_D_3H_1H = []\r\n Kit_per_day_3H_2H = []\r\n K_PM_D_3H_2H = []\r\n\r\n\r\n count_t = 0\r\n count_pm = 0\r\n for c in NO_hood_counter:\r\n if c == (Household_removal[count_t] - C_Place_holder):\r\n count_t = count_t + 1\r\n if count_t == len(Household_removal):\r\n count_t = 0\r\n continue\r\n if c == (Household_removal_Hood_PM[count_pm] - C_Place_holder):\r\n count_pm = count_pm + 1\r\n if count_pm == len(Household_removal_Hood_PM):\r\n count_pm = 0\r\n continue\r\n if (len(Kit_PM_per_day_2H)-1) >= c and (len(Kit_PM_per_day_1H)-1) >= c:\r\n #if Day_1H.iloc[c,7] > 0 and Day_2H.iloc[c,7] > 0 and Day_1H.iloc[c,0] == Day_2H.iloc[c,0]:\r\n if Kit_PM_1H_24hr.iloc[c, 6] > 0 and Kit_PM_2H_24hr.iloc[c, 6] > 0 and Kit_PM_1H_24hr.iloc[c, 0] == Kit_PM_2H_24hr.iloc[c, 0]:\r\n Kit_per_day_2H_1H.append(Kit_PM_per_day_2H[c]/Kit_PM_per_day_1H[c])\r\n K_PM_D_2H_1H.append(Day_1H.iloc[c,0])\r\n if (len(Kit_PM_per_day_3H)-1) >= c and (len(Kit_PM_per_day_1H)-1) >= c:\r\n #if Day_3H.iloc[c,7] > 0 and Day_1H.iloc[c,7] > 0 and Day_3H.iloc[c,0] == Day_1H.iloc[c,0]:\r\n if Kit_PM_3H_24hr.iloc[c, 6] > 0 and Kit_PM_1H_24hr.iloc[c, 6] > 0 and Kit_PM_1H_24hr.iloc[c, 0] == \\\r\n Kit_PM_3H_24hr.iloc[c, 0]:\r\n 
Kit_per_day_3H_1H.append(Kit_PM_per_day_3H[c]/Kit_PM_per_day_1H[c])\r\n K_PM_D_3H_1H.append(Day_1H.iloc[c,0])\r\n if (len(Kit_PM_per_day_3H)-1) >= c and (len(Kit_PM_per_day_2H)-1) >= c:\r\n #if Day_3H.iloc[c,7] > 0 and Day_2H.iloc[c,7] > 0 and Day_3H.iloc[c,0] == Day_2H.iloc[c,0]:\r\n if Kit_PM_3H_24hr.iloc[c, 6] > 0 and Kit_PM_2H_24hr.iloc[c, 6] > 0 and Kit_PM_3H_24hr.iloc[c, 0] == \\\r\n Kit_PM_2H_24hr.iloc[c, 0]:\r\n Kit_per_day_3H_2H.append(Kit_PM_per_day_3H[c]/Kit_PM_per_day_2H[c])\r\n K_PM_D_3H_2H.append(Day_2H.iloc[c,0])\r\n\r\n \r\n # now for box plotting for Kitchen PM per day percent changes\r\n \r\n #2H to 1H\r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_2H_1H, ax=ax_box, color='g')\r\n sns.distplot(Kit_per_day_2H_1H, ax=ax_hist, color='g')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 2H/1H (Kitchen PM per Day)')\r\n plt.ylim(top=1.5)\r\n plt.ylim(bottom = 0)\r\n \r\n #3H to 1H \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_3H_1H, ax=ax_box, color='r')\r\n sns.distplot(Kit_per_day_3H_1H, ax=ax_hist, color='r')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 3H/1H (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n #3H to 2H \r\n sns.set(style=\"ticks\")\r\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={\"height_ratios\": (0.15, 0.85)})\r\n sns.boxplot(Kit_per_day_3H_2H, ax=ax_box, color='m')\r\n sns.distplot(Kit_per_day_3H_2H, ax=ax_hist, color='m')\r\n ax_box.set(yticks=[])\r\n sns.despine(ax=ax_hist)\r\n sns.despine(ax=ax_box, left=True)\r\n plt.title('% 3H/2H (Kitchen PM per Day)')\r\n plt.ylim(top=2)\r\n plt.ylim(bottom = 0)\r\n \r\n \r\n #Plotting on the same graph\r\n fig, ax = plt.subplots()\r\n plt.title('Hood Kitchen PM per day')\r\n #1H\r\n quant_1_1H = np.percentile(Kit_PM_per_day_1H, [25,50,75])\r\n Top_lim_1_1H = quant_1_1H[2] + 1.5*(quant_1_1H[2] - quant_1_1H[0])\r\n Low_lim_1_1H = quant_1_1H[0] - 1.5*(quant_1_1H[2] - quant_1_1H[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_1H, positions = [1], widths = 0.6)\r\n kitchen_pm_1H_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_1H):\r\n if a > Top_lim_1_1H or a < Low_lim_1_1H:\r\n kitchen_pm_1H_outlier.append(K_PM_D_1H[v])\r\n plt.text(1,a,K_PM_D_1H[v])\r\n plt.text(0.5,0.1,'1H',color='b')\r\n\r\n #2N \r\n quant_1_2H = np.percentile(Kit_PM_per_day_2H, [25,50,75])\r\n Top_lim_1_2N = quant_1_2H[2] + 1.5*(quant_1_2H[2] - quant_1_2H[0])\r\n Low_lim_1_2N = quant_1_2H[0] - 1.5*(quant_1_2H[2] - quant_1_2H[0])\r\n \r\n bp_1 = plt.boxplot(Kit_PM_per_day_2H,positions = [2], widths = 0.6)\r\n kitchen_pm_2H_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_2H):\r\n if a > Top_lim_1_2N or a < Low_lim_1_2N:\r\n kitchen_pm_2H_outlier.append(K_PM_D_2H[v])\r\n plt.text(2,a,K_PM_D_2H[v])\r\n plt.text(1.5,0.1,'2H', color= 'g')\r\n #3H\r\n quant_1_3H = np.percentile(Kit_PM_per_day_3H, [25,50,75])\r\n Top_lim_1_3N = quant_1_3H[2] + 1.5*(quant_1_3H[2] - quant_1_3H[0])\r\n Low_lim_1_3N = quant_1_3H[0] - 1.5*(quant_1_3H[2] - quant_1_3H[0])\r\n kitchen_3H_outlier = []\r\n bp_1 = plt.boxplot(Kit_PM_per_day_3H,positions = [3], widths = 0.6)\r\n count = 0\r\n kitchen_pm_3H_outlier = []\r\n for v,a in enumerate(Kit_PM_per_day_3H):\r\n if a > 
Top_lim_1_3N or a < Low_lim_1_3N:\r\n kitchen_pm_3H_outlier.append(K_PM_D_3H[v])\r\n plt.text(3,a,K_PM_D_3H[v])\r\n# kitchen_3N_outlier.append(K_PM_D_3N[v])\r\n# count = count + 1\r\n# if count == (3):\r\n# plt.text(3,a,K_PM_D_3N[v],ha='left', va='bottom')\r\n# if count == (1):\r\n# plt.text(3,a,K_PM_D_3N[v],ha='left', va='top')\r\n# else:\r\n# plt.text(3,a,K_PM_D_3N[v],ha='right', va='bottom')\r\n plt.text(2.5,0.1,'3H', color='r') \r\n plt.xlim(0,4)\r\n plt.ylim(0,1200)\r\n print('Kitchen PM 1H had these values as outliers ', kitchen_pm_1H_outlier)\r\n print('Kitchen PM 2H had these values as outliers ', kitchen_pm_2H_outlier)\r\n print('Kitchen PM 3H had these values as outliers ', kitchen_pm_3H_outlier)\r\n plt.show()\r\n #print('3N had these values as outliers ' , kitchen_3N_outlier)\r\n\r\n # % change of PM per day \r\n\r\n fig_2, ax2 = plt.subplots()\r\n plt.title('% hood PM per Day Change' )\r\n #plt.hold(True)\r\n #2H to 1H\r\n quant_1_2H_1H = np.percentile(Kit_per_day_2H_1H, [25,50,75])\r\n Top_lim_1_2N_1N = quant_1_2H_1H[2] + 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])\r\n Low_lim_1_2N_1N = quant_1_2H_1H[0] - 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_2H_1H, positions=[1], widths= 0.6)\r\n kitchen_pm_2H_1H_outlier = []\r\n for v,a in enumerate(Kit_per_day_2H_1H):\r\n if a > Top_lim_1_2N_1N or a < Low_lim_1_2N_1N:\r\n kitchen_pm_2H_1H_outlier.append(K_PM_D_2H_1H[v])\r\n plt.text(1, a, K_PM_D_2H_1H[v])\r\n plt.text(0.75, -0.25, '2H / 1H', color= 'g')\r\n \r\n #3H to 1H\r\n quant_1_3H_1H = np.percentile(Kit_per_day_3H_1H, [25,50,75])\r\n Top_lim_1_3N_1N = quant_1_3H_1H[2] + 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])\r\n Low_lim_1_3N_1N = quant_1_3H_1H[0] - 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_3H_1H, positions=[2], widths= 0.6)\r\n kitchen_pm_3H_1H_outlier = []\r\n for v,a in enumerate(Kit_per_day_3H_1H):\r\n if a > Top_lim_1_3N_1N or a < Low_lim_1_3N_1N:\r\n kitchen_pm_3H_1H_outlier.append(K_PM_D_3H_1H[v])\r\n plt.text(2, a, K_PM_D_3H_1H[v])\r\n plt.text(1.75, -0.25, '3H / 1H', color= 'r')\r\n\r\n #3H to 2H\r\n quant_1_3H_2H = np.percentile(Kit_per_day_3H_2H, [25,50,75])\r\n Top_lim_1_3N_2N = quant_1_3H_2H[2] + 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])\r\n Low_lim_1_3N_2N = quant_1_3H_2H[0] - 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])\r\n\r\n bp_1_1 = plt.boxplot(Kit_per_day_3H_2H, positions=[3], widths= 0.6)\r\n kitchen_pm_3H_2H_outlier = []\r\n for v,a in enumerate(Kit_per_day_3H_2H):\r\n if a > Top_lim_1_3N_2N or a < Low_lim_1_3N_2N:\r\n kitchen_pm_3H_2H_outlier.append(K_PM_D_3H_2H[v])\r\n plt.text(3, a, K_PM_D_3H_2H[v])\r\n plt.text(2.75, -0.25, '3H / 2H', color= 'm')\r\n\r\n plt.xlim(0,4)\r\n plt.ylim(-0.5,5)\r\n print('Kitchen PM 2H/1H had these values as outliers ', kitchen_pm_2H_1H_outlier)\r\n print('Kitchen PM 3H/1H had these values as outliers ', kitchen_pm_3H_1H_outlier)\r\n print('Kitchen PM 3H/2H had these values as outliers ', kitchen_pm_3H_2H_outlier)\r\n plt.show()\r\n \r\n quant_1_1H = np.append(quant_1_1H, np.average(Kit_PM_per_day_1H))\r\n quant_1_2H = np.append(quant_1_2H, np.average(Kit_PM_per_day_2H))\r\n quant_1_3H = np.append(quant_1_3H, np.average(Kit_PM_per_day_3H))\r\n \r\n D_50_quant_phase_PM_D_hood = {'Percentile %': ['25','50','75', 'Avg'], '1H': quant_1_1H, '2H': quant_1_2H,'3H' : quant_1_3H}\r\n PM_D_50_phase_hood = pd.DataFrame(data=D_50_quant_phase_PM_D_hood, columns= ['Percentile %','1H','2H','3H' ])\r\n \r\n quant_1_2H_1H = np.append(quant_1_2H_1H , 
np.average(Kit_per_day_2H_1H))\r\n quant_1_3H_1H = np.append(quant_1_3H_1H , np.average(Kit_per_day_3H_1H))\r\n quant_1_3H_2H = np.append(quant_1_3H_2H , np.average(Kit_per_day_3H_2H))\r\n \r\n \r\n D_50_quant_percent_PM_D_hood ={'Percentile %': ['25','50','75', 'Avg'],'2H / 1H': quant_1_2H_1H,'3H / 1H': quant_1_3H_1H,'3H / 2H': quant_1_3H_2H}\r\n PM_D_50_percent_change_hood = pd.DataFrame(data=D_50_quant_percent_PM_D_hood, columns=['Percentile %','2H / 1H','3H / 1H','3H / 2H'])\r\n\r\n print(PM_D_50_phase_hood)\r\n print(PM_D_50_percent_change_hood)\r\n# when i am ready to transfer to a data frame and get the differences\r\n\r\n#histograms for the comparison\r\nif Hood_or_no == 'no_hood':\r\n plt.title('Histogram of Fuel per 24 Hours per Person - No Hood' )\r\n plt.hist([Fuel_per_day_per_adult_1N],\r\n color=['b'], alpha=0.5, label='1N')\r\n plt.hist([Fuel_per_day_per_adult_2N],\r\n color=['g'], alpha=0.5, label='2N')\r\n plt.hist([Fuel_per_day_per_adult_3N],\r\n color=['r'], alpha=0.5, label='3N')\r\n plt.hist([Fuel_per_day_per_adult_4N],\r\n color=['y'], alpha=0.5, label='4N')\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n\r\n\r\n plt.title('Histogram of Kitchen PM 24 Hours - No Hood' )\r\n plt.hist([Kit_PM_per_day_1N],\r\n color=['b'], alpha=0.5, label='1N')\r\n plt.hist([Kit_PM_per_day_2N],\r\n color=['g'], alpha=0.5, label='2N')\r\n plt.hist([Kit_PM_per_day_3N],\r\n color=['r'], alpha=0.5, label='3N')\r\n plt.hist([Kit_PM_per_day_4N],\r\n color=['y'], alpha=0.5, label='4N')\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n\r\nif Hood_or_no == 'hood':\r\n plt.title('Histogram of Fuel per 24 Hours per Person - Hood' )\r\n plt.hist([Fuel_per_day_per_adult_1H],\r\n color=['b'], alpha=0.5, label='1H')\r\n plt.hist([Fuel_per_day_per_adult_2H],\r\n color=['g'], alpha=0.5, label='2H')\r\n plt.hist([Fuel_per_day_per_adult_3H],\r\n color=['r'], alpha=0.5, label='3H')\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n\r\n plt.title('Histogram of Kitchen PM 24 Hours - Hood' )\r\n plt.hist([Kit_PM_per_day_1H],\r\n color=['b'], alpha=0.5, label='1H')\r\n plt.hist([Kit_PM_per_day_2H],\r\n color=['g'], alpha=0.5, label='2H')\r\n plt.hist([Kit_PM_per_day_3H],\r\n color=['r'], alpha=0.5, label='3H')\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n"
] | [
[
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.legend",
"pandas.read_csv",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"pandas.set_option",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.text",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.hist",
"numpy.average",
"numpy.percentile"
]
] |
icml2020submission6857/metarl | [
"9b66cefa2b6bcb6a38096d629ce8853b47c7171d"
] | [
"tests/metarl/torch/algos/test_torch_algo_utils.py"
] | [
"\"\"\"Test torch algo utility functions.\"\"\"\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nimport torch\nimport torch.nn.functional as F\n\nimport metarl.tf.misc.tensor_utils as tf_utils\nimport metarl.torch.algos._utils as torch_algo_utils\nfrom tests.fixtures import TfGraphTestCase\n\n\ndef stack(d, arr):\n \"\"\"Stack 'arr' 'd' times.\"\"\"\n return np.repeat(np.expand_dims(arr, axis=0), repeats=d, axis=0)\n\n\nONES = np.ones((4, 6))\nZEROS = np.zeros((4, 6))\nARRANGE = stack(4, np.arange(6))\nPI_DIGITS = stack(4, [3, 1, 4, 1, 5, 9])\nE_DIGITS = stack(4, [2, 7, 1, 8, 2, 8])\nFIBS = stack(4, [1, 1, 2, 3, 5, 8])\n\nnums_1d = np.arange(0, 4).astype(float)\nnums_2d = np.arange(0, 4).astype(float).reshape(2, 2)\nnums_3d = np.arange(0, 8).astype(float).reshape(2, 2, 2)\n\n\nclass TestTorchAlgoUtils(TfGraphTestCase):\n \"\"\"Test class for torch algo utility functions.\"\"\"\n # yapf: disable\n @pytest.mark.parametrize('gae_lambda, rewards_val, baselines_val', [\n (0.4, ONES, ZEROS),\n (0.8, PI_DIGITS, ARRANGE),\n (1.2, ONES, FIBS),\n (1.7, E_DIGITS, PI_DIGITS),\n ])\n # yapf: enable\n def testcompute_advantages(self, gae_lambda, rewards_val, baselines_val):\n \"\"\"Test compute_advantage function.\"\"\"\n discount = 0.99\n max_len = rewards_val.shape[-1]\n\n torch_advs = torch_algo_utils.compute_advantages(\n discount, gae_lambda, max_len, torch.Tensor(baselines_val),\n torch.Tensor(rewards_val))\n\n rewards = tf.compat.v1.placeholder(dtype=tf.float32,\n name='reward',\n shape=[None, None])\n baselines = tf.compat.v1.placeholder(dtype=tf.float32,\n name='baseline',\n shape=[None, None])\n adv = tf_utils.compute_advantages(discount, gae_lambda, max_len,\n baselines, rewards)\n tf_advs = self.sess.run(adv,\n feed_dict={\n rewards: rewards_val,\n baselines: baselines_val,\n })\n\n assert np.allclose(torch_advs.numpy(),\n tf_advs.reshape(torch_advs.shape),\n atol=1e-5)\n\n def test_add_padding_last_1d(self):\n \"\"\"Test pad_to_last function for 1d.\"\"\"\n max_length = 10\n\n expected = F.pad(torch.Tensor(nums_1d),\n (0, max_length - nums_1d.shape[-1]))\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_1d,\n total_length=max_length)\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_1d,\n total_length=10,\n axis=0)\n assert expected.eq(tensor_padding).all()\n\n def test_add_padding_last_2d(self):\n \"\"\"Test pad_to_last function for 2d.\"\"\"\n max_length = 10\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_2d, total_length=10)\n expected = F.pad(torch.Tensor(nums_2d),\n (0, max_length - nums_2d.shape[-1]))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_2d,\n total_length=10,\n axis=0)\n expected = F.pad(torch.Tensor(nums_2d),\n (0, 0, 0, max_length - nums_2d.shape[0]))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_2d,\n total_length=10,\n axis=1)\n expected = F.pad(torch.Tensor(nums_2d),\n (0, max_length - nums_2d.shape[-1], 0, 0))\n assert expected.eq(tensor_padding).all()\n\n def test_add_padding_last_3d(self):\n \"\"\"Test pad_to_last function for 3d.\"\"\"\n max_length = 10\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_3d, total_length=10)\n expected = F.pad(torch.Tensor(nums_3d),\n (0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_3d,\n total_length=10,\n axis=0)\n expected = F.pad(torch.Tensor(nums_3d),\n 
(0, 0, 0, 0, 0, max_length - nums_3d.shape[0]))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_3d,\n total_length=10,\n axis=1)\n expected = F.pad(torch.Tensor(nums_3d),\n (0, 0, 0, max_length - nums_3d.shape[-1], 0, 0))\n assert expected.eq(tensor_padding).all()\n\n tensor_padding = torch_algo_utils.pad_to_last(nums_3d,\n total_length=10,\n axis=2)\n expected = F.pad(torch.Tensor(nums_3d),\n (0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))\n assert expected.eq(tensor_padding).all()\n\n @pytest.mark.parametrize('nums', [nums_1d, nums_2d, nums_3d])\n def test_out_of_index_error(self, nums):\n \"\"\"Test pad_to_last raises IndexError.\"\"\"\n with pytest.raises(IndexError):\n torch_algo_utils.pad_to_last(nums,\n total_length=10,\n axis=len(nums.shape))\n\n def testmake_optimizer_with_type(self):\n \"\"\"Test make_optimizer function with type as first argument.\"\"\"\n optimizer_type = torch.optim.Adam\n module = torch.nn.Linear(2, 1)\n lr = 0.123\n optimizer = torch_algo_utils.make_optimizer(optimizer_type,\n module,\n lr=lr)\n assert isinstance(optimizer, optimizer_type)\n assert optimizer.defaults['lr'] == lr\n\n def testmake_optimizer_with_tuple(self):\n \"\"\"Test make_optimizer function with tuple as first argument.\"\"\"\n optimizer_type = (torch.optim.Adam, {'lr': 0.1})\n module = torch.nn.Linear(2, 1)\n optimizer = torch_algo_utils.make_optimizer(optimizer_type, module)\n assert isinstance(optimizer, optimizer_type)\n assert optimizer.defaults['lr'] == optimizer_type[1]['lr']\n\n def testmake_optimizer_raise_value_error(self):\n \"\"\"Test make_optimizer raises value error.\"\"\"\n optimizer_type = (torch.optim.Adam, {'lr': 0.1})\n module = torch.nn.Linear(2, 1)\n with pytest.raises(ValueError):\n _ = torch_algo_utils.make_optimizer(optimizer_type,\n module,\n lr=0.123)\n"
] | [
[
"tensorflow.compat.v1.placeholder",
"numpy.ones",
"torch.nn.Linear",
"numpy.zeros",
"numpy.arange",
"numpy.expand_dims",
"torch.Tensor"
]
] |
jhbrito/HelloWorlds | [
"7e2247ca7f312a516ce6a5054913d59e2f1de0f9"
] | [
"HelloWorldOpenCV.py"
] | [
"# Demo with a few examples of using OpenCV functions and UI\n# packages: opencv-python\n# uses lena: https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png\n\nimport numpy as np\nimport cv2\n\nprint(\"Hello World OpenCV\")\nprint(\"OpenCV Version:\", cv2.__version__)\n\nimage = np.ones((256, 256), dtype=\"uint8\")\nimage = image * 127\nimage[0:128, 0:128] = 0\nimage[128:, 128:] = 255\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)\n\n# Opening and Viewing an Image\nimport os.path\n\nif os.path.isfile('lena.png'):\n print(\"Test Image File exist\")\nelse:\n print(\"Test Image File does not exist; downloading...\")\n import urllib.request as urllib_request\n\n urllib_request.urlretrieve(\"https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png\", \"lena.png\")\n\nimage = cv2.imread(\"./lena.png\")\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nrgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\ncv2.imshow(\"Image RGB\", rgb_image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\ndef viewImage(image, name_of_window):\n cv2.namedWindow(name_of_window, cv2.WINDOW_AUTOSIZE)\n cv2.imshow(name_of_window, image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nviewImage(image, \"Lena\")\n\n# Edit pixels\nedited = image.copy()\nedited[200:390, 200:360, 0] = 255\nviewImage(edited, \"Lena edited\")\n\n# Cropping\ncropped = image[200:390, 200:360]\nviewImage(cropped, \"Lena cropped\")\n\n# Resizing\nscale_percent = 10 # percent of original size\nwidth = int(image.shape[1] * scale_percent / 100)\nheight = int(image.shape[0] * scale_percent / 100)\ndim = (width, height)\nresized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\nviewImage(resized, \"Lena resized to {}%\".format(scale_percent))\n\n# Drawing a Rectangle\noutput = image.copy()\ncv2.rectangle(output, (200, 200), (360, 390), (255, 0, 0), 10)\nviewImage(output, \"Lena with a rectangle\")\n\n# Drawing a line\ncv2.line(output, (256, 390), (256, 512), (0, 0, 255), 5)\nviewImage(output, \"Lena with a line\")\n\n# Writing on an image\ncv2.putText(output, \"Lena\", (360, 390), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)\nviewImage(output, \"Lena with text\")\n\n# Saving an image\ncv2.imwrite(\"./output.jpg\", output)\n\n# Blurring/Smoothing\nblurred = cv2.GaussianBlur(image, (15, 15), 0)\nviewImage(blurred, \"Lena blurred\")\n\n# Rotating\n(h, w, d) = image.shape\ncenter = (w // 2, h // 2)\nrot = 45\nM = cv2.getRotationMatrix2D(center, rot, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\nviewImage(rotated, \"Lena rotated by {} degrees\".format(rot))\n\n# Blend\nalpha_slider_max = 100\n\n\ndef on_trackbar_weight(val):\n alpha = val / alpha_slider_max\n beta = (1.0 - alpha)\n blend = cv2.addWeighted(image, alpha, rotated, beta, 0.0)\n cv2.imshow('Lena blended', blend)\n\n\ncv2.namedWindow('Lena blended')\ntrackbar_name = 'Alpha 0 - {}'.format(alpha_slider_max)\ncv2.createTrackbar(trackbar_name, 'Lena blended', 50, alpha_slider_max, on_trackbar_weight)\non_trackbar_weight(50)\ncv2.waitKey()\ncv2.destroyWindow('Lena blended')\n\n# Grayscaling\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nviewImage(gray_image, \"Lena gray-scale\")\n\n# Thresholding\nthreshold_slider_max = 255\nthreshold = 200\nret, threshold_image = cv2.threshold(gray_image, threshold, 255, 0)\n\n\ndef on_trackbar_threshold(val):\n threshold = val\n ret, threshold_image = cv2.threshold(gray_image, threshold, 255, 0)\n cv2.imshow(\"Lena thresholded\", threshold_image)\n\n\ncv2.namedWindow(\"Lena 
thresholded\")\ntrackbar_name = \"Threshold 0 - {}\".format(threshold_slider_max)\ncv2.createTrackbar(trackbar_name, \"Lena thresholded\", threshold, threshold_slider_max, on_trackbar_threshold)\non_trackbar_threshold(threshold)\ncv2.waitKey()\ncv2.destroyWindow(\"Lena thresholded\")\n\n# Contours\ncontours, hierarchy = cv2.findContours(threshold_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\nimage_with_contours = image.copy()\ncv2.drawContours(image_with_contours, contours, -1, (255, 0, 0), 1)\nviewImage(image_with_contours, \"Lena contours\")\n\n# Face Detection\nface_cascade = cv2.CascadeClassifier('venv\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml')\nfaces = face_cascade.detectMultiScale(gray_image)\nprint(\"Lena with {} faces detected\".format(len(faces)))\nimage_faces = image.copy()\nfor (x, y, w, h) in faces:\n cv2.rectangle(image_faces, (x, y), (x + w, y + h), (0, 255, 0), 2)\nviewImage(image_faces, \"Lena with {} faces detected\".format(len(faces)))\n\n\ndef display_box(im, bbox):\n n_boxes = len(bbox)\n for j_box in range(n_boxes):\n for j in range(4):\n cv2.line(im,\n (int(bbox[j_box][j][0]), int(bbox[j_box][j][1])),\n (int(bbox[j_box][(j + 1) % 4][0]), int(bbox[j_box][(j + 1) % 4][1])),\n (255, 0, 0), 3)\n # Display results\n cv2.imshow(\"Results\", im)\n\n\ninputImage = cv2.imread(\"qrcode.jpg\")\nqrDecoder = cv2.QRCodeDetector()\ndata, bbox, rectifiedImage = qrDecoder.detectAndDecode(inputImage)\nif len(data) > 0:\n print(\"Decoded Data : {}\".format(data))\n display_box(inputImage, bbox)\n rectifiedImage = np.uint8(rectifiedImage)\n cv2.imshow(\"Rectified QRCode\", rectifiedImage)\nelse:\n print(\"QR Code not detected\")\n cv2.imshow(\"Results\", inputImage)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.ones",
"numpy.uint8"
]
] |
rish-16/gym-navmaze | [
"cc21d730ec6ab1e96a4a1a8f602a5bbb951d2929"
] | [
"src/cartpole.py"
] | [
"import numpy as np\nfrom collections import deque\nimport pickle\nimport torch\nfrom utils import collect_trajectories, random_sample\nfrom PPO import PPO\nimport matplotlib.pyplot as plt\nfrom parallelEnv import *\nimport gym\n\nenv = gym.make(\"CartPole-v0\")\nenv.reset()\nenv.seed(2)\n\nobs_dim = env.observation_space.shape[0]\nn_actions = env.action_space.n\nact_dist = [0 for i in range(n_actions)]\n\ndef train(episode, env_name):\n gamma = .99\n gae_lambda = 0.95\n use_gae = True\n beta = .01\n cliprange = 0.1\n best_score = -np.inf\n goal_score = 195.0\n ep_length = []\n\n nenvs = 1\n rollout_length = 200\n minibatches = 10*8\n nbatch = nenvs * rollout_length\n optimization_epochs = 4\n \n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n envs = parallelEnv(env_name, nenvs, seed=1234)\n agent = PPO(state_size=obs_dim,\n action_size=n_actions,\n seed=0,\n hidden_layers=[64,64],\n lr_policy=1e-4, \n use_reset=True,\n device=device)\n\n print(agent.policy)\n\n # keep track of progress\n mean_rewards = []\n scores_window = deque(maxlen=100)\n loss_storage = []\n\n for i_episode in range(episode+1):\n log_probs_old, states, actions, rewards, values, dones, vals_last, infos, ep_length = collect_trajectories(envs, act_dist, ep_length, agent.policy, rollout_length)\n\n returns = np.zeros_like(rewards)\n advantages = np.zeros_like(rewards)\n \n if not use_gae:\n for t in reversed(range(rollout_length)):\n if t == rollout_length - 1:\n returns[t] = rewards[t] + gamma * (1-dones[t]) * vals_last\n else:\n returns[t] = rewards[t] + gamma * (1-dones[t]) * returns[t+1]\n advantages[t] = returns[t] - values[t]\n else:\n for t in reversed(range(rollout_length)):\n if t == rollout_length - 1:\n returns[t] = rewards[t] + gamma * (1-dones[t]) * vals_last\n td_error = returns[t] - values[t]\n else:\n returns[t] = rewards[t] + gamma * (1-dones[t]) * returns[t+1]\n td_error = rewards[t] + gamma * (1-dones[t]) * values[t+1] - values[t]\n advantages[t] = advantages[t] * gae_lambda * gamma * (1-dones[t]) + td_error\n \n # convert to pytorch tensors and move to gpu if available\n returns = torch.from_numpy(returns).float().to(device).view(-1,)\n advantages = torch.from_numpy(advantages).float().to(device).view(-1,)\n advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n \n for _ in range(optimization_epochs):\n sampler = random_sample(nbatch, minibatches)\n for inds in sampler:\n mb_log_probs_old = log_probs_old[inds]\n mb_states = states[inds]\n mb_actions = actions[inds]\n mb_returns = returns[inds]\n mb_advantages = advantages[inds]\n loss_p, loss_v, loss_ent = agent.update(mb_log_probs_old, mb_states, mb_actions, mb_returns, mb_advantages, cliprange=cliprange, beta=beta)\n loss_storage.append([loss_p, loss_v, loss_ent])\n \n total_rewards = np.sum(rewards, axis=0)\n scores_window.append(np.mean(total_rewards)) # last 100 scores\n mean_rewards.append(np.mean(total_rewards)) # get the average reward of the parallel environments\n cliprange *= 0.999 # the clipping parameter reduces as time goes on\n beta *= 0.999 # the regulation term reduces\n \n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n print(total_rewards)\n if np.mean(scores_window)>=goal_score and np.mean(scores_window)>=best_score: \n torch.save(agent.policy.state_dict(), \"policy_cartpole.pth\")\n best_score = np.mean(scores_window)\n \n return mean_rewards, loss_storage, act_dist, ep_length\n\nmean_rewards, loss, 
new_act_dist, ep_length = train(10000, 'CartPole-v0')\n\nprint (new_act_dist[-1])\nprint (ep_length)\n\nplt.rcParams['xtick.direction'] = 'in'\nplt.rcParams['ytick.direction'] = 'in'\nplt.rcParams['font.size'] = 10\n\nplt.title(\"PPO + MLP + GAE for 10000 episodes\")\n\nplt.subplot(131)\nplt.plot(mean_rewards)\nplt.ylabel('Average score')\nplt.xlabel('Episode')\n\nplt.subplot(132)\nplt.plot(list(range(len(ep_length))), ep_length, color=\"red\")\nplt.ylabel('Episode Length')\nplt.xlabel('Episode')\n\nplt.subplot(133)\nplt.ylabel('Frequency')\nplt.xlabel('Actions')\nplt.bar(['Action {}'.format(i) for i in range(len(new_act_dist))], new_act_dist[-1])\n\nplt.show()"
] | [
[
"numpy.zeros_like",
"numpy.sum",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"torch.cuda.is_available",
"torch.from_numpy",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.mean"
]
] |
williamberrios/lofo-importance | [
"34967cf47dc1c2797d3a77f8926918ae91e4197a"
] | [
"lofo/infer_defaults.py"
] | [
"import numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom lightgbm import LGBMClassifier, LGBMRegressor\n\n\ndef infer_model(df, features, y, n_jobs):\n model_class = LGBMRegressor\n if len(np.unique(y)) == 2:\n y = LabelEncoder().fit_transform(y)\n model_class = LGBMClassifier\n\n categoricals = df[features].select_dtypes(exclude=[np.number]).columns.tolist()\n for f in categoricals:\n df[f] = LabelEncoder().fit_transform(df[f].apply(str))\n\n min_child_samples = int(0.01*df.shape[0])\n\n model = model_class(min_child_samples=min_child_samples, n_jobs=n_jobs)\n\n return model, df, categoricals, y\n"
] | [
[
"numpy.unique",
"sklearn.preprocessing.LabelEncoder"
]
] |
kolibril13/napari | [
"b39647d94e587f0255b0d4cc3087855e160a8929"
] | [
"napari/_vispy/overlays/axes.py"
] | [
"import numpy as np\nfrom vispy.scene.visuals import Compound, Line, Mesh, Text\nfrom vispy.visuals.transforms import STTransform\n\nfrom ...layers.shapes._shapes_utils import triangulate_ellipse\nfrom ...utils.colormaps.standardize_color import transform_color\nfrom ...utils.theme import get_theme\nfrom ...utils.translations import trans\n\n\ndef make_dashed_line(num_dashes, axis):\n \"\"\"Make a dashed line.\n\n Parameters\n ----------\n num_dashes : int\n Number of dashes in the line.\n axis : int\n Axis which is dashed.\n\n Returns\n -------\n np.ndarray\n Dashed line, of shape (num_dashes, 3) with zeros in\n the non dashed axes and line segments in the dashed\n axis.\n \"\"\"\n dashes = np.linspace(0, 1, num_dashes * 2)\n dashed_line_ends = np.concatenate(\n [[dashes[2 * i], dashes[2 * i + 1]] for i in range(num_dashes)], axis=0\n )\n dashed_line = np.zeros((2 * num_dashes, 3))\n dashed_line[:, axis] = np.array(dashed_line_ends)\n return dashed_line\n\n\ndef make_arrow_head(num_segments, axis):\n \"\"\"Make an arrowhead line.\n\n Parameters\n ----------\n num_segments : int\n Number of segments in the arrowhead.\n axis\n Arrowhead direction.\n\n Returns\n -------\n np.ndarray, np.ndarray\n Vertices and faces of the arrowhead.\n \"\"\"\n corners = np.array([[-1, -1], [-1, 1], [1, 1], [1, -1]]) * 0.1\n vertices, faces = triangulate_ellipse(corners, num_segments)\n full_vertices = np.zeros((num_segments + 1, 3))\n inds = list(range(3))\n inds.pop(axis)\n full_vertices[:, inds] = vertices\n full_vertices[:, axis] = 0.9\n full_vertices[0, axis] = 1.02\n return full_vertices, faces\n\n\ndef color_lines(colors):\n if len(colors) == 2:\n return np.concatenate(\n [[colors[0]] * 2, [colors[1]] * 2],\n axis=0,\n )\n elif len(colors) == 3:\n return np.concatenate(\n [[colors[0]] * 2, [colors[1]] * 2, [colors[2]] * 2],\n axis=0,\n )\n else:\n return ValueError(\n trans._(\n 'Either 2 or 3 colors must be provided, got {number}.',\n deferred=True,\n number=len(colors),\n )\n )\n\n\ndef color_dashed_lines(colors):\n if len(colors) == 2:\n return np.concatenate(\n [[colors[0]] * 2, [colors[1]] * 4 * 2],\n axis=0,\n )\n elif len(colors) == 3:\n return np.concatenate(\n [[colors[0]] * 2, [colors[1]] * 4 * 2, [colors[2]] * 8 * 2],\n axis=0,\n )\n else:\n return ValueError(\n trans._(\n 'Either 2 or 3 colors must be provided, got {number}.',\n deferred=True,\n number=len(colors),\n )\n )\n\n\ndef color_arrowheads(colors, num_segments):\n if len(colors) == 2:\n return np.concatenate(\n [[colors[0]] * num_segments, [colors[1]] * num_segments],\n axis=0,\n )\n elif len(colors) == 3:\n return np.concatenate(\n [\n [colors[0]] * num_segments,\n [colors[1]] * num_segments,\n [colors[2]] * num_segments,\n ],\n axis=0,\n )\n else:\n return ValueError(\n trans._(\n 'Either 2 or 3 colors must be provided, got {number}.',\n deferred=True,\n number=len(colors),\n )\n )\n\n\nclass VispyAxesOverlay:\n \"\"\"Axes indicating world coordinate origin and orientation.\"\"\"\n\n _NUM_SEGMENTS_ARROWHEAD = 100\n\n def __init__(self, viewer, parent=None, order=0):\n self._viewer = viewer\n self._scale = 1\n\n # Target axes length in canvas pixels\n self._target_length = 80\n # CMYRGB for 6 axes data in x, y, z, ... 
ordering\n self._default_color = [\n [0, 1, 1, 1],\n [1, 0, 1, 1],\n [1, 1, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n ]\n # Text offset from line end position\n self._text_offsets = 0.1 * np.array([1, 1, 1])\n\n # note order is x, y, z for VisPy\n self._line_data2D = np.array(\n [[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0]]\n )\n self._line_data3D = np.array(\n [[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]]\n )\n\n # note order is x, y, z for VisPy\n self._dashed_line_data2D = np.concatenate(\n [[[1, 0, 0], [0, 0, 0]], make_dashed_line(4, axis=1)],\n axis=0,\n )\n self._dashed_line_data3D = np.concatenate(\n [\n [[1, 0, 0], [0, 0, 0]],\n make_dashed_line(4, axis=1),\n make_dashed_line(8, axis=2),\n ],\n axis=0,\n )\n\n # note order is x, y, z for VisPy\n vertices = np.empty((0, 3))\n faces = np.empty((0, 3))\n for axis in range(2):\n v, f = make_arrow_head(self._NUM_SEGMENTS_ARROWHEAD, axis)\n faces = np.concatenate([faces, f + len(vertices)], axis=0)\n vertices = np.concatenate([vertices, v], axis=0)\n self._default_arrow_vertices2D = vertices\n self._default_arrow_faces2D = faces.astype(int)\n\n vertices = np.empty((0, 3))\n faces = np.empty((0, 3))\n for axis in range(3):\n v, f = make_arrow_head(self._NUM_SEGMENTS_ARROWHEAD, axis)\n faces = np.concatenate([faces, f + len(vertices)], axis=0)\n vertices = np.concatenate([vertices, v], axis=0)\n self._default_arrow_vertices3D = vertices\n self._default_arrow_faces3D = faces.astype(int)\n\n self.node = Compound(\n [Line(connect='segments', method='gl', width=3), Mesh(), Text()],\n parent=parent,\n )\n self.node.transform = STTransform()\n self.node.order = order\n\n # Add a text node to display axes labels\n self.text_node = self.node._subvisuals[2]\n self.text_node.font_size = 10\n self.text_node.anchors = ('center', 'center')\n self.text_node.text = f'{1}'\n\n self.node.canvas._backend.destroyed.connect(self._set_canvas_none)\n # End Note\n\n self._viewer.events.theme.connect(self._on_data_change)\n self._viewer.axes.events.visible.connect(self._on_visible_change)\n self._viewer.axes.events.colored.connect(self._on_data_change)\n self._viewer.axes.events.dashed.connect(self._on_data_change)\n self._viewer.axes.events.labels.connect(self._on_data_change)\n self._viewer.axes.events.arrows.connect(self._on_data_change)\n self._viewer.dims.events.order.connect(self._on_data_change)\n self._viewer.dims.events.range.connect(self._on_data_change)\n self._viewer.dims.events.ndisplay.connect(self._on_data_change)\n self._viewer.dims.events.axis_labels.connect(self._on_data_change)\n self._viewer.camera.events.zoom.connect(self._on_zoom_change)\n\n self._on_visible_change(None)\n self._on_data_change(None)\n\n def _set_canvas_none(self):\n self.node._set_canvas(None)\n self.text_node._set_canvas(None)\n\n def _on_visible_change(self, event):\n \"\"\"Change visibiliy of axes.\"\"\"\n self.node.visible = self._viewer.axes.visible\n self._on_zoom_change(event)\n self._on_data_change(event)\n\n def _on_data_change(self, event):\n \"\"\"Change style of axes.\"\"\"\n if not self._viewer.axes.visible:\n return\n\n # Determine which axes are displayed\n axes = self._viewer.dims.displayed\n\n # Actual number of displayed dims\n ndisplay = len(self._viewer.dims.displayed)\n\n # Determine the labels of those axes\n axes_labels = [self._viewer.dims.axis_labels[a] for a in axes[::-1]]\n # Counting backwards from total number of dimensions\n # determine axes positions. 
This is done as by default\n # the last NumPy axis corresponds to the first Vispy axis\n reversed_axes = [self._viewer.dims.ndim - 1 - a for a in axes[::-1]]\n\n # Determine colors of axes based on reverse position\n if self._viewer.axes.colored:\n axes_colors = [\n self._default_color[ra % len(self._default_color)]\n for ra in reversed_axes\n ]\n else:\n # the reason for using the `as_hex` here is to avoid\n # `UserWarning` which is emitted when RGB values are above 1\n background_color = get_theme(\n self._viewer.theme, False\n ).canvas.as_hex()\n background_color = transform_color(background_color)[0]\n color = np.subtract(1, background_color)\n color[-1] = background_color[-1]\n axes_colors = [color] * ndisplay\n\n # Determine data based on number of displayed dimensions and\n # axes visualization parameters\n if self._viewer.axes.dashed and ndisplay == 2:\n data = self._dashed_line_data2D\n color = color_dashed_lines(axes_colors)\n text_data = self._line_data2D[1::2]\n elif self._viewer.axes.dashed and ndisplay == 3:\n data = self._dashed_line_data3D\n color = color_dashed_lines(axes_colors)\n text_data = self._line_data3D[1::2]\n elif not self._viewer.axes.dashed and ndisplay == 2:\n data = self._line_data2D\n color = color_lines(axes_colors)\n text_data = self._line_data2D[1::2]\n elif not self._viewer.axes.dashed and ndisplay == 3:\n data = self._line_data3D\n color = color_lines(axes_colors)\n text_data = self._line_data3D[1::2]\n else:\n raise ValueError(\n trans._(\n 'Axes dash status and ndisplay combination not supported',\n deferred=True,\n )\n )\n\n if self._viewer.axes.arrows and ndisplay == 2:\n arrow_vertices = self._default_arrow_vertices2D\n arrow_faces = self._default_arrow_faces2D\n arrow_color = color_arrowheads(\n axes_colors, self._NUM_SEGMENTS_ARROWHEAD\n )\n elif self._viewer.axes.arrows and ndisplay == 3:\n arrow_vertices = self._default_arrow_vertices3D\n arrow_faces = self._default_arrow_faces3D\n arrow_color = color_arrowheads(\n axes_colors, self._NUM_SEGMENTS_ARROWHEAD\n )\n else:\n arrow_vertices = np.zeros((3, 3))\n arrow_faces = np.array([[0, 1, 2]])\n arrow_color = [[0, 0, 0, 0]]\n\n self.node._subvisuals[0].set_data(data, color)\n self.node._subvisuals[1].set_data(\n vertices=arrow_vertices,\n faces=arrow_faces,\n face_colors=arrow_color,\n )\n\n # Set visibility status of text\n self.text_node.visible = (\n self._viewer.axes.visible and self._viewer.axes.labels\n )\n self.text_node.text = axes_labels\n self.text_node.color = axes_colors\n self.text_node.pos = text_data + self._text_offsets\n\n def _on_zoom_change(self, event):\n \"\"\"Update axes length based on zoom scale.\"\"\"\n if not self._viewer.axes.visible:\n return\n\n scale = 1 / self._viewer.camera.zoom\n\n # If scale has not changed, do not redraw\n if abs(np.log10(self._scale) - np.log10(scale)) < 1e-4:\n return\n self._scale = scale\n scale_canvas2world = self._scale\n target_canvas_pixels = self._target_length\n scale = target_canvas_pixels * scale_canvas2world\n # Update axes scale\n self.node.transform.scale = [scale, scale, scale, 1]\n"
] | [
[
"numpy.empty",
"numpy.zeros",
"numpy.subtract",
"numpy.log10",
"numpy.array",
"numpy.concatenate",
"numpy.linspace"
]
] |
PaperCodeReview/MoCo-TF | [
"1ea01b2d005de3e030229f79a37135468fa1631e"
] | [
"dataloader.py"
] | [
"import os\r\nimport random\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\nfrom augment import Augment\r\n\r\n\r\nAUTO = tf.data.experimental.AUTOTUNE\r\n\r\n\r\ndef set_dataset(task, data_path):\r\n trainset = pd.read_csv(\r\n os.path.join(\r\n data_path, 'imagenet_trainset.csv'\r\n )).values.tolist()\r\n trainset = [[os.path.join(data_path, t[0]), t[1]] for t in trainset]\r\n\r\n if task == 'lincls':\r\n valset = pd.read_csv(\r\n os.path.join(\r\n data_path, 'imagenet_valset.csv'\r\n )).values.tolist()\r\n valset = [[os.path.join(data_path, t[0]), t[1]] for t in valset]\r\n return np.array(trainset, dtype='object'), np.array(valset, dtype='object')\r\n\r\n return np.array(trainset, dtype='object')\r\n\r\n\r\nclass DataLoader:\r\n def __init__(self, args, mode, datalist, batch_size, num_workers=1, shuffle=True):\r\n self.args = args\r\n self.mode = mode\r\n self.datalist = datalist\r\n self.batch_size = batch_size\r\n self.num_workers = num_workers\r\n self.shuffle = shuffle\r\n\r\n self.dataloader = self._dataloader()\r\n\r\n def __len__(self):\r\n return len(self.datalist)\r\n\r\n def fetch_dataset(self, path, y=None):\r\n x = tf.io.read_file(path)\r\n if y is not None:\r\n return tf.data.Dataset.from_tensors((x, y))\r\n return tf.data.Dataset.from_tensors(x)\r\n\r\n def augmentation(self, img, shape):\r\n augset = Augment(self.args, self.mode)\r\n if self.args.task in ['v1', 'v2']:\r\n img_list = []\r\n for _ in range(2): # query, key\r\n aug_img = tf.identity(img)\r\n if self.args.task == 'v1':\r\n aug_img = augset._augmentv1(aug_img, shape) # moco v1\r\n else:\r\n radius = np.random.choice([3, 5])\r\n aug_img = augset._augmentv2(aug_img, shape, (radius, radius)) # moco v2\r\n img_list.append(aug_img)\r\n return img_list\r\n else:\r\n return augset._augment_lincls(img, shape)\r\n\r\n def dataset_parser(self, value, label=None):\r\n shape = tf.image.extract_jpeg_shape(value)\r\n img = tf.io.decode_jpeg(value, channels=3)\r\n if label is None:\r\n # moco\r\n query, key = self.augmentation(img, shape)\r\n inputs = {'query': query, 'key': key}\r\n labels = tf.zeros([])\r\n else:\r\n # lincls\r\n inputs = self.augmentation(img, shape)\r\n labels = tf.one_hot(label, self.args.classes)\r\n return (inputs, labels)\r\n\r\n def shuffle_BN(self, value, labels):\r\n if self.num_workers > 1:\r\n pre_shuffle = [(i, value['key'][i]) for i in range(self.batch_size)]\r\n random.shuffle(pre_shuffle)\r\n shuffle_idx = []\r\n value_temp = []\r\n for vv in pre_shuffle:\r\n shuffle_idx.append(vv[0])\r\n value_temp.append(tf.expand_dims(vv[1], axis=0))\r\n value['key'] = tf.concat(value_temp, axis=0)\r\n unshuffle_idx = np.array(shuffle_idx).argsort().tolist()\r\n value.update({'unshuffle': unshuffle_idx})\r\n return (value, labels)\r\n \r\n def _dataloader(self):\r\n self.imglist = self.datalist[:,0].tolist()\r\n if self.args.task in ['v1', 'v2']:\r\n dataset = tf.data.Dataset.from_tensor_slices(self.imglist)\r\n else:\r\n self.labellist = self.datalist[:,1].tolist()\r\n dataset = tf.data.Dataset.from_tensor_slices((self.imglist, self.labellist))\r\n\r\n dataset = dataset.repeat()\r\n if self.shuffle:\r\n dataset = dataset.shuffle(len(self.datalist))\r\n\r\n dataset = dataset.interleave(self.fetch_dataset, num_parallel_calls=AUTO)\r\n dataset = dataset.map(self.dataset_parser, num_parallel_calls=AUTO)\r\n dataset = dataset.batch(self.batch_size)\r\n dataset = dataset.prefetch(AUTO)\r\n if self.args.shuffle_bn and self.args.task in ['v1', 'v2']:\r\n # only moco\r\n dataset = 
dataset.map(self.shuffle_BN, num_parallel_calls=AUTO)\r\n return dataset"
] | [
[
"tensorflow.io.decode_jpeg",
"tensorflow.zeros",
"tensorflow.identity",
"tensorflow.image.extract_jpeg_shape",
"tensorflow.expand_dims",
"numpy.random.choice",
"tensorflow.data.Dataset.from_tensors",
"tensorflow.one_hot",
"tensorflow.io.read_file",
"tensorflow.concat",
"numpy.array",
"tensorflow.data.Dataset.from_tensor_slices"
]
] |
leeamen/k_means | [
"dfa9cad22033c108e3988a99f4d58c685eb06921"
] | [
"user_modeling.py"
] | [
"#!/usr/bin/python\n#coding:utf-8\nimport numpy as np\nimport logging\nimport mylog\nimport mykmeans as ml\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.ERROR)\n\ndef str2num(s):\n a = ['very_low', 'Low', 'Middle', 'High']\n for i in range(0, len(a)):\n if a[i] == s:\n return float(i)\nif __name__ == '__main__':\n filename = './data/data_user_modeling.txt'\n train_data = np.loadtxt(filename, delimiter = ',', converters = {5:str2num})\n logger.debug(train_data)\n logger.debug(train_data.shape)\n\n train_x = train_data[:,0:-1]\n train_y = train_data[:,-1]\n logger.debug(train_x)\n logger.debug(train_y)\n\n param = {}\n param['use_random_for_k'] = 1\n param['k'] = [i for i in range(0, 258, 1)]\n param['n_clusters'] = 4\n param['max_iter'] = 100\n kmeans = ml.Kmeans(param)\n kmeans.Fit(train_x)\n# logger.debug(kmeans)\n pred = kmeans.Predict(train_x)\n logger.info('train_y:%s', train_y)\n logger.info(' pred:%s', pred)\n# logger.info('k-means准确率:%f', 1.0*sum(pred == train_y)/len(train_y))\n# ml.PickingRightK(train_x, param)\n import myplot\n myplot.Figure()\n ml.FitMulti(train_x, param, 100)\n ml.BisectingFitMulti(train_x, param, 100) \n myplot.Legend(['k-means','bisecting'])\n myplot.Title('user modeling')\n myplot.Show() \n"
] | [
[
"numpy.loadtxt"
]
] |
esnet/hps-rl | [
"8426652e622394a955a44c42201e2204f6bfa0f2"
] | [
"searchmethods/modularGA.py"
] | [
"\nimport numpy, random\n\nclass Individual:\n def __init__(self,genome, llimits =[], ulimits=[], type=[], LEN = 1,fitness_func = None):\n if genome is None:\n self.genome = numpy.zeros(LEN,dtype=float)\n for gene in range(LEN):\n if type[gene] == \"integer\":\n self.genome[gene] = numpy.random.randint(llimits[gene], ulimits[gene])\n else:\n self.genome[gene] = numpy.random.uniform(llimits[gene], ulimits[gene])\n else:\n self.genome = genome\n self.fitness = fitness_func(self.genome)\n\n def __str__(self):\n return \"\".join(str(int(i)) for i in self.genome)\n\n\ndef crossover(a, b, fitness):\n g, h = a.genome.copy(), b.genome.copy()\n for pt in range(len(g)):\n if numpy.random.random() < 0.5:\n g[pt], h[pt] = h[pt], g[pt]\n return (Individual(genome=g,fitness_func=fitness), Individual(genome=h,fitness_func=fitness))\n\ndef mutate(a, mut_prob,fitness):\n g = a.genome.copy()\n for pt in range(len(g)):\n if numpy.random.random() < mut_prob:\n g[pt] = not g[pt]\n return Individual(g,fitness_func=fitness)\n\n\ndef stats(pop, gen,threshold):\n best = max(pop, key=lambda x: x.fitness)\n print(\"{0} {1:.2f} {2} {3}\".format(gen, numpy.mean([i.fitness for i in pop]), best.fitness, str(best)))\n return (best.fitness >= threshold)\n\n\ndef roulette(items, n):\n total = float(sum(w.fitness for w in items))\n i = 0\n w, v = items[0].fitness, items[0]\n while n:\n x = total * (1 - numpy.random.random() ** (1.0 / n))\n total -= x\n while x > w:\n x -= w\n i += 1\n w, v = items[i].fitness, items[i]\n w -= x\n yield v\n n -= 1\n\n\ndef tournament(items, n, tsize=5):\n for i in range(n):\n candidates = random.sample(items, tsize)\n yield max(candidates, key=lambda x: x.fitness)\n\ndef step(pop,cross_prob,mut_prob,fitness):\n newpop = []\n parents = roulette(pop, len(pop) + 1) # one extra for final xover\n while len(newpop) < len(pop):\n if numpy.random.random() < cross_prob:\n newpop.extend(map(mutate, crossover(next(parents), next(parents),fitness=fitness),[mut_prob,mut_prob],[fitness,fitness]))\n else:\n newpop.append(mutate(next(parents),mut_prob=mut_prob,fitness=fitness))\n return newpop\n\n\ndef run(llimit, ulimit, type, GENERATIONS, CROSSOVER_PROB, POPSIZE, LEN, MUTATION_PROB,FITNESS,THRESHOLD):\n numpy.random.seed(100)\n pop = [Individual(None,llimit,ulimit,type,LEN,FITNESS) for i in range(POPSIZE)]\n print(pop)\n stats(pop, 0, THRESHOLD)\n for gen in range(1, GENERATIONS):\n pop = step(pop,CROSSOVER_PROB,MUTATION_PROB,FITNESS)\n if stats(pop, gen, THRESHOLD):\n print(\"Success\")\n\nllimit = [0.5,1e-6,1e-6,0]\nulimit = [1.5,0.1,0.1,3]\ntype = ['real','real','real','integer']\nLEN = 4\nFITNESS, SUCCESS_THRESHOLD = (numpy.sum, LEN)\nrun(llimit,ulimit,type,100,1,100,4,0.9,FITNESS,10)"
] | [
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.random",
"numpy.random.randint",
"numpy.mean"
]
] |
Bruce-zxy/deep-study-lenet5 | [
"bba6531c9234c077107f79ff852f141cfed58229"
] | [
"data_creation.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport random\nimport numpy as np\nimport pandas as pd\nimport h5py\nimport matplotlib.pyplot as plt\nfrom math import cos, sin, atan2, sqrt, pi, radians, degrees, ceil, isnan\nfrom skimage import io, transform\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\n\nTRAIN_CSV_PATH = './pointdata4/traindata/'\nTEST_CSV_PATH = './pointdata4/testdata/'\n\ndata_path = './h5/'\ntrain_file_path = data_path + 'initial_train_data.h5'\ntest_file_path = data_path + 'initial_test_data.h5'\n\n# 按旋转角度分类的子级目录\nlabel_dirs = [[16, 19], [43,71,129, 260], [95,128,129, 274]]\n# 按道路分类的父级目录\nlabel_set = [0, 1, 2]\n\n# 获取二维点集的中心点坐标\ndef get_centroid(point_set):\n c_x, c_y = zip(*point_set)\n centroid_x = sum(c_x)/len(c_x)\n centroid_y = sum(c_y)/len(c_y)\n return centroid_x, centroid_y\n\n# 逆时针旋转坐标点\n\n\ndef n_rotate(angle, valuex, valuey, centerx, centery):\n valuex = np.array(valuex)\n valuey = np.array(valuey)\n nRotatex = (valuex-centerx)*cos(angle) - \\\n (valuey-centery)*sin(angle) + centerx\n nRotatey = (valuex-centerx)*sin(angle) + \\\n (valuey-centery)*cos(angle) + centery\n return nRotatex, nRotatey\n\n# 获取csv文件的列表\n\n\ndef get_csv_list(path):\n csv_file_list = []\n file_list = os.listdir(path)\n for file_name in file_list:\n if file_name.endswith('csv'):\n csv_file_list.append(path + \"/\" + file_name)\n return csv_file_list\n\n# 获取csv文件中的点集数据\n\n\ndef get_csv_data(path_list):\n # 创建空的定维数组\n sum_data = np.empty([0, 1024, 2], dtype=np.float32)\n\n # 遍历每个csv文件\n for path in path_list:\n # 将每个csv文件读取为Numpy的数据\n data = np.genfromtxt(path, delimiter=',', dtype=np.float32)[:, :2]\n data_len = len(data)\n empty_len = 1024 - data_len\n\n # 完整的1024个元数据=csv文件数据+在csv文件中随机指定下标数据\n count = 0\n while count < empty_len:\n data = np.append(\n data, [data[random.randint(0, data_len-1)]], axis=0)\n count += 1\n sum_data = np.append(sum_data, [data], axis=0)\n print(sum_data.shape)\n return sum_data\n\n\n# 随机打乱点集数据\ndef exchange_data_index(sum_data, label_data):\n cursor_index = 0\n max_range = len(sum_data)\n while cursor_index < max_range:\n random_index = random.randint(0, max_range-1)\n temp_sum_data = sum_data[0]\n temp_label_data = label_data[0]\n\n sum_data = np.delete(sum_data, 0, axis=0)\n label_data = np.delete(label_data, 0, axis=0)\n sum_data = np.insert(sum_data, random_index, temp_sum_data, axis=0)\n label_data = np.insert(label_data, random_index,\n temp_label_data, axis=0)\n\n cursor_index += 1\n return sum_data, label_data\n\n\ndef get_label_and_data(root_path, label_dirs):\n sum_data = np.empty([0, 1024, 2], dtype=np.float32)\n typical_data = np.empty([0], dtype=np.int32)\n\n for data_type, label_dir_set in enumerate(label_dirs):\n print(\">> 现在进入【第%d类】数据\" % (data_type+1))\n for rotate_angle in label_dir_set:\n print(\"-- 需要旋转%d度的数据集:\" % (rotate_angle))\n # 获取csv文件列表\n csv_list = get_csv_list(\n root_path + str(data_type) + '/' + str(rotate_angle))\n # 获取csv文件点集数据\n csv_data = get_csv_data(csv_list)\n # 遍历样本数据\n for i, sample_data in enumerate(csv_data):\n # 求出点集的中心坐标点\n centroid_x, centroid_y = get_centroid(sample_data)\n # 根据中心坐标点旋转点集中的点\n \n for index, coordinate in enumerate(sample_data):\n x, y = coordinate\n n_x, n_y = n_rotate(\n radians(rotate_angle), x, y, centroid_x, centroid_y)\n # 旋转后的点集坐标中心化\n sample_data[index] = [n_x-centroid_x, n_y-centroid_y]\n # 旋转后的点集回归原列表\n csv_data[i] = sample_data\n # 归集点集标签\n typical_data = np.append(typical_data, [data_type], axis=0)\n # 
将每个不同数量的样本合并到主列表中(n,1024,2)=>(m,n,1024,2)\n sum_data = np.append(sum_data, csv_data, axis=0)\n\n return sum_data, typical_data\n\n\nif __name__ == \"__main__\":\n\n sum_train_data, train_typical_data = get_label_and_data(\n TRAIN_CSV_PATH, label_dirs)\n sum_test_data, test_typical_data = get_label_and_data(\n TEST_CSV_PATH, label_dirs)\n\n # 随机打乱点集数据\n rand_sum_train_data, rand_train_typical_data = exchange_data_index(\n sum_train_data, train_typical_data)\n rand_sum_test_data, rand_test_typical_data = exchange_data_index(\n sum_test_data, test_typical_data)\n\n if os.access(data_path, os.F_OK) == False:\n os.mkdir(data_path)\n\n if os.access(train_file_path, os.F_OK) == True:\n os.remove(train_file_path)\n open(train_file_path, 'w')\n with h5py.File(train_file_path, 'r+') as f:\n f.create_dataset('data', data=rand_sum_train_data)\n f.create_dataset('label', data=rand_train_typical_data)\n\n if os.access(test_file_path, os.F_OK) == True:\n os.remove(test_file_path)\n open(test_file_path, 'w')\n with h5py.File(test_file_path, 'r+') as f:\n f.create_dataset('data', data=rand_sum_test_data)\n f.create_dataset('label', data=rand_test_typical_data)\n"
] | [
[
"numpy.append",
"numpy.empty",
"numpy.insert",
"numpy.delete",
"numpy.array",
"numpy.genfromtxt"
]
] |
skinnider/low-data-generative-models | [
"6e743b6d1ba3265f58fcbd33f2c60e633cf25999"
] | [
"python/train_model.py"
] | [
"\"\"\"\nTrain a language model to generate SMILES.\n\"\"\"\n\nimport argparse\nimport os\nimport numpy as np\nimport pandas as pd\nimport random\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n# suppress Chem.MolFromSmiles error output\nfrom rdkit import rdBase\nrdBase.DisableLog('rdApp.error')\n\n# set working directory\ngit_dir = os.path.expanduser(\"~/git/low-data-generative-models\")\npython_dir = git_dir + \"/python\"\nos.chdir(python_dir)\n\n# import classes\nfrom models import RNN, OneHotRNN, EarlyStopping\nfrom datasets import SmilesDataset, SelfiesDataset, SmilesCollate\nfrom functions import decrease_learning_rate, print_update, track_loss, \\\n sample_smiles, write_smiles\n\n### CLI\nparser = argparse.ArgumentParser(\n description='Chemical structure language model interface')\n# input file\nparser.add_argument('--smiles_file', type=str,\n help='location of the SMILES file to train on')\nparser.add_argument('--selfies', dest='selfies', action='store_true')\nparser.set_defaults(selfies=False)\n# output files\nparser.add_argument('--output_dir', type=str,\n help='directory to save trained models to')\n# RNN parameters\nparser.add_argument('--rnn_type', type=str, choices=['RNN', 'LSTM', 'GRU'],\n default='GRU', help='type of language model to train')\nparser.add_argument('--embedding_size', type=int, default=128,\n help='size of vocabulary embedding')\nparser.add_argument('--hidden_size', type=int, default=512,\n help='size of language model hidden layers')\nparser.add_argument('--n_layers', type=int, default=3,\n help='number of layers in language model')\nparser.add_argument('--dropout', type=float, default=0,\n help='amount of dropout (0-1) to apply to model')\nparser.add_argument('--bidirectional', type=bool, default=False,\n help='for LSTMs only, train a bidirectional model')\nparser.add_argument('--nonlinearity', type=str, choices=['tanh', 'relu'],\n default='tanh', help='for RNNs only, nonlinearity to use')\nparser.add_argument('--tie_weights', dest='tie_weights',\n help='require embedding/dense linear layers use the ' +\\\n 'same weights',\n action='store_true')\nparser.set_defaults(tie_weights=False)\n# optimization parameters\nparser.add_argument('--learning_rate', type=float, default=0.001,\n help='initial learning rate')\nparser.add_argument('--learning_rate_decay', default=None, # type=float,\n help='amount (0-1) to decrease learning rate by every ' +\\\n 'fixed number of steps')\nparser.add_argument('--learning_rate_decay_steps', default=10000, type=int,\n help='# of steps between learning rate decrements')\nparser.add_argument('--gradient_clip', default=None, # type=float,\n help='amount to which to clip the gradients')\n# training schedule\nparser.add_argument('--seed', type=int, default=0,\n help='seed for random number generator')\nparser.add_argument('--batch_size', type=int, default=128,\n help='batch size')\nparser.add_argument('--max_epochs', type=int, default=1000,\n help='maximum number of epochs to train for')\nparser.add_argument('--patience', type=int, default=100,\n help='patience for early stopping')\n# sampling from trained models\nparser.add_argument('--sample_idx', type=int, default=0,\n help='index of the model being trained (zero-indexed)')\nparser.add_argument('--sample_every_epochs', type=int,\n help='if set, sample SMILES from the trained model' +\n 'every n epochs')\nparser.add_argument('--sample_every_steps', type=int,\n help='if set, sample 
SMILES from the trained model' +\n 'every n steps')\nparser.add_argument('--log_every_epochs', type=int,\n help='log training/validation losses every n epochs')\nparser.add_argument('--log_every_steps', type=int,\n help='log training/validation losses every n steps')\nparser.add_argument('--sample_size', type=int, default=100000,\n help='size of each sample from the trained model')\n# start with pretrained model\nparser.add_argument('--pretrain_model', type=str, default=None,\n help='load parameters from a pretrained model')\n# enforce a larger vocabulary\nparser.add_argument('--vocab_file', type=str, default=None,\n help='file containing all tokens in vocabulary')\n# for use in grid\nparser.add_argument('--stop_if_exists', dest='stop_if_exists',\n action='store_true')\nparser.set_defaults(stop_if_exists=False)\n\n# parse arguments\nargs = parser.parse_args()\n\n# manually deal with gradient clipping\ntry:\n args.gradient_clip = float(args.gradient_clip)\nexcept (ValueError, TypeError):\n args.gradient_clip = None\n\n# manually deal with learning rate decay\ntry:\n args.learning_rate_decay = float(args.learning_rate_decay)\nexcept (ValueError, TypeError):\n args.learning_rate_decay = None\n\n# log args (make searching through logging directory easier)\nfor arg in vars(args):\n print(arg, \": \", getattr(args, arg), \"(\", type(getattr(args, arg)), \")\")\n\n# optionally stop if output file already exists\nif args.selfies:\n smiles_filename = \"sample-\" + str(args.sample_idx + 1) + \"-SELFIES.smi\"\nelse:\n smiles_filename = \"sample-\" + str(args.sample_idx + 1) + \"-SMILES.smi\"\nsmiles_file = os.path.join(args.output_dir, smiles_filename)\nif os.path.isfile(smiles_file) and args.stop_if_exists:\n print(\"output file \" + smiles_file + \" exists: stopping early\")\n sys.exit()\n\n# make output directories\nif not os.path.isdir(args.output_dir):\n try:\n os.makedirs(args.output_dir)\n except FileExistsError:\n pass\n\n## seed all RNGs\ntorch.manual_seed(args.seed)\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\nif torch.cuda.is_available():\n print(\"using cuda\")\n torch.cuda.manual_seed_all(args.seed)\n\n# set up dataset\nif args.selfies:\n dataset = SelfiesDataset(selfies_file=args.smiles_file)\nelse:\n dataset = SmilesDataset(smiles_file=args.smiles_file,\n vocab_file=args.vocab_file)\n\n# set up batching\nloader = DataLoader(dataset,\n batch_size=args.batch_size,\n shuffle=True,\n drop_last=True,\n collate_fn=SmilesCollate(dataset.vocabulary))\n\n# set up model\nif args.embedding_size > 0:\n model = RNN(vocabulary=dataset.vocabulary,\n rnn_type=args.rnn_type,\n embedding_size=args.embedding_size,\n hidden_size=args.hidden_size,\n n_layers=args.n_layers,\n dropout=args.dropout,\n bidirectional=args.bidirectional,\n tie_weights=args.tie_weights,\n nonlinearity=args.nonlinearity)\nelse:\n # no embedding layer (one-hot encoding)\n model = OneHotRNN(vocabulary=dataset.vocabulary,\n rnn_type=args.rnn_type,\n hidden_size=args.hidden_size,\n n_layers=args.n_layers,\n dropout=args.dropout,\n bidirectional=args.bidirectional,\n nonlinearity=args.nonlinearity)\n\n# optionally, load model parameters from file\nif args.pretrain_model is not None:\n model.load_state_dict(torch.load(args.pretrain_model))\n\n# set up optimizer\noptimizer = optim.Adam(model.parameters(),\n betas=(0.9, 0.999), ## default\n eps=1e-08, ## default\n lr=args.learning_rate)\n\n# set up early stopping\nearly_stop = EarlyStopping(patience=args.patience)\n\n# set up training schedule file\nsched_filename = 
\"training_schedule-\" + str(args.sample_idx + 1) + \".csv\"\nsched_file = os.path.join(args.output_dir, sched_filename)\n\n# iterate over epochs\ncounter = 0\nfor epoch in range(args.max_epochs):\n # iterate over batches\n for batch_idx, batch in tqdm(enumerate(loader), total=len(loader)):\n batch, lengths = batch\n\n # increment counter\n counter += 1\n\n # calculate loss\n log_p = model.loss(batch, lengths)\n loss = log_p.mean()\n\n # zero gradients, calculate new gradients, and take a step\n optimizer.zero_grad()\n loss.backward()\n # clip gradient\n if args.gradient_clip is not None:\n nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clip)\n\n optimizer.step()\n\n # check learning rate decay\n if args.learning_rate_decay is not None and \\\n counter % args.learning_rate_decay_steps == 0:\n decrease_learning_rate(optimizer,\n multiplier=args.learning_rate_decay)\n\n # print update and write training schedule?\n if args.log_every_steps is not None:\n if counter % args.log_every_steps == 0:\n print_update(model, dataset, epoch, batch_idx + 1, loss.item(),\n args.batch_size, selfies=args.selfies)\n track_loss(sched_file, model, dataset, epoch,\n counter, loss.item(), args.batch_size)\n\n # save SMILES?\n if args.sample_every_steps is not None:\n if counter % args.sample_every_steps == 0:\n sample_smiles(args.output_dir, args.sample_idx, model,\n args.sample_size, epoch, counter)\n\n # calculate validation loss\n validation, lengths = dataset.get_validation(args.batch_size)\n validation_loss = model.loss(validation, lengths).mean().detach()\n # check early stopping\n model_filename = \"model-\" + str(args.sample_idx + 1) + \".pt\"\n model_file = os.path.join(args.output_dir, model_filename)\n early_stop(validation_loss.item(), model, model_file, counter)\n\n if early_stop.stop:\n break\n\n # print update and write training schedule?\n if args.log_every_epochs is not None:\n print_update(model, dataset, epoch, 'NA', loss.item(), args.batch_size)\n track_loss(sched_file, model, dataset, epoch,\n counter, loss.item(), args.batch_size)\n\n # save SMILES?\n if args.sample_every_epochs is not None:\n sample_smiles(args.output_dir, args.sample_idx, model,\n args.sample_size, epoch, counter)\n\n if early_stop.stop:\n break\n\n# append information about final training step\nif args.log_every_epochs is not None or args.log_every_steps is not None:\n sched = pd.DataFrame({'epoch': [None],\n 'step': [early_stop.step_at_best],\n 'outcome': ['training loss'],\n 'value': [early_stop.best_loss]})\n sched.to_csv(sched_file, index=False, mode='a', header=False)\n\n# load the best model\nmodel.load_state_dict(torch.load(model_file))\nmodel.eval() ## enable evaluation modes\n\n# sample a set of SMILES from the final, trained model\nsampled_smiles = []\nwhile len(sampled_smiles) < args.sample_size:\n sampled_smiles.extend(model.sample(args.batch_size, return_smiles=True))\n\n# write sampled SMILES\nwrite_smiles(sampled_smiles, smiles_file)\n"
] | [
[
"torch.cuda.manual_seed_all",
"torch.load",
"torch.manual_seed",
"pandas.DataFrame",
"numpy.random.seed",
"torch.cuda.is_available"
]
] |
rlaehgns5399/GoogLeNet-Inception-tf | [
"eb9597634eec9a7b511e967ad8c7b2552563755f"
] | [
"src/helper/trainer.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: trainer.py\n# Author: Qian Ge <[email protected]>\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n\n\ndef display(global_step,\n step,\n scaler_sum_list,\n name_list,\n collection,\n summary_val=None,\n summary_writer=None,\n ):\n print('[step: {}]'.format(global_step), end='')\n for val, name in zip(scaler_sum_list, name_list):\n print(' {}: {:.4f}'.format(name, val * 1. / step), end='')\n print('')\n if summary_writer is not None:\n s = tf.Summary()\n for val, name in zip(scaler_sum_list, name_list):\n s.value.add(tag='{}/{}'.format(collection, name),\n simple_value=val * 1. / step)\n summary_writer.add_summary(s, global_step)\n if summary_val is not None:\n summary_writer.add_summary(summary_val, global_step)\n\nclass Trainer(object):\n def __init__(self, train_model, valid_model, train_data, init_lr=1e-3):\n\n self._t_model = train_model\n self._v_model = valid_model\n self._train_data = train_data\n self._init_lr = init_lr\n\n self._train_op = train_model.get_train_op()\n self._train_loss_op = train_model.get_loss()\n self._train_accuracy_op = train_model.get_accuracy()\n\n self._valid_loss_op = valid_model.get_loss()\n self._valid_accuracy_op = valid_model.get_accuracy()\n # self._train_summary_op = train_model.get_train_summary()\n # self._valid_summary_op = train_model.get_valid_summary()\n\n self.global_step = 0\n self.epoch_id = 0\n\n def train_epoch(self, sess, keep_prob=1., summary_writer=None):\n if self.epoch_id < 35:\n self._lr = self._init_lr\n elif self.epoch_id < 50:\n self._lr = self._init_lr / 10.\n else:\n self._lr = self._init_lr / 100.\n # self._t_model.set_is_training(True)\n display_name_list = ['loss', 'accuracy']\n cur_summary = None\n\n cur_epoch = self._train_data.epochs_completed\n\n step = 0\n loss_sum = 0\n acc_sum = 0\n self.epoch_id += 1\n while cur_epoch == self._train_data.epochs_completed:\n self.global_step += 1\n step += 1\n\n batch_data = self._train_data.next_batch_dict()\n im = batch_data['image']\n label = batch_data['label']\n _, loss, acc = sess.run(\n [self._train_op, self._train_loss_op, self._train_accuracy_op], \n feed_dict={self._t_model.image: im,\n self._t_model.label: label,\n self._t_model.lr: self._lr,\n self._t_model.keep_prob: keep_prob})\n\n loss_sum += loss\n acc_sum += acc\n\n if step % 100 == 0 or step == 1:\n display(self.global_step,\n step,\n [loss_sum, acc_sum],\n display_name_list,\n 'train',\n summary_val=cur_summary,\n summary_writer=summary_writer)\n\n print('==== epoch: {}, lr:{} ===='.format(cur_epoch, self._lr))\n display(self.global_step,\n step,\n [loss_sum, acc_sum],\n display_name_list,\n 'train',\n summary_val=cur_summary,\n summary_writer=summary_writer)\n\n def valid_epoch(self, sess, dataflow, summary_writer=None):\n display_name_list = ['loss', 'accuracy']\n cur_summary = None\n dataflow.reset_epoch()\n\n step = 0\n loss_sum = 0\n acc_sum = 0\n while dataflow.epochs_completed < 1:\n step += 1\n\n batch_data = dataflow.next_batch_dict()\n im = batch_data['image']\n label = batch_data['label']\n loss, acc = sess.run(\n [self._valid_loss_op, self._valid_accuracy_op], \n feed_dict={self._v_model.image: im,\n self._v_model.label: label})\n\n loss_sum += loss\n acc_sum += acc\n\n print('[Valid]: ', end='')\n display(self.global_step,\n step,\n [loss_sum, acc_sum],\n display_name_list,\n 'valid',\n summary_val=cur_summary,\n summary_writer=summary_writer)\n"
] | [
[
"tensorflow.Summary"
]
] |
CalebEverett/fastai-dl2 | [
"64d23592eddca6ca1f3647e73c319e97c8eb392b"
] | [
"fastai/torch_imports.py"
] | [
"import os\nimport torch, torchvision, torchtext\nfrom torch import nn, cuda, backends, FloatTensor, LongTensor, optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, TensorDataset\nfrom torch.nn.init import kaiming_uniform, kaiming_normal\nfrom torchvision.transforms import Compose\nfrom torchvision.models import resnet18, resnet34, resnet50, resnet101, resnet152\nfrom torchvision.models import vgg16_bn, vgg19_bn\nfrom torchvision.models import densenet121, densenet161, densenet169, densenet201\n\nfrom .models.resnext_50_32x4d import resnext_50_32x4d\nfrom .models.resnext_101_32x4d import resnext_101_32x4d\nfrom .models.resnext_101_64x4d import resnext_101_64x4d\nfrom .models.wrn_50_2f import wrn_50_2f\nfrom .models.inceptionresnetv2 import InceptionResnetV2\nfrom .models.inceptionv4 import InceptionV4\nfrom .models.nasnet import nasnetalarge\n\nfrom unet_models import unet11\n\nimport warnings\nwarnings.filterwarnings('ignore', message='Implicit dimension choice', category=UserWarning)\n\ndef children(m): return m if isinstance(m, (list, tuple)) else list(m.children())\ndef save_model(m, p): torch.save(m.state_dict(), p)\ndef load_model(m, p): m.load_state_dict(torch.load(p, map_location=lambda storage, loc: storage))\n\ndef load_pre(pre, f, fn):\n m = f()\n path = os.path.dirname(__file__)\n if pre: load_model(m, f'{path}/weights/{fn}.pth')\n return m\n\ndef _fastai_model(name, paper_title, paper_href):\n def add_docs_wrapper(f):\n f.__doc__ = f\"\"\"{name} model from\n `\"{paper_title}\" <{paper_href}>`_\n\n Args:\n pre (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n return f\n return add_docs_wrapper\n\n@_fastai_model('Inception 4', 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning',\n 'https://arxiv.org/pdf/1602.07261.pdf')\ndef inception_4(pre): return children(inceptionv4(pretrained=pre))[0]\n\n@_fastai_model('Inception 4', 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning',\n 'https://arxiv.org/pdf/1602.07261.pdf')\ndef inceptionresnet_2(pre): return load_pre(pre, InceptionResnetV2, 'inceptionresnetv2-d579a627')\n\n@_fastai_model('ResNeXt 50', 'Aggregated Residual Transformations for Deep Neural Networks',\n 'https://arxiv.org/abs/1611.05431')\ndef resnext50(pre): return load_pre(pre, resnext_50_32x4d, 'resnext_50_32x4d')\n\n@_fastai_model('ResNeXt 101_32', 'Aggregated Residual Transformations for Deep Neural Networks',\n 'https://arxiv.org/abs/1611.05431')\ndef resnext101(pre): return load_pre(pre, resnext_101_32x4d, 'resnext_101_32x4d')\n\n@_fastai_model('ResNeXt 101_64', 'Aggregated Residual Transformations for Deep Neural Networks',\n 'https://arxiv.org/abs/1611.05431')\ndef resnext101_64(pre): return load_pre(pre, resnext_101_64x4d, 'resnext_101_64x4d')\n\n@_fastai_model('Wide Residual Networks', 'Wide Residual Networks',\n 'https://arxiv.org/pdf/1605.07146.pdf')\ndef wrn(pre): return load_pre(pre, wrn_50_2f, 'wrn_50_2f')\n\n@_fastai_model('Densenet-121', 'Densely Connected Convolutional Networks',\n 'https://arxiv.org/pdf/1608.06993.pdf')\ndef dn121(pre): return children(densenet121(pre))[0]\n\n@_fastai_model('Densenet-169', 'Densely Connected Convolutional Networks',\n 'https://arxiv.org/pdf/1608.06993.pdf')\ndef dn161(pre): return children(densenet161(pre))[0]\n\n@_fastai_model('Densenet-161', 'Densely Connected Convolutional Networks',\n 'https://arxiv.org/pdf/1608.06993.pdf')\ndef dn169(pre): return 
children(densenet169(pre))[0]\n\n@_fastai_model('Densenet-201', 'Densely Connected Convolutional Networks',\n 'https://arxiv.org/pdf/1608.06993.pdf')\ndef dn201(pre): return children(densenet201(pre))[0]\n\n@_fastai_model('Vgg-16 with batch norm added', 'Very Deep Convolutional Networks for Large-Scale Image Recognition',\n 'https://arxiv.org/pdf/1409.1556.pdf')\ndef vgg16(pre): return children(vgg16_bn(pre))[0]\n\n@_fastai_model('Vgg-19 with batch norm added', 'Very Deep Convolutional Networks for Large-Scale Image Recognition',\n 'https://arxiv.org/pdf/1409.1556.pdf')\ndef vgg19(pre): return children(vgg19_bn(pre))[0]\n\n@_fastai_model('Vgg-11 with U-Net', 'TernausNet: U-Net with VGG11 Encoder Pre-Trained on ImageNet for Image Segmentation',\n 'https://arxiv.org/pdf/1801.05746.pdf')\ndef ternausnet(pre): return children(unet11(pre))"
] | [
[
"torch.load"
]
] |
qq456cvb/CPPF | [
"79366978854ae18b14c69ac850ea64b9dc286081"
] | [
"models/model.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .sprin import GlobalInfoProp, SparseSO3Conv\nimport numpy as np\n\n\nclass ResLayer(torch.nn.Module):\n def __init__(self, dim_in, dim_out, bn=False) -> None:\n super().__init__()\n assert(bn is False)\n self.fc1 = torch.nn.Linear(dim_in, dim_out)\n if bn:\n self.bn1 = torch.nn.BatchNorm1d(dim_out)\n else:\n self.bn1 = lambda x: x\n self.fc2 = torch.nn.Linear(dim_out, dim_out)\n if bn:\n self.bn2 = torch.nn.BatchNorm1d(dim_out)\n else:\n self.bn2 = lambda x: x\n if dim_in != dim_out:\n self.fc0 = torch.nn.Linear(dim_in, dim_out)\n else:\n self.fc0 = None\n \n def forward(self, x):\n x_res = x if self.fc0 is None else self.fc0(x)\n x = F.relu(self.bn1(self.fc1(x)))\n x = self.bn2(self.fc2(x))\n return x + x_res\n\n \nclass PointEncoder(nn.Module):\n def __init__(self, k, spfcs, out_dim, num_layers=2, num_nbr_feats=2) -> None:\n super().__init__()\n self.k = k\n self.spconvs = nn.ModuleList()\n self.spconvs.append(SparseSO3Conv(32, num_nbr_feats, out_dim, *spfcs))\n self.aggrs = nn.ModuleList()\n self.aggrs.append(GlobalInfoProp(out_dim, out_dim // 4))\n for _ in range(num_layers - 1):\n self.spconvs.append(SparseSO3Conv(32, out_dim + out_dim // 4, out_dim, *spfcs))\n self.aggrs.append(GlobalInfoProp(out_dim, out_dim // 4))\n\n def forward(self, pc, pc_normal, dist):\n nbrs_idx = torch.topk(dist, self.k, largest=False, sorted=False)[1] #[..., N, K]\n pc_nbrs = torch.gather(pc.unsqueeze(-3).expand(*pc.shape[:-1], *pc.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc.shape[-1])) #[..., N, K, 3]\n pc_nbrs_centered = pc_nbrs - pc.unsqueeze(-2) #[..., N, K, 3]\n pc_nbrs_norm = torch.norm(pc_nbrs_centered, dim=-1, keepdim=True)\n \n pc_normal_nbrs = torch.gather(pc_normal.unsqueeze(-3).expand(*pc_normal.shape[:-1], *pc_normal.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc_normal.shape[-1])) #[..., N, K, 3]\n pc_normal_cos = torch.sum(pc_normal_nbrs * pc_normal.unsqueeze(-2), -1, keepdim=True)\n \n feat = self.aggrs[0](self.spconvs[0](pc_nbrs, torch.cat([pc_nbrs_norm, pc_normal_cos], -1), pc))\n for i in range(len(self.spconvs) - 1):\n spconv = self.spconvs[i + 1]\n aggr = self.aggrs[i + 1]\n feat_nbrs = torch.gather(feat.unsqueeze(-3).expand(*feat.shape[:-1], *feat.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, feat.shape[-1]))\n feat = aggr(spconv(pc_nbrs, feat_nbrs, pc))\n return feat\n \n def forward_nbrs(self, pc, pc_normal, nbrs_idx):\n pc_nbrs = torch.gather(pc.unsqueeze(-3).expand(*pc.shape[:-1], *pc.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc.shape[-1])) #[..., N, K, 3]\n pc_nbrs_centered = pc_nbrs - pc.unsqueeze(-2) #[..., N, K, 3]\n pc_nbrs_norm = torch.norm(pc_nbrs_centered, dim=-1, keepdim=True)\n \n pc_normal_nbrs = torch.gather(pc_normal.unsqueeze(-3).expand(*pc_normal.shape[:-1], *pc_normal.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc_normal.shape[-1])) #[..., N, K, 3]\n pc_normal_cos = torch.sum(pc_normal_nbrs * pc_normal.unsqueeze(-2), -1, keepdim=True)\n \n feat = self.aggrs[0](self.spconvs[0](pc_nbrs, torch.cat([pc_nbrs_norm, pc_normal_cos], -1), pc))\n for i in range(len(self.spconvs) - 1):\n spconv = self.spconvs[i + 1]\n aggr = self.aggrs[i + 1]\n feat_nbrs = torch.gather(feat.unsqueeze(-3).expand(*feat.shape[:-1], *feat.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, feat.shape[-1]))\n feat = aggr(spconv(pc_nbrs, feat_nbrs, pc))\n return feat\n\n\nclass PPFEncoder(nn.Module):\n def __init__(self, ppffcs, 
out_dim) -> None:\n super().__init__()\n self.res_layers = nn.ModuleList()\n for i in range(len(ppffcs) - 1):\n dim_in, dim_out = ppffcs[i], ppffcs[i + 1]\n self.res_layers.append(ResLayer(dim_in, dim_out, bn=False))\n self.final = nn.Linear(ppffcs[-1], out_dim)\n\n def forward(self, pc, pc_normal, feat, dist=None, idxs=None):\n if idxs is not None:\n return self.forward_with_idx(pc[0], pc_normal[0], feat[0], idxs)[None]\n xx = pc.unsqueeze(-2) - pc.unsqueeze(-3)\n xx_normed = xx / (dist[..., None] + 1e-7)\n\n outputs = []\n for idx in torch.chunk(torch.arange(pc.shape[1]), 5):\n feat_chunk = feat[..., idx, :]\n target_shape = [*feat_chunk.shape[:-2], feat_chunk.shape[-2], feat.shape[-2], feat_chunk.shape[-1]] # B x NC x N x F\n xx_normed_chunk = xx_normed[..., idx, :, :]\n ppf = torch.cat([\n torch.sum(pc_normal[..., idx, :].unsqueeze(-2) * xx_normed_chunk, -1, keepdim=True), \n torch.sum(pc_normal.unsqueeze(-3) * xx_normed_chunk, -1, keepdim=True), \n torch.sum(pc_normal[..., idx, :].unsqueeze(-2) * pc_normal.unsqueeze(-3), -1, keepdim=True), \n dist[..., idx, :, None],\n ], -1)\n # ppf.zero_()\n final_feat = torch.cat([feat_chunk[..., None, :].expand(*target_shape), feat[..., None, :, :].expand(*target_shape), ppf], -1)\n \n output = final_feat\n for res_layer in self.res_layers:\n output = res_layer(output)\n outputs.append(output)\n \n output = torch.cat(outputs, dim=-3)\n return self.final(output)\n\n def forward_with_idx(self, pc, pc_normal, feat, idxs):\n a_idxs = idxs[:, 0]\n b_idxs = idxs[:, 1]\n xy = pc[a_idxs] - pc[b_idxs]\n xy_norm = torch.norm(xy, dim=-1)\n xy_normed = xy / (xy_norm[..., None] + 1e-7)\n pnormal_cos = pc_normal[a_idxs] * pc_normal[b_idxs]\n ppf = torch.cat([\n torch.sum(pc_normal[a_idxs] * xy_normed, -1, keepdim=True),\n torch.sum(pc_normal[b_idxs] * xy_normed, -1, keepdim=True),\n torch.sum(pnormal_cos, -1, keepdim=True),\n xy_norm[..., None],\n ], -1)\n # ppf.zero_()\n \n final_feat = torch.cat([feat[a_idxs], feat[b_idxs], ppf], -1)\n \n output = final_feat\n for res_layer in self.res_layers:\n output = res_layer(output)\n return self.final(output)\n"
] | [
[
"torch.sum",
"torch.nn.Linear",
"torch.nn.BatchNorm1d",
"torch.norm",
"torch.topk",
"torch.arange",
"torch.nn.ModuleList",
"torch.cat"
]
] |
Ram81/habitat-imitation-baselines | [
"c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505"
] | [
"habitat/tasks/nav/object_nav_task.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nfrom typing import Any, List, Optional\n\nimport attr\nfrom cv2 import log\nimport numpy as np\nfrom gym import spaces\n\nfrom habitat.config import Config\nfrom habitat.core.dataset import SceneState\nfrom habitat.core.logging import logger\nfrom habitat.core.registry import registry\nfrom habitat.core.simulator import AgentState, Sensor, SensorTypes\nfrom habitat.core.utils import not_none_validator\nfrom habitat.tasks.nav.nav import (\n NavigationEpisode,\n NavigationGoal,\n NavigationTask\n)\n\ntry:\n from habitat.datasets.object_nav.object_nav_dataset import (\n ObjectNavDatasetV1,\n )\nexcept ImportError:\n pass\n\n\ntask_cat2mpcat40 = [\n 3, # ('chair', 2, 0)\n 5, # ('table', 4, 1)\n 6, # ('picture', 5, 2)\n 7, # ('cabinet', 6, 3)\n 8, # ('cushion', 7, 4)\n 10, # ('sofa', 9, 5),\n 11, # ('bed', 10, 6)\n 13, # ('chest_of_drawers', 12, 7),\n 14, # ('plant', 13, 8)\n 15, # ('sink', 14, 9)\n 18, # ('toilet', 17, 10),\n 19, # ('stool', 18, 11),\n 20, # ('towel', 19, 12)\n 22, # ('tv_monitor', 21, 13)\n 23, # ('shower', 22, 14)\n 25, # ('bathtub', 24, 15)\n 26, # ('counter', 25, 16),\n 27, # ('fireplace', 26, 17),\n 33, # ('gym_equipment', 32, 18),\n 34, # ('seating', 33, 19),\n 38, # ('clothes', 37, 20),\n 43, # ('foodstuff', 42, 21),\n 44, # ('stationery', 43, 22),\n 45, # ('fruit', 44, 23),\n 46, # ('plaything', 45, 24),\n 47, # ('hand_tool', 46, 25),\n 48, # ('game_equipment', 47, 26),\n 49, # ('kitchenware', 48, 27)\n]\n\nmapping_mpcat40_to_goal21 = {\n 3: 1,\n 5: 2,\n 6: 3,\n 7: 4,\n 8: 5,\n 10: 6,\n 11: 7,\n 13: 8,\n 14: 9,\n 15: 10,\n 18: 11,\n 19: 12,\n 20: 13,\n 22: 14,\n 23: 15,\n 25: 16,\n 26: 17,\n 27: 18,\n 33: 19,\n 34: 20,\n 38: 21,\n 43: 22, # ('foodstuff', 42, task_cat: 21)\n 44: 28, # ('stationery', 43, task_cat: 22)\n 45: 26, # ('fruit', 44, task_cat: 23)\n 46: 25, # ('plaything', 45, task_cat: 24)\n 47: 24, # ('hand_tool', 46, task_cat: 25)\n 48: 23, # ('game_equipment', 47, task_cat: 26)\n 49: 27, # ('kitchenware', 48, task_cat: 27)\n}\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass AgentStateSpec:\n r\"\"\"Agent data specifications that capture states of agent and sensor in replay state.\n \"\"\"\n position: Optional[List[float]] = attr.ib(default=None)\n rotation: Optional[List[float]] = attr.ib(default=None)\n sensor_data: Optional[dict] = attr.ib(default=None)\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass ReplayActionSpec:\n r\"\"\"Replay specifications that capture metadata associated with action.\n \"\"\"\n action: str = attr.ib(default=None, validator=not_none_validator)\n agent_state: Optional[AgentStateSpec] = attr.ib(default=None)\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass ObjectGoalNavEpisode(NavigationEpisode):\n r\"\"\"ObjectGoal Navigation Episode\n\n :param object_category: Category of the obect\n \"\"\"\n object_category: Optional[str] = None\n reference_replay: Optional[List[ReplayActionSpec]] = None\n scene_state: Optional[List[SceneState]] = None\n is_thda: Optional[bool] = False\n scene_dataset: Optional[str] = \"mp3d\"\n\n @property\n def goals_key(self) -> str:\n r\"\"\"The key to retrieve the goals\"\"\"\n return f\"{os.path.basename(self.scene_id)}_{self.object_category}\"\n\n\[email protected](auto_attribs=True)\nclass ObjectViewLocation:\n r\"\"\"ObjectViewLocation provides information about a position 
around an object goal\n usually that is navigable and the object is visible with specific agent\n configuration that episode's dataset was created.\n that is target for\n navigation. That can be specify object_id, position and object\n category. An important part for metrics calculation are view points that\n describe success area for the navigation.\n\n Args:\n agent_state: navigable AgentState with a position and a rotation where\n the object is visible.\n iou: an intersection of a union of the object and a rectangle in the\n center of view. This metric is used to evaluate how good is the object\n view form current position. Higher iou means better view, iou equals\n 1.0 if whole object is inside of the rectangle and no pixel inside\n the rectangle belongs to anything except the object.\n \"\"\"\n agent_state: AgentState\n iou: Optional[float]\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass ObjectGoal(NavigationGoal):\n r\"\"\"Object goal provides information about an object that is target for\n navigation. That can be specify object_id, position and object\n category. An important part for metrics calculation are view points that\n describe success area for the navigation.\n\n Args:\n object_id: id that can be used to retrieve object from the semantic\n scene annotation\n object_name: name of the object\n object_category: object category name usually similar to scene semantic\n categories\n room_id: id of a room where object is located, can be used to retrieve\n room from the semantic scene annotation\n room_name: name of the room, where object is located\n view_points: navigable positions around the object with specified\n proximity of the object surface used for navigation metrics calculation.\n The object is visible from these positions.\n \"\"\"\n\n object_id: str = attr.ib(default=None, validator=not_none_validator)\n object_name: Optional[str] = None\n object_name_id: Optional[int] = None\n object_category: Optional[str] = None\n room_id: Optional[str] = None\n room_name: Optional[str] = None\n view_points: Optional[List[ObjectViewLocation]] = None\n\n\[email protected]_sensor\nclass ObjectGoalSensor(Sensor):\n r\"\"\"A sensor for Object Goal specification as observations which is used in\n ObjectGoal Navigation. The goal is expected to be specified by object_id or\n semantic category id.\n For the agent in simulator the forward direction is along negative-z.\n In polar coordinate format the angle returned is azimuth to the goal.\n Args:\n sim: a reference to the simulator for calculating task observations.\n config: a config for the ObjectGoalSensor sensor. 
Can contain field\n GOAL_SPEC that specifies which id use for goal specification,\n GOAL_SPEC_MAX_VAL the maximum object_id possible used for\n observation space definition.\n dataset: a Object Goal navigation dataset that contains dictionaries\n of categories id to text mapping.\n \"\"\"\n cls_uuid: str = \"objectgoal\"\n\n def __init__(\n self,\n sim,\n config: Config,\n dataset: \"ObjectNavDatasetV1\",\n *args: Any,\n **kwargs: Any,\n ):\n self._sim = sim\n self._dataset = dataset\n super().__init__(config=config)\n\n def _get_uuid(self, *args: Any, **kwargs: Any) -> str:\n return self.cls_uuid\n\n def _get_sensor_type(self, *args: Any, **kwargs: Any):\n return SensorTypes.SEMANTIC\n\n def _get_observation_space(self, *args: Any, **kwargs: Any):\n sensor_shape = (1,)\n max_value = self.config.GOAL_SPEC_MAX_VAL - 1\n if self.config.GOAL_SPEC == \"TASK_CATEGORY_ID\":\n max_value = max(\n self._dataset.category_to_task_category_id.values()\n )\n logger.info(\"max object cat: {}\".format(max_value))\n logger.info(\"cats: {}\".format(self._dataset.category_to_task_category_id.values()))\n\n return spaces.Box(\n low=0, high=max_value, shape=sensor_shape, dtype=np.int64\n )\n\n def get_observation(\n self,\n observations,\n *args: Any,\n episode: ObjectGoalNavEpisode,\n **kwargs: Any,\n ) -> Optional[int]:\n\n if len(episode.goals) == 0:\n logger.error(\n f\"No goal specified for episode {episode.episode_id}.\"\n )\n return None\n if not isinstance(episode.goals[0], ObjectGoal):\n logger.error(\n f\"First goal should be ObjectGoal, episode {episode.episode_id}.\"\n )\n return None\n category_name = episode.object_category\n if self.config.GOAL_SPEC == \"TASK_CATEGORY_ID\":\n return np.array(\n [self._dataset.category_to_task_category_id[category_name]],\n dtype=np.int64,\n )\n elif self.config.GOAL_SPEC == \"OBJECT_ID\":\n obj_goal = episode.goals[0]\n assert isinstance(obj_goal, ObjectGoal) # for type checking\n return np.array([obj_goal.object_name_id], dtype=np.int64)\n else:\n raise RuntimeError(\n \"Wrong GOAL_SPEC specified for ObjectGoalSensor.\"\n )\n\n\[email protected]_task(name=\"ObjectNav-v1\")\nclass ObjectNavigationTask(NavigationTask):\n r\"\"\"An Object Navigation Task class for a task specific methods.\n Used to explicitly state a type of the task in config.\n \"\"\"\n _is_episode_active: bool\n _prev_action: int\n\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self._is_episode_active = False\n\n def overwrite_sim_config(self, sim_config, episode):\n super().overwrite_sim_config(sim_config, episode)\n\n sim_config.defrost()\n sim_config.scene_state = episode.scene_state\n sim_config.freeze()\n \n return sim_config\n\n def _check_episode_is_active(self, action, *args: Any, **kwargs: Any) -> bool:\n return not getattr(self, \"is_stop_called\", False)\n"
] | [
[
"numpy.array"
]
] |
ehwa009/Eye_Motion_Dataset | [
"42a1c897dc4209c6bb2de94c915ab36995855202"
] | [
"run_preprocessing.py"
] | [
"import pickle\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport math\n\nfrom tqdm import tqdm\nfrom sklearn import decomposition\n\nCENTER_X = int(960 / 3 / 2)\nCENTER_Y = int(540 / 3 / 2)\n\n# CENTER_X = 0\n# CENTER_Y = 0\n\n\ndef load_data(path, data_size=None):\n with open(path, 'rb') as f:\n data = pickle.load(f)\n if data_size != -1:\n dataset = data[:data_size]\n else:\n dataset = data[:]\n return dataset\n\n\ndef save_data(path, data):\n with open(path, 'wb') as f:\n pickle.dump(data, f)\n\n\n'''\nfilling empty coordination, \nrelocate landmark position, \nand filtering landmarks which have abnormal pulpil coordination \n'''\ndef run_fill_filter(eye_dataset):\n for ed in tqdm(eye_dataset):\n # preprocessing landmarks\n # print('[INFO] Current video: {}'.format(ed['vid']))\n for clip_info in ed['clip_info']:\n landmarks = clip_info['landmarks']\n filled_landmarks = []\n for landmark in landmarks:\n ci_df = pd.DataFrame(np.array(landmark))\n ci_df = ci_df.replace(0, np.nan)\n ci_df = ci_df.fillna(method='ffill') # fill NaN values in dataset\n ci_df = ci_df.rolling(3).mean() # moving average filtering\n temp_lm = []\n for landmark in ci_df.values.tolist(): \n filled = [int(lm) for lm in landmark if not(np.isnan(lm))]\n if len(filled) == 50:\n # centering\n diff_x = CENTER_X - filled[48]\n diff_y = CENTER_Y - filled[49]\n for f_i in range(0, len(filled), 2):\n filled[f_i] += diff_x\n filled[f_i+1] += diff_y\n # check right pupil is outside of eye region\n condition1 = filled[0] > filled[4] and filled[0] < filled[10]\n condition2 = filled[1] > filled[7] and filled[1] > filled[9]\n condition3 = filled[1] < filled[13] and filled[1] < filled[14]\n if condition1 and condition2 and condition3:\n temp_lm.append(filled)\n filled_landmarks.append(temp_lm)\n clip_info['landmarks'] = filled_landmarks\n \n return eye_dataset\n\n\n'''\nNormalize eye expression motion scale over whole dataset.\nTo avoid pulpil dislocation, we use same vector on right and left pulpil.\n'''\ndef run_normalization(eye_dataset):\n eb_standard_len = 100\n\n def get_dist(x1, y1, x2, y2):\n return np.sqrt((x1-x2) ** 2 + (y1- y2) ** 2)\n\n def get_theta(var_x, var_y, fix_x, fix_y):\n return math.atan2(var_y - fix_y, var_x - fix_x)\n\n def get_new_coor(theta, dist, point):\n return dist * np.array([math.cos(theta), \n math.sin(theta)]) + np.array([point[0], point[1]])\n \n def run_len_norm(var_x, var_y, fix_x, fix_y, expected_len):\n angle = get_theta(var_x, var_y, fix_x, fix_y)\n new_coor = get_new_coor(angle, expected_len, [fix_x, fix_y])\n return new_coor\n\n for ed in tqdm(eye_dataset):\n # preprocessing landmarks\n # print('[INFO] Current video: {}'.format(ed['vid']))\n for clip_info in ed['clip_info']:\n tmp_landmarks = []\n for landmark in clip_info['landmarks']:\n tmp_landmark = []\n for lm in landmark:\n # calculate different ratio with standard length\n right_len_ratio = eb_standard_len / get_dist(lm[46], lm[47], lm[48], lm[49])\n left_len_ratio = eb_standard_len / get_dist(lm[28], lm[29], lm[48], lm[49])\n len_ratio = (right_len_ratio + left_len_ratio) / 2\n fix_x, fix_y = lm[48], lm[49]\n new_coor_list = []\n for lm_i in range(0, len(lm[:48]), 2):\n new_coor = run_len_norm(lm[lm_i], lm[lm_i+1], fix_x, fix_y,\n get_dist(lm[lm_i], lm[lm_i+1], fix_x, fix_y) * len_ratio)\n new_coor_list += [int(new_coor[0]), int(new_coor[1])]\n # pupil preprocessing\n right_theta = get_theta(lm[0], lm[1], lm[6], lm[7])\n right_dist = get_dist(lm[0], lm[1], lm[6], lm[7])\n left_new_pulpil = get_new_coor(right_theta, 
right_dist, [lm[18], lm[19]])\n lm[2] = int(left_new_pulpil[0])\n lm[3] = int(left_new_pulpil[1])\n new_coor_list += [fix_x, fix_y]\n tmp_landmark.append(new_coor_list) \n tmp_landmarks.append(tmp_landmark)\n clip_info['landmarks'] = tmp_landmarks\n \n return eye_dataset\n\n\n'''\nRun PCA.\nWe set 7 components to run pca.\n'''\ndef run_estimator(eye_dataset, opt):\n landmark_list = []\n for ed in eye_dataset:\n for clip_info in ed['clip_info']:\n for clip_landmarks in clip_info['landmarks']:\n for landmarks in clip_landmarks:\n landmark_list.append(landmarks)\n\n landmark_array = np.array(landmark_list)\n n_samples, n_features = landmark_array.shape\n print('[INFO] n_samples:{}, n_features:{}'.format(n_samples, n_features))\n print('[INFO] Estimated running time: {:0.2f} hrs with {} fps'.format(n_samples/opt.fps/60/60, opt.fps))\n\n data = landmark_array[:, :-2]\n estimator = decomposition.PCA(opt.n_components, svd_solver='randomized', whiten=True)\n estimator.fit(data)\n var_ratio = estimator.explained_variance_ratio_\n print('[INFO] {} number of components explain {:0.2f} of original dataset.'.format(opt.n_components, np.sum(var_ratio)))\n print('[INFO] Without first and seconde axis, rest of hyperplain consists of {:0.2f} of original dataset.'.format(np.sum(var_ratio[3:])))\n \n return estimator\n\n\n'''\nBased on learned PCA eigen vectors (7 hyperplanes that can explain original dataset),\nWe transform 50 dimention to 7 dimention to represent eye expression.\nDue to first and second egien vectors represent rotating motion in our pca space,\nwe make these values to zero.\n'''\ndef run_transform(eye_dataset, estimator, opt):\n for ed in tqdm(eye_dataset):\n for clip_info in ed['clip_info']:\n landmarks = clip_info['landmarks']\n transformed_landmarks = []\n for landmark in landmarks:\n tmp_trans = []\n for lm in landmark:\n transformed_array = estimator.transform(np.array([lm[:-2]]))\n transformed_list = transformed_array.tolist()[0]\n if opt.is_rotation_killed: # we killed pca hyperplanes which have a rotation\n # transformed_list[0] = int(transformed_list[0]/3)\n # transformed_list[1] = int(transformed_list[1]/3)\n transformed_list[0] = 0\n transformed_list[1] = 0\n tmp_trans.append(transformed_list)\n transformed_landmarks.append(tmp_trans)\n clip_info['landmarks'] = transformed_landmarks\n \n return eye_dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-dataset_path', default='./dataset')\n parser.add_argument('-data_size', type=int, default=-1) # -1 means whole dataset\n parser.add_argument('-fps', type=int, default=10)\n parser.add_argument('-n_components', type=int, default=7)\n parser.add_argument('-is_rotation_killed', type=bool, default=True)\n\n opt = parser.parse_args()\n\n eye_dataset = load_data('{}/eye_motion_dataset.pickle'.format(opt.dataset_path), opt.data_size)\n print('[INFO] Dataset length: {}'.format(len(eye_dataset)))\n \n print('[INFO] Filling, filtering and centering is now processing.')\n eye_dataset = run_fill_filter(eye_dataset)\n\n print('[INFO] Normalization is now processing.')\n eye_dataset = run_normalization(eye_dataset)\n\n print('[INFO] Estimator is now running.')\n estimator = run_estimator(eye_dataset, opt)\n\n print('[INFO] Landmarks are now transforming.')\n eye_dataset = run_transform(eye_dataset, estimator, opt)\n \n # save processed dataset\n processed_dataset = {'eye_dataset': eye_dataset,\n 'estimator': estimator,\n }\n save_path = '{}/processed_eye_motion_dataset_pca_{}.pickle'.format(opt.dataset_path, 
estimator.n_components)\n print('[INFO] Save preprocessed dataset at {}'.format(save_path))\n save_data(save_path, processed_dataset)\n \n\nif __name__ == '__main__':\n main()"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.isnan",
"numpy.array",
"sklearn.decomposition.PCA"
]
] |
FischbachLab/hCom_variable_regions | [
"6f1108c461a7e31964d1d81a83c03b9f4dad4c76"
] | [
"summarize_clstr_table.py"
] | [
"#!/usr/bin/env python3\n## How many clusters have more than one organisms as it's members\nimport sys\nimport pandas as pd\nimport logging\n\n\ndef main():\n clstr_table = sys.argv[1]\n output = sys.argv[2]\n\n clstr_df = pd.read_table(clstr_table, header=0)\n clstr_df[\"organism\"] = clstr_df[\"id\"].apply(lambda x: x.split(\":\")[2].split(\"_\")[0])\n\n summ_df = clstr_df.groupby(\"clstr\").agg(\n num_organisms=(\"organism\", pd.Series.nunique), organism_list=(\"organism\", set)\n )\n\n close_strains = set()\n for row in summ_df.query(\"num_organisms > 1\").itertuples(index=False):\n close_strains.update(row.organism_list)\n\n logging.info(\n f\"There are {len(close_strains)} strains in the community for which another strain exists with an identical V3-V4 region\"\n )\n\n summ_df[\"organism_list\"] = summ_df[\"organism_list\"].apply(\n lambda x: \"; \".join(set(x))\n )\n summ_df = summ_df.sort_values(\"num_organisms\", ascending=False)\n\n summ_df.to_csv(output)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s\\t[%(levelname)s]:\\t%(message)s\",\n )\n main()\n"
] | [
[
"pandas.read_table"
]
] |
ZZIQIN/FATE | [
"cc6783927564cbb15c067d5010f1cdf82a5de20a"
] | [
"federatedml/ftl/hetero_ftl/hetero_ftl_host.py"
] | [
"#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\n\nimport numpy as np\n\nfrom arch.api.utils import log_utils\nfrom federatedml.evaluation import Evaluation\nfrom federatedml.ftl.data_util.common_data_util import overlapping_samples_converter, load_model_parameters, \\\n save_model_parameters, create_table, convert_instance_table_to_dict, convert_instance_table_to_array, \\\n add_random_mask_for_list_of_values, remove_random_mask_from_list_of_values\nfrom federatedml.ftl.data_util.log_util import create_shape_msg\nfrom federatedml.ftl.eggroll_computation.helper import decrypt_matrix\nfrom federatedml.ftl.encrypted_ftl import EncryptedFTLHostModel\nfrom federatedml.ftl.encryption.encryption import generate_encryption_key_pair, decrypt_scalar, decrypt_array\nfrom federatedml.ftl.faster_encrypted_ftl import FasterEncryptedFTLHostModel\nfrom federatedml.ftl.hetero_ftl.hetero_ftl_base import HeteroFTLParty\nfrom federatedml.ftl.plain_ftl import PlainFTLHostModel\nfrom federatedml.param.param import FTLModelParam\nfrom federatedml.util import consts\nfrom federatedml.util.transfer_variable import HeteroFTLTransferVariable\n\nLOGGER = log_utils.getLogger()\n\n\nclass HeteroFTLHost(HeteroFTLParty):\n\n def __init__(self, host: PlainFTLHostModel, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(HeteroFTLHost, self).__init__()\n self.host_model = host\n self.model_param = model_param\n self.transfer_variable = transfer_variable\n self.max_iter = model_param.max_iter\n self.n_iter_ = 0\n\n def prepare_data(self, host_data):\n LOGGER.info(\"@ start host prepare data\")\n host_features_dict, _, host_sample_indexes = convert_instance_table_to_dict(host_data)\n host_sample_indexes = np.array(host_sample_indexes)\n\n self._do_remote(host_sample_indexes,\n name=self.transfer_variable.host_sample_indexes.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_sample_indexes),\n role=consts.GUEST,\n idx=-1)\n\n guest_sample_indexes = self._do_get(name=self.transfer_variable.guest_sample_indexes.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_sample_indexes),\n idx=-1)[0]\n\n host_features, overlap_indexes, _ = overlapping_samples_converter(host_features_dict, host_sample_indexes,\n guest_sample_indexes)\n return host_features, overlap_indexes\n\n def classified(self, prob_table, threshold):\n \"\"\"\n convert a probability table into a predicted class table.\n \"\"\"\n predict_table = prob_table.mapValues(lambda x: 1 if x > threshold else 0)\n return predict_table\n\n def evaluate(self, labels, pred_prob, pred_labels, evaluate_param):\n LOGGER.info(\"@ start host evaluate\")\n predict_res = None\n if evaluate_param.classi_type == consts.BINARY:\n predict_res = pred_prob\n elif evaluate_param.classi_type == consts.MULTY:\n predict_res = pred_labels\n else:\n LOGGER.warning(\"unknown classification type, return None as evaluation 
results\")\n\n eva = Evaluation(evaluate_param.classi_type)\n eva_report = eva.report(labels, predict_res, evaluate_param.metrics, evaluate_param.thresholds,\n evaluate_param.pos_label)\n\n LOGGER.info(\"@ evaluation report:\" + str(eva_report))\n return eva_report\n\n def predict(self, host_data, predict_param):\n LOGGER.info(\"@ start host predict\")\n features, labels, instances_indexes = convert_instance_table_to_array(host_data)\n host_x = np.squeeze(features)\n LOGGER.debug(\"host_x: \" + str(host_x.shape))\n\n host_prob = self.host_model.predict(host_x)\n self._do_remote(host_prob,\n name=self.transfer_variable.host_prob.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.host_prob),\n role=consts.GUEST, idx=-1)\n\n pred_prob = self._do_get(name=self.transfer_variable.pred_prob.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.pred_prob),\n idx=-1)[0]\n\n pred_prob = np.squeeze(pred_prob)\n LOGGER.debug(\"pred_prob: \" + str(pred_prob.shape))\n\n pred_prob_table = create_table(pred_prob, instances_indexes)\n actual_label_table = create_table(labels, instances_indexes)\n pred_label_table = self.classified(pred_prob_table, predict_param.threshold)\n if predict_param.with_proba:\n predict_result = actual_label_table.join(pred_prob_table, lambda label, prob: (label if label > 0 else 0, prob))\n predict_result = predict_result.join(pred_label_table, lambda x, y: (x[0], x[1], y))\n else:\n predict_result = actual_label_table.join(pred_label_table, lambda a_label, p_label: (a_label, None, p_label))\n return predict_result\n\n def load_model(self, model_table_name, model_namespace):\n LOGGER.info(\"@ load host model from name/ns\" + \", \" + str(model_table_name) + \", \" + str(model_namespace))\n model_parameters = load_model_parameters(model_table_name, model_namespace)\n self.host_model.restore_model(model_parameters)\n\n def save_model(self, model_table_name, model_namespace):\n LOGGER.info(\"@ save host model to name/ns\" + \", \" + str(model_table_name) + \", \" + str(model_namespace))\n _ = save_model_parameters(self.host_model.get_model_parameters(), model_table_name, model_namespace)\n\n\nclass HeteroPlainFTLHost(HeteroFTLHost):\n\n def __init__(self, host: PlainFTLHostModel, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(HeteroPlainFTLHost, self).__init__(host, model_param, transfer_variable)\n\n def fit(self, host_data):\n LOGGER.info(\"@ start host fit\")\n\n host_x, overlap_indexes = self.prepare_data(host_data)\n\n LOGGER.debug(\"host_x: \" + str(host_x.shape))\n LOGGER.debug(\"overlap_indexes: \" + str(len(overlap_indexes)))\n\n self.host_model.set_batch(host_x, overlap_indexes)\n while self.n_iter_ < self.max_iter:\n host_comp = self.host_model.send_components()\n self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.guest_component_list, self.n_iter_),\n idx=-1)[0]\n\n self.host_model.receive_components(guest_comp)\n\n is_stop = self._do_get(name=self.transfer_variable.is_stopped.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_stopped, self.n_iter_),\n idx=-1)[0]\n\n LOGGER.info(\"@ time: \" + str(time.time()) + \", ep: \" + 
str(self.n_iter_) + \", converged: \" + str(is_stop))\n self.n_iter_ += 1\n if is_stop:\n break\n\n\n\"\"\"\nCentralized encryption scheme with an arbiter in the loop for decryption.\n\"\"\"\n\n\nclass HeteroEncryptFTLHost(HeteroFTLHost):\n\n def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(HeteroEncryptFTLHost, self).__init__(host, model_param, transfer_variable)\n self.host_model: EncryptedFTLHostModel = host\n\n def _precompute(self):\n pass\n\n def fit(self, host_data):\n LOGGER.info(\"@ start host fit\")\n # get public key from arbiter\n public_key = self._do_get(name=self.transfer_variable.paillier_pubkey.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.paillier_pubkey),\n idx=-1)[0]\n\n host_x, overlap_indexes = self.prepare_data(host_data)\n\n LOGGER.debug(\"host_x: \" + str(host_x.shape))\n LOGGER.debug(\"overlap_indexes: \" + str(len(overlap_indexes)))\n\n self.host_model.set_batch(host_x, overlap_indexes)\n self.host_model.set_public_key(public_key)\n\n start_time = time.time()\n while self.n_iter_ < self.max_iter:\n host_comp = self.host_model.send_components()\n self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_component_list, self.n_iter_),\n idx=-1)[0]\n self.host_model.receive_components(guest_comp)\n\n self._precompute()\n\n encrypt_host_gradients = self.host_model.send_gradients()\n self._do_remote(encrypt_host_gradients, name=self.transfer_variable.encrypt_host_gradient.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.encrypt_host_gradient, self.n_iter_),\n role=consts.ARBITER,\n idx=-1)\n\n decrypt_host_gradients = self._do_get(name=self.transfer_variable.decrypt_host_gradient.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.decrypt_host_gradient, self.n_iter_),\n idx=-1)[0]\n self.host_model.receive_gradients(decrypt_host_gradients)\n\n is_stop = self._do_get(name=self.transfer_variable.is_encrypted_ftl_stopped.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_encrypted_ftl_stopped, self.n_iter_),\n idx=-1)[0]\n\n LOGGER.info(\"@ time: \" + str(time.time()) + \", ep: \" + str(self.n_iter_) + \", converged: \" + str(is_stop))\n self.n_iter_ += 1\n if is_stop:\n break\n\n end_time = time.time()\n LOGGER.info(\"@ running time: \" + str(end_time - start_time))\n\n\nclass FasterHeteroEncryptFTLHost(HeteroEncryptFTLHost):\n\n def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(FasterHeteroEncryptFTLHost, self).__init__(host, model_param, transfer_variable)\n self.host_model: FasterEncryptedFTLHostModel = host\n\n def _precompute(self):\n LOGGER.info(\"@ start host precompute\")\n\n host_precomputed_comp = self.host_model.send_precomputed_components()\n self._do_remote(host_precomputed_comp, name=self.transfer_variable.host_precomputed_comp_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_precomputed_comp_list,\n self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n guest_precomputed_comp = self._do_get(name=self.transfer_variable.guest_precomputed_comp_list.name,\n 
tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_precomputed_comp_list, self.n_iter_),\n idx=-1)[0]\n self.host_model.receive_precomputed_components(guest_precomputed_comp)\n\n\n\"\"\"\nDecentralized encryption scheme without arbiter in the loop.\n\"\"\"\n\n\nclass HeteroDecentralizedEncryptFTLHost(HeteroFTLHost):\n\n def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(HeteroDecentralizedEncryptFTLHost, self).__init__(host, model_param, transfer_variable)\n self.host_model: EncryptedFTLHostModel = host\n self.public_key = None\n self.private_key = None\n self.guest_public_key = None\n\n def _precompute(self):\n pass\n\n def prepare_encryption_key_pair(self):\n LOGGER.info(\"@ start host prepare encryption key pair\")\n\n self.public_key, self.private_key = generate_encryption_key_pair()\n # exchange public_key with guest\n self._do_remote(self.public_key, name=self.transfer_variable.host_public_key.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_public_key,\n self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n self.guest_public_key = self._do_get(name=self.transfer_variable.guest_public_key.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_public_key, self.n_iter_),\n idx=-1)[0]\n\n def fit(self, host_data):\n LOGGER.info(\"@ start host fit\")\n self.prepare_encryption_key_pair()\n host_x, overlap_indexes = self.prepare_data(host_data)\n\n LOGGER.debug(\"host_x: \" + str(host_x.shape))\n LOGGER.debug(\"overlap_indexes: \" + str(len(overlap_indexes)))\n\n self.host_model.set_batch(host_x, overlap_indexes)\n self.host_model.set_public_key(self.public_key)\n self.host_model.set_guest_public_key(self.guest_public_key)\n self.host_model.set_private_key(self.private_key)\n\n start_time = time.time()\n while self.n_iter_ < self.max_iter:\n\n # Stage 1: compute and encrypt components (using host public key) required by guest to\n # calculate gradients and loss.\n LOGGER.debug(\"@ Stage 1: \")\n host_comp = self.host_model.send_components()\n LOGGER.debug(\"send enc host_comp: \" + create_shape_msg(host_comp))\n self._do_remote(host_comp, name=self.transfer_variable.host_component_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_component_list, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n # Stage 2: receive guest components in encrypted form (encrypted by guest public key),\n # and calculate host gradients in encrypted form (encrypted by guest public key),\n # and send them to guest for decryption\n LOGGER.debug(\"@ Stage 2: \")\n guest_comp = self._do_get(name=self.transfer_variable.guest_component_list.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_component_list, self.n_iter_),\n idx=-1)[0]\n LOGGER.debug(\"receive enc guest_comp: \" + create_shape_msg(guest_comp))\n self.host_model.receive_components(guest_comp)\n\n self._precompute()\n\n # calculate host gradients in encrypted form (encrypted by guest public key)\n encrypt_host_gradients = self.host_model.send_gradients()\n LOGGER.debug(\"send encrypt_guest_gradients: \" + create_shape_msg(encrypt_host_gradients))\n\n # add random mask to encrypt_host_gradients and send them to guest for decryption\n masked_enc_host_gradients, gradients_masks = add_random_mask_for_list_of_values(encrypt_host_gradients)\n\n LOGGER.debug(\"send masked_enc_host_gradients: \" + create_shape_msg(masked_enc_host_gradients))\n 
self._do_remote(masked_enc_host_gradients, name=self.transfer_variable.masked_enc_host_gradients.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_host_gradients, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n # Stage 3: receive and then decrypt masked encrypted guest gradients and masked encrypted guest loss,\n # and send them to guest\n LOGGER.debug(\"@ Stage 3: \")\n masked_enc_guest_gradients = self._do_get(name=self.transfer_variable.masked_enc_guest_gradients.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_guest_gradients, self.n_iter_),\n idx=-1)[0]\n\n masked_enc_guest_loss = self._do_get(name=self.transfer_variable.masked_enc_loss.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_enc_loss, self.n_iter_),\n idx=-1)[0]\n\n masked_dec_guest_gradients = self.__decrypt_gradients(masked_enc_guest_gradients)\n masked_dec_guest_loss = self.__decrypt_loss(masked_enc_guest_loss)\n\n LOGGER.debug(\"send masked_dec_guest_gradients: \" + create_shape_msg(masked_dec_guest_gradients))\n self._do_remote(masked_dec_guest_gradients, name=self.transfer_variable.masked_dec_guest_gradients.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_dec_guest_gradients, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n LOGGER.debug(\"send masked_dec_guest_loss: \" + str(masked_dec_guest_loss))\n self._do_remote(masked_dec_guest_loss, name=self.transfer_variable.masked_dec_loss.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.masked_dec_loss, self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n # Stage 4: receive masked but decrypted host gradients from guest and remove mask,\n # and update host model parameters using these gradients.\n LOGGER.debug(\"@ Stage 4: \")\n masked_dec_host_gradients = self._do_get(name=self.transfer_variable.masked_dec_host_gradients.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.masked_dec_host_gradients, self.n_iter_),\n idx=-1)[0]\n LOGGER.debug(\"receive masked_dec_host_gradients: \" + create_shape_msg(masked_dec_host_gradients))\n\n cleared_dec_host_gradients = remove_random_mask_from_list_of_values(masked_dec_host_gradients, gradients_masks)\n\n # update host model parameters using these gradients.\n self.host_model.receive_gradients(cleared_dec_host_gradients)\n\n # Stage 5: determine whether training is terminated.\n LOGGER.debug(\"@ Stage 5: \")\n is_stop = self._do_get(name=self.transfer_variable.is_decentralized_enc_ftl_stopped.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_decentralized_enc_ftl_stopped, self.n_iter_),\n idx=-1)[0]\n\n LOGGER.info(\"@ time: \" + str(time.time()) + \", ep: \" + str(self.n_iter_) + \", converged: \" + str(is_stop))\n self.n_iter_ += 1\n if is_stop:\n break\n\n end_time = time.time()\n LOGGER.info(\"@ running time: \" + str(end_time - start_time))\n\n def __decrypt_gradients(self, encrypt_gradients):\n return decrypt_matrix(self.private_key, encrypt_gradients[0]), decrypt_array(self.private_key, encrypt_gradients[1])\n\n def __decrypt_loss(self, encrypt_loss):\n return decrypt_scalar(self.private_key, encrypt_loss)\n\n\nclass FasterHeteroDecentralizedEncryptFTLHost(HeteroDecentralizedEncryptFTLHost):\n\n def __init__(self, host, model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable):\n super(FasterHeteroDecentralizedEncryptFTLHost, self).__init__(host, model_param, transfer_variable)\n 
self.host_model: FasterEncryptedFTLHostModel = host\n\n def _precompute(self):\n LOGGER.debug(\"@ start precompute\")\n\n host_precomputed_comp = self.host_model.send_precomputed_components()\n self._do_remote(host_precomputed_comp, name=self.transfer_variable.host_precomputed_comp_list.name,\n tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_precomputed_comp_list,\n self.n_iter_),\n role=consts.GUEST,\n idx=-1)\n\n guest_precomputed_comp = self._do_get(name=self.transfer_variable.guest_precomputed_comp_list.name,\n tag=self.transfer_variable.generate_transferid(\n self.transfer_variable.guest_precomputed_comp_list, self.n_iter_),\n idx=-1)[0]\n self.host_model.receive_precomputed_components(guest_precomputed_comp)\n\n\nclass HostFactory(object):\n\n @classmethod\n def create(cls, ftl_model_param: FTLModelParam, transfer_variable: HeteroFTLTransferVariable, ftl_local_model):\n if ftl_model_param.is_encrypt:\n if ftl_model_param.enc_ftl == \"dct_enc_ftl\":\n # decentralized encrypted ftl host\n LOGGER.debug(\"@ create decentralized encrypted ftl_host\")\n host_model = EncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = HeteroDecentralizedEncryptFTLHost(host_model, ftl_model_param, transfer_variable)\n elif ftl_model_param.enc_ftl == \"dct_enc_ftl2\":\n # decentralized encrypted faster ftl host\n LOGGER.debug(\"@ create decentralized encrypted faster ftl_host\")\n host_model = FasterEncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = FasterHeteroDecentralizedEncryptFTLHost(host_model, ftl_model_param, transfer_variable)\n elif ftl_model_param.enc_ftl == \"enc_ftl2\":\n # encrypted faster ftl host\n LOGGER.debug(\"@ create encrypted faster ftl_host\")\n host_model = FasterEncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = FasterHeteroEncryptFTLHost(host_model, ftl_model_param, transfer_variable)\n else:\n # encrypted ftl host\n LOGGER.debug(\"@ create encrypted ftl_host\")\n host_model = EncryptedFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = HeteroEncryptFTLHost(host_model, ftl_model_param, transfer_variable)\n\n else:\n # plain ftl host\n LOGGER.debug(\"@ create plain ftl_host\")\n host_model = PlainFTLHostModel(local_model=ftl_local_model, model_param=ftl_model_param)\n host = HeteroPlainFTLHost(host_model, ftl_model_param, transfer_variable)\n return host\n\n\n"
] | [
[
"numpy.array",
"numpy.squeeze"
]
] |
warlock8hz/h5pyViewer | [
"4955aa6fdd66255738bd86d7b8947282133c5b82"
] | [
"h5pyViewer/FrmPyFAI.py"
] | [
"#!/usr/bin/env python\n#*-----------------------------------------------------------------------*\n#| |\n#| Copyright (c) 2013 by Paul Scherrer Institute (http://www.psi.ch) |\n#| |\n#| Author Thierry Zamofing ([email protected]) |\n#*-----------------------------------------------------------------------*\n'''\nimplements an image view to show a colored image of a hdf5 dataset.\n'''\n\nif __name__ == '__main__':\n #Used to guarantee to use at least Wx2.8\n import wxversion\n wxversion.ensureMinimal('2.8')\nimport wx\nimport matplotlib as mpl\nif __name__ == '__main__':\n mpl.use('WXAgg')\n #or mpl.use('WX')\n #matplotlib.get_backend()\n\nimport os,h5py\nimport numpy as np\nimport utilities as ut\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nimport os,h5py\nfrom GLCanvasImg import *\nimport pyFAI\nfrom hdfImageGL import HdfImageGLFrame\nfrom glumpy.image.texture import Texture\nfrom scipy import ndimage as ndi\n\ndef FindCenter(arr):\n m=ndi.median_filter(arr, 5)\n sx=m.sum(1)\n sy=m.sum(0)\n shape=arr.shape\n xx=np.arange(shape[0])\n yy=np.arange(shape[1])\n x=(xx*sx).sum()/sx.sum()\n y=(yy*sy).sum()/sy.sum()\n #print x,y\n #import pylab as plt #used for the colormaps\n #plt.figure()\n #plt.subplot(211)\n #plt.plot(sx)\n #plt.subplot(212)\n #plt.plot(sy)\n #plt.show(block=False)\n return (x,y)\n\nclass MPLCanvasPyFAI1D(FigureCanvas):\n def __init__(self,parent,SetStatusCB=None):\n if SetStatusCB:\n self.SetStatusCB=SetStatusCB\n fig = mpl.figure.Figure()\n ax = fig.add_axes([0.075,0.1,0.75,0.85])\n FigureCanvas.__init__(self,parent, -1, fig)\n #self.mpl_connect('motion_notify_event', self.OnMotion)\n #self.mpl_connect('button_press_event', self.OnBtnPress)\n #self.mpl_connect('button_release_event', self.OnBtnRelease)\n #self.mpl_connect('scroll_event', self.OnBtnScroll)\n #self.mpl_connect('key_press_event',self.OnKeyPress)\n self.fig=fig\n self.ax=ax\n\n def InitChild(self,data):\n fig=self.fig\n ax=self.ax\n ctrX,ctrY=self.center=FindCenter(data)\n self.ai = pyFAI.AzimuthalIntegrator(1.e3, ctrX, ctrY, 0.0, 0.0, 0.0, 1.e0, 1.e0)\n #canvas=self.canvas\n self.numPtTh=int(np.average(data.shape)/2.)\n out=self.ai.xrpd(data,self.numPtTh)\n self.hl=ax.plot(*out)\n ax.set_yscale('log')\n #canvas.data=imgPolar\n #print imgPolar.shape\n #out=ai.xrpd(imgData,1000)\n #out=ai.xrpd_OpenCL(imgData,1000)\n #import pylab\n #pylab.plot(*out)\n #pylab.yscale(\"log\")\n #pylab.show()\n\nclass HdfPyFAI1DFrame(wx.Frame):\n def __init__(self, parent,lbl,hid):\n wx.Frame.__init__(self, parent, title=lbl, size=wx.Size(850, 650))\n imgDir=ut.Path.GetImage()\n icon = wx.Icon(os.path.join(imgDir,'h5pyViewer.ico'), wx.BITMAP_TYPE_ICO)\n self.SetIcon(icon)\n\n t=type(hid)\n if t==h5py.h5d.DatasetID:\n data=h5py.Dataset(hid)\n\n canvas = MPLCanvasPyFAI1D(self,self.SetStatusCB)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n self.SetSizer(sizer)\n\n toolbar=ut.AddToolbar(canvas,sizer)\n\n wxAxCtrlLst=[]\n l=len(data.shape)\n idxXY=(l-2,l-1)\n for idx,l in enumerate(data.shape):\n if idx in idxXY:\n continue\n wxAxCtrl=ut.SliderGroup(self, label='Axis:%d'%idx,range=(0,l-1))\n wxAxCtrl.idx=idx\n wxAxCtrlLst.append(wxAxCtrl)\n sizer.Add(wxAxCtrl.sizer, 0, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, border=5)\n wxAxCtrl.SetCallback(HdfPyFAI1DFrame.OnSetView,wxAxCtrl)\n\n sl=ut.GetSlice(idxXY,data.shape,wxAxCtrlLst)\n\n canvas.InitChild(data[sl])\n\n #self.Fit()\n self.Centre()\n\n self.BuildMenu()\n self.canvas=canvas\n self.sizer=sizer\n 
self.toolbar=toolbar\n self.data=data\n self.idxXY=idxXY\n self.wxAxCtrlLst=wxAxCtrlLst\n\n def BuildMenu(self):\n mnBar = wx.MenuBar()\n\n #-------- Edit Menu --------\n mn = wx.Menu()\n #mnItem=mn.Append(wx.ID_ANY, 'Setup Colormap', 'Setup the color mapping ');self.Bind(wx.EVT_MENU, self.OnColmapSetup, mnItem)\n #mnItem=mn.Append(wx.ID_ANY, 'Linear Mapping', 'Use a linear values to color mapping ');self.Bind(wx.EVT_MENU, self.OnMapLin, mnItem)\n #mnItem=mn.Append(wx.ID_ANY, 'Log Mapping', 'Use a logarithmic values to color mapping ');self.Bind(wx.EVT_MENU, self.OnMapLog, mnItem)\n #mnItem=mn.Append(wx.ID_ANY, 'Invert X-Axis', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnInvertAxis, mnItem)\n #self.mnIDxAxis=mnItem.GetId()\n #mnItem=mn.Append(wx.ID_ANY, 'Invert Y-Axis', kind=wx.ITEM_CHECK);self.Bind(wx.EVT_MENU, self.OnInvertAxis, mnItem)\n mnBar.Append(mn, '&Edit')\n mn = wx.Menu()\n #mnItem=mn.Append(wx.ID_ANY, 'Help', 'How to use the image viewer');self.Bind(wx.EVT_MENU, self.OnHelp, mnItem)\n mnBar.Append(mn, '&Help')\n\n self.SetMenuBar(mnBar)\n self.CreateStatusBar()\n\n def SetIdxXY(self,x,y):\n self.idxXY=(x,y)\n\n @staticmethod\n def SetStatusCB(obj,mode,v):\n if mode==0:\n obj.SetStatusText( \"x= %d y=%d val=%g\"%v,0)\n elif mode==1:\n obj.SetStatusText( \"Colormap Value %d (drag to scale)\"%v,0)\n else:\n raise KeyError('wrong mode')\n\n @staticmethod\n def OnSetView(usrData,value,msg):\n 'called when a slice is selected with the slider controls'\n imgFrm=usrData.slider.Parent\n #imgFrm.img.set_array(imgFrm.data[usrData.value,...])\n data=imgFrm.data\n sl=ut.GetSlice(imgFrm.idxXY,data.shape,imgFrm.wxAxCtrlLst)\n\n hl=imgFrm.canvas.hl\n ai=imgFrm.canvas.ai\n numPtTh=imgFrm.canvas.numPtTh\n out=ai.xrpd(data[sl],numPtTh)\n hl[0].set_ydata(out[1])\n imgFrm.canvas.draw()\n pass\n\n\n###########################################\n\nclass HdfPyFAIFrame(HdfImageGLFrame):\n def __init__(self, parent, title, hid):\n HdfImageGLFrame.__init__(self, parent, title, hid)\n #HdfPyFAI1DFrame(self, title, hid)\n canvas=self.canvas\n raw=canvas.data\n ctrX,ctrY=FindCenter(raw)\n self.ai = pyFAI.AzimuthalIntegrator(1.e3, ctrX, ctrY, 0.0, 0.0, 0.0, 1.e0, 1.e0)\n\n raw\n self.numPtTh=int(np.average(raw.shape)/2.)\n self.numPtCh=360\n\n imgPolar,theta,chi=self.ai.xrpd2(raw,self.numPtTh,self.numPtCh)\n canvas.data=imgPolar\n print (imgPolar.shape)\n\n def BuildMenu(self):\n HdfImageGLFrame.BuildMenu(self)\n mnBar=self.GetMenuBar()\n mn=mnBar.GetMenu(0)\n itemLst=mn.GetMenuItems()\n it=itemLst[0]\n it.GetItemLabel()\n mnItem=mn.Append(wx.ID_ANY, 'Setup FAI', 'Setup fast azimutal integration ');self.Bind(wx.EVT_MENU, self.OnFAISetup, mnItem)\n\n @staticmethod\n def OnSetView(usrData,value,msg):\n 'called when a slice is selected with the slider controls'\n frm=usrData.slider.Parent\n ds=frm.dataSet\n canvas=frm.canvas\n glImg=canvas.glImg\n sl=ut.GetSlice(frm.idxXY,ds.shape,frm.wxAxCtrlLst)\n imgPolar,theta,chi=frm.ai.xrpd2(ds[sl],frm.numPtTh,frm.numPtCh)\n canvas.data[:]=imgPolar[:]\n glImg.data[:]=canvas.GetTxrData()\n glImg.update()\n canvas.OnPaint(None)#force to repaint, Refresh and Update do not force !\n #canvas.Refresh(False)\n #canvas.Update()\n pass\n\n def OnFAISetup(self, event):\n dlg=DlgSetupPyFAI(self)\n if dlg.ShowModal()==wx.ID_OK:\n pass\n dlg.Destroy()\n\n\nclass DlgSetupPyFAI(wx.Dialog):\n def __init__(self,parent):\n wx.Dialog.__init__(self,parent,-1,'pyFAI Setup')\n ai=parent.ai\n #glColBar=parent.glColBar\n #dataRange=parent.dataRange\n txtCtrX=wx.StaticText(self,-1,'center 
X')\n txtCtrY=wx.StaticText(self,-1,'center Y')\n txtNumPtTh=wx.StaticText(self,-1,'number of pt in Theta')\n txtNumPtCh=wx.StaticText(self,-1,'number of pt in Chi')\n txtMethod=wx.StaticText(self,-1,'method')\n\n\n\n self.edCtrX=edCtrX=wx.TextCtrl(self,-1,'%g'%ai.get_poni1(),style=wx.TE_PROCESS_ENTER)\n self.edCtrY=edCtrY=wx.TextCtrl(self,-1,'%g'%ai.get_poni2(),style=wx.TE_PROCESS_ENTER)\n self.edNumPtTh=edNumPtTh=wx.TextCtrl(self,-1,'%g'%parent.numPtTh,style=wx.TE_PROCESS_ENTER)\n self.edNumPtCh=edNumPtCh=wx.TextCtrl(self,-1,'%g'%parent.numPtCh,style=wx.TE_PROCESS_ENTER)\n self.cbMethod=cbMethod=wx.ComboBox(self, -1, choices=('default','numny'), style=wx.CB_READONLY)\n #cbtxrFunc.SetSelection(parent.txrTrfFunc)\n\n sizer=wx.BoxSizer(wx.VERTICAL)\n fgs=wx.FlexGridSizer(5,2,5,5)\n fgs.Add(txtCtrX,0,wx.ALIGN_RIGHT)\n fgs.Add(edCtrX,0,wx.EXPAND)\n fgs.Add(txtCtrY,0,wx.ALIGN_RIGHT)\n fgs.Add(edCtrY,0,wx.EXPAND)\n fgs.Add(txtNumPtTh,0,wx.ALIGN_RIGHT)\n fgs.Add(edNumPtTh,0,wx.EXPAND)\n fgs.Add(txtNumPtCh,0,wx.ALIGN_RIGHT)\n fgs.Add(edNumPtCh,0,wx.EXPAND)\n fgs.Add(txtMethod,0,wx.ALIGN_RIGHT)\n fgs.Add(cbMethod,0,wx.EXPAND)\n sizer.Add(fgs,0,wx.EXPAND|wx.ALL,5)\n\n #edVMin.SetFocus()\n\n btns = self.CreateButtonSizer(wx.OK|wx.CANCEL)\n btnApply=wx.Button(self, -1, 'Apply')\n btns.Add(btnApply, 0, wx.ALL, 5)\n sizer.Add(btns,0,wx.EXPAND|wx.ALL,5)\n self.Bind(wx.EVT_BUTTON, self.OnModify, id=wx.ID_OK)\n self.Bind(wx.EVT_BUTTON, self.OnModify, btnApply)\n #self.Bind(wx.EVT_TEXT, self.OnModify, edCtrX)\n #self.Bind(wx.EVT_TEXT, self.OnModify, edCtrY)\n #self.Bind(wx.EVT_TEXT, self.OnModify, edNumSector)\n self.Bind(wx.EVT_COMBOBOX, self.OnModify, cbMethod)\n self.SetSizer(sizer)\n sizer.Fit(self)\n\n def OnModify(self, event):\n print ('OnModify')\n frm=self.GetParent()\n ds=frm.dataSet\n canvas=frm.canvas\n glImg=canvas.glImg\n ai=frm.ai\n ai.set_poni1(float(self.edCtrX.Value))\n ai.set_poni2(float(self.edCtrY.Value))\n frm.numPtTh=int(self.edNumPtTh.Value)\n frm.numPtCh=int(self.edNumPtCh.Value)\n sl=ut.GetSlice(frm.idxXY,ds.shape,frm.wxAxCtrlLst)\n imgPolar,theta,chi=frm.ai.xrpd2(ds[sl],frm.numPtTh,frm.numPtCh)\n if canvas.data.shape==imgPolar.shape:\n canvas.data[:]=imgPolar[:]\n glImg.data[:]=canvas.GetTxrData()\n else:\n canvas.data=imgPolar;\n glImg._data=canvas.GetTxrData()\n glImg._texture=Texture(glImg._data)\n #self.glImg=glImg=glumpy.image.Image(txrData, colormap=colMap,vmin=txrRng[0], vmax=txrRng[1])\n print (canvas.data.shape,glImg.data.shape)\n glImg.update()\n canvas.OnPaint(None)#force to repaint, Refresh and Update do not force !\n frm.Refresh(False)\n if event.GetId()==wx.ID_OK:\n event.Skip()#do not consume (use event to close the window and sent return code)\n\n\nif __name__ == '__main__':\n import os,sys,argparse #since python 2.7\n def GetParser(required=True):\n fnHDF='/scratch/detectorData/e14472_00033.hdf5'\n #lbl='mcs'\n lbl='pilatus_1'\n #lbl='spec'\n elem='/entry/data/'+lbl\n exampleCmd='--hdfFile='+fnHDF+' --elem='+elem\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description=__doc__,\n epilog='Example:\\n'+os.path.basename(sys.argv[0])+' '+exampleCmd+'\\n ')\n parser.add_argument('--hdfFile', required=required, default=fnHDF, help='the hdf5 to show')\n parser.add_argument('--elem', required=required, default=elem, help='the path to the element in the hdf5 file')\n return parser\n args = parser.parse_args()\n return args\n\n class App(wx.App):\n def OnInit(self):\n parser=GetParser()\n #parser=GetParser(False) # debug with 
exampleCmd\n args = parser.parse_args()\n try:\n self.fid=fid=h5py.h5f.open(args.hdfFile)\n except IOError as e:\n sys.stderr.write('Unable to open File: '+args.hdfFile+'\\n')\n parser.print_usage(sys.stderr)\n return True\n try:\n hid = h5py.h5o.open(fid,args.elem)\n except KeyError as e:\n sys.stderr.write('Unable to open Object: '+args.elem+'\\n')\n parser.print_usage(sys.stderr)\n return True\n frame = HdfPyFAIFrame(None,args.elem,hid)\n #frame = HdfPyFAI1DFrame(None,args.elem,hid)\n frame.Show()\n self.SetTopWindow(frame)\n return True\n\n def OnExit(self):\n self.fid.close()\n\n ut.StopWatch.Start()\n app = App()\n app.MainLoop()\n"
] | [
[
"scipy.ndimage.median_filter",
"matplotlib.figure.Figure",
"matplotlib.backends.backend_wxagg.FigureCanvasWxAgg.__init__",
"numpy.arange",
"matplotlib.use",
"numpy.average"
]
] |
abbasegbeyemi/pyqtgraph | [
"6aeafce477d1d7eebb9d2fe824d4c5573ef9ceed"
] | [
"examples/optics/pyoptic.py"
] | [
"# -*- coding: utf-8 -*-\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport csv, gzip, os\nfrom pyqtgraph import Point\n\nclass GlassDB:\n \"\"\"\n Database of dispersion coefficients for Schott glasses\n + Corning 7980\n \"\"\"\n def __init__(self, fileName='schott_glasses.csv'):\n path = os.path.dirname(__file__)\n fh = gzip.open(os.path.join(path, 'schott_glasses.csv.gz'), 'rb')\n r = csv.reader(map(str, fh.readlines()))\n lines = [x for x in r]\n self.data = {}\n header = lines[0]\n for l in lines[1:]:\n info = {}\n for i in range(1, len(l)):\n info[header[i]] = l[i]\n self.data[l[0]] = info\n self.data['Corning7980'] = { ## Thorlabs UV fused silica--not in schott catalog.\n 'B1': 0.68374049400,\n 'B2': 0.42032361300,\n 'B3': 0.58502748000,\n 'C1': 0.00460352869,\n 'C2': 0.01339688560,\n 'C3': 64.49327320000,\n 'TAUI25/250': 0.95, ## transmission data is fabricated, but close.\n 'TAUI25/1400': 0.98,\n }\n \n for k in self.data:\n self.data[k]['ior_cache'] = {}\n \n\n def ior(self, glass, wl):\n \"\"\"\n Return the index of refraction for *glass* at wavelength *wl*.\n \n The *glass* argument must be a key in self.data.\n \"\"\"\n info = self.data[glass]\n cache = info['ior_cache']\n if wl not in cache:\n B = list(map(float, [info['B1'], info['B2'], info['B3']]))\n C = list(map(float, [info['C1'], info['C2'], info['C3']]))\n w2 = (wl/1000.)**2\n n = np.sqrt(1.0 + (B[0]*w2 / (w2-C[0])) + (B[1]*w2 / (w2-C[1])) + (B[2]*w2 / (w2-C[2])))\n cache[wl] = n\n return cache[wl]\n \n def transmissionCurve(self, glass):\n data = self.data[glass]\n keys = [int(x[7:]) for x in data.keys() if 'TAUI25' in x]\n keys.sort()\n curve = np.empty((2,len(keys)))\n for i in range(len(keys)):\n curve[0][i] = keys[i]\n key = 'TAUI25/%d' % keys[i]\n val = data[key]\n if val == '':\n val = 0\n else:\n val = float(val)\n curve[1][i] = val\n return curve\n \n\nGLASSDB = GlassDB()\n\n\ndef wlPen(wl):\n \"\"\"Return a pen representing the given wavelength\"\"\"\n l1 = 400\n l2 = 700\n hue = np.clip(((l2-l1) - (wl-l1)) * 0.8 / (l2-l1), 0, 0.8)\n val = 1.0\n if wl > 700:\n val = 1.0 * (((700-wl)/700.) + 1)\n elif wl < 400:\n val = wl * 1.0/400.\n #print hue, val\n color = pg.hsvColor(hue, 1.0, val)\n pen = pg.mkPen(color)\n return pen\n\n\nclass ParamObj(object):\n # Just a helper for tracking parameters and responding to changes\n def __init__(self):\n self.__params = {}\n \n def __setitem__(self, item, val):\n self.setParam(item, val)\n \n def setParam(self, param, val):\n self.setParams(**{param:val})\n \n def setParams(self, **params):\n \"\"\"Set parameters for this optic. 
This is a good function to override for subclasses.\"\"\"\n self.__params.update(params)\n self.paramStateChanged()\n\n def paramStateChanged(self):\n pass\n\n def __getitem__(self, item):\n # bug in pyside 1.2.2 causes getitem to be called inside QGraphicsObject.parentItem:\n return self.getParam(item) # PySide bug: https://bugreports.qt.io/browse/PYSIDE-671\n \n def __len__(self):\n # Workaround for PySide bug: https://bugreports.qt.io/browse/PYSIDE-671\n return 0\n\n def getParam(self, param):\n return self.__params[param]\n\n\nclass Optic(pg.GraphicsObject, ParamObj):\n \n sigStateChanged = QtCore.Signal()\n \n \n def __init__(self, gitem, **params):\n ParamObj.__init__(self)\n pg.GraphicsObject.__init__(self) #, [0,0], [1,1])\n\n self.gitem = gitem\n self.surfaces = gitem.surfaces\n gitem.setParentItem(self)\n \n self.roi = pg.ROI([0,0], [1,1])\n self.roi.addRotateHandle([1, 1], [0.5, 0.5])\n self.roi.setParentItem(self)\n \n defaults = {\n 'pos': Point(0,0),\n 'angle': 0,\n }\n defaults.update(params)\n self._ior_cache = {}\n self.roi.sigRegionChanged.connect(self.roiChanged)\n self.setParams(**defaults)\n \n def updateTransform(self):\n self.setPos(0, 0)\n tr = QtGui.QTransform()\n self.setTransform(tr.translate(Point(self['pos'])).rotate(self['angle']))\n \n def setParam(self, param, val):\n ParamObj.setParam(self, param, val)\n\n def paramStateChanged(self):\n \"\"\"Some parameters of the optic have changed.\"\"\"\n # Move graphics item\n self.gitem.setPos(Point(self['pos']))\n self.gitem.resetTransform()\n self.gitem.setRotation(self['angle'])\n \n # Move ROI to match\n try:\n self.roi.sigRegionChanged.disconnect(self.roiChanged)\n br = self.gitem.boundingRect()\n o = self.gitem.mapToParent(br.topLeft())\n self.roi.setAngle(self['angle'])\n self.roi.setPos(o)\n self.roi.setSize([br.width(), br.height()])\n finally:\n self.roi.sigRegionChanged.connect(self.roiChanged)\n \n self.sigStateChanged.emit()\n\n def roiChanged(self, *args):\n pos = self.roi.pos()\n # rotate gitem temporarily so we can decide where it will need to move\n self.gitem.resetTransform()\n self.gitem.setRotation(self.roi.angle())\n br = self.gitem.boundingRect()\n o1 = self.gitem.mapToParent(br.topLeft())\n self.setParams(angle=self.roi.angle(), pos=pos + (self.gitem.pos() - o1))\n \n def boundingRect(self):\n return QtCore.QRectF()\n \n def paint(self, p, *args):\n pass\n\n def ior(self, wavelength):\n return GLASSDB.ior(self['glass'], wavelength)\n \n\n\nclass Lens(Optic):\n def __init__(self, **params):\n defaults = {\n 'dia': 25.4, ## diameter of lens\n 'r1': 50., ## positive means convex, use 0 for planar\n 'r2': 0, ## negative means convex\n 'd': 4.0,\n 'glass': 'N-BK7',\n 'reflect': False,\n }\n defaults.update(params)\n d = defaults.pop('d')\n defaults['x1'] = -d/2.\n defaults['x2'] = d/2.\n \n gitem = CircularSolid(brush=(100, 100, 130, 100), **defaults)\n Optic.__init__(self, gitem, **defaults)\n \n def propagateRay(self, ray):\n \"\"\"Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays\"\"\"\n\n \"\"\"\n NOTE:: We can probably use this to compute refractions faster: (from GLSL 120 docs)\n\n For the incident vector I and surface normal N, and the\n ratio of indices of refraction eta, return the refraction\n vector. 
The result is computed by\n k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I))\n if (k < 0.0)\n return genType(0.0)\n else\n return eta * I - (eta * dot(N, I) + sqrt(k)) * N\n The input parameters for the incident vector I and the\n surface normal N must already be normalized to get the\n desired results. eta == ratio of IORs\n\n\n For reflection:\n For the incident vector I and surface orientation N,\n returns the reflection direction:\n I – 2 ∗ dot(N, I) ∗ N\n N must already be normalized in order to achieve the\n desired result.\n \"\"\"\n iors = [self.ior(ray['wl']), 1.0]\n for i in [0,1]:\n surface = self.surfaces[i]\n ior = iors[i]\n p1, ai = surface.intersectRay(ray)\n if p1 is None:\n ray.setEnd(None)\n break\n p1 = surface.mapToItem(ray, p1)\n \n rd = ray['dir']\n a1 = np.arctan2(rd[1], rd[0])\n ar = a1 - ai + np.arcsin((np.sin(ai) * ray['ior'] / ior))\n ray.setEnd(p1)\n dp = Point(np.cos(ar), np.sin(ar))\n ray = Ray(parent=ray, ior=ior, dir=dp)\n return [ray]\n \n\nclass Mirror(Optic):\n def __init__(self, **params):\n defaults = {\n 'r1': 0,\n 'r2': 0,\n 'd': 0.01,\n }\n defaults.update(params)\n d = defaults.pop('d')\n defaults['x1'] = -d/2.\n defaults['x2'] = d/2.\n gitem = CircularSolid(brush=(100,100,100,255), **defaults)\n Optic.__init__(self, gitem, **defaults)\n \n def propagateRay(self, ray):\n \"\"\"Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays\"\"\"\n \n surface = self.surfaces[0]\n p1, ai = surface.intersectRay(ray)\n if p1 is not None:\n p1 = surface.mapToItem(ray, p1)\n rd = ray['dir']\n a1 = np.arctan2(rd[1], rd[0])\n ar = a1 + np.pi - 2*ai\n ray.setEnd(p1)\n dp = Point(np.cos(ar), np.sin(ar))\n ray = Ray(parent=ray, dir=dp)\n else:\n ray.setEnd(None)\n return [ray]\n\n\nclass CircularSolid(pg.GraphicsObject, ParamObj):\n \"\"\"GraphicsObject with two circular or flat surfaces.\"\"\"\n def __init__(self, pen=None, brush=None, **opts):\n \"\"\"\n Arguments for each surface are:\n x1,x2 - position of center of _physical surface_\n r1,r2 - radius of curvature\n d1,d2 - diameter of optic\n \"\"\"\n defaults = dict(x1=-2, r1=100, d1=25.4, x2=2, r2=100, d2=25.4)\n defaults.update(opts)\n ParamObj.__init__(self)\n self.surfaces = [CircleSurface(defaults['r1'], defaults['d1']), CircleSurface(-defaults['r2'], defaults['d2'])]\n pg.GraphicsObject.__init__(self)\n for s in self.surfaces:\n s.setParentItem(self)\n \n if pen is None:\n self.pen = pg.mkPen((220,220,255,200), width=1, cosmetic=True)\n else:\n self.pen = pg.mkPen(pen)\n \n if brush is None: \n self.brush = pg.mkBrush((230, 230, 255, 30))\n else:\n self.brush = pg.mkBrush(brush)\n\n self.setParams(**defaults)\n\n def paramStateChanged(self):\n self.updateSurfaces()\n\n def updateSurfaces(self):\n self.surfaces[0].setParams(self['r1'], self['d1'])\n self.surfaces[1].setParams(-self['r2'], self['d2'])\n self.surfaces[0].setPos(self['x1'], 0)\n self.surfaces[1].setPos(self['x2'], 0)\n \n self.path = QtGui.QPainterPath()\n self.path.connectPath(self.surfaces[0].path.translated(self.surfaces[0].pos()))\n self.path.connectPath(self.surfaces[1].path.translated(self.surfaces[1].pos()).toReversed())\n self.path.closeSubpath()\n \n def boundingRect(self):\n return self.path.boundingRect()\n \n def shape(self):\n return self.path\n \n def paint(self, p, *args):\n p.setRenderHints(p.renderHints() | p.Antialiasing)\n p.setPen(self.pen)\n p.fillPath(self.path, self.brush)\n p.drawPath(self.path)\n \n\nclass CircleSurface(pg.GraphicsObject):\n def __init__(self, radius=None, 
diameter=None):\n \"\"\"center of physical surface is at 0,0\n radius is the radius of the surface. If radius is None, the surface is flat. \n diameter is of the optic's edge.\"\"\"\n pg.GraphicsObject.__init__(self)\n \n self.r = radius\n self.d = diameter\n self.mkPath()\n \n def setParams(self, r, d):\n self.r = r\n self.d = d\n self.mkPath()\n \n def mkPath(self):\n self.prepareGeometryChange()\n r = self.r\n d = self.d\n h2 = d/2.\n self.path = QtGui.QPainterPath()\n if r == 0: ## flat surface\n self.path.moveTo(0, h2)\n self.path.lineTo(0, -h2)\n else:\n ## half-height of surface can't be larger than radius\n h2 = min(h2, abs(r))\n arc = QtCore.QRectF(0, -r, r*2, r*2)\n a1 = np.arcsin(h2/r) * 180. / np.pi\n a2 = -2*a1\n a1 += 180.\n self.path.arcMoveTo(arc, a1)\n self.path.arcTo(arc, a1, a2)\n self.h2 = h2\n \n def boundingRect(self):\n return self.path.boundingRect()\n \n def paint(self, p, *args):\n return ## usually we let the optic draw.\n \n def intersectRay(self, ray):\n ## return the point of intersection and the angle of incidence\n #print \"intersect ray\"\n h = self.h2\n r = self.r\n p, dir = ray.currentState(relativeTo=self) # position and angle of ray in local coords.\n #print \" ray: \", p, dir\n p = p - Point(r, 0) ## move position so center of circle is at 0,0\n #print \" adj: \", p, r\n \n if r == 0:\n #print \" flat\"\n if dir[0] == 0:\n y = 0\n else:\n y = p[1] - p[0] * dir[1]/dir[0]\n if abs(y) > h:\n return None, None\n else:\n return (Point(0, y), np.arctan2(dir[1], dir[0]))\n else:\n #print \" curve\"\n ## find intersection of circle and line (quadratic formula)\n dx = dir[0]\n dy = dir[1]\n dr = (dx**2 + dy**2) ** 0.5\n D = p[0] * (p[1]+dy) - (p[0]+dx) * p[1]\n idr2 = 1.0 / dr**2\n disc = r**2 * dr**2 - D**2\n if disc < 0:\n return None, None\n disc2 = disc**0.5\n if dy < 0:\n sgn = -1\n else:\n sgn = 1\n \n \n br = self.path.boundingRect()\n x1 = (D*dy + sgn*dx*disc2) * idr2\n y1 = (-D*dx + abs(dy)*disc2) * idr2\n if br.contains(x1+r, y1):\n pt = Point(x1, y1)\n else:\n x2 = (D*dy - sgn*dx*disc2) * idr2\n y2 = (-D*dx - abs(dy)*disc2) * idr2\n pt = Point(x2, y2)\n if not br.contains(x2+r, y2):\n return None, None\n raise Exception(\"No intersection!\")\n \n norm = np.arctan2(pt[1], pt[0])\n if r < 0:\n norm += np.pi\n #print \" norm:\", norm*180/3.1415\n dp = p - pt\n #print \" dp:\", dp\n ang = np.arctan2(dp[1], dp[0]) \n #print \" ang:\", ang*180/3.1415\n #print \" ai:\", (ang-norm)*180/3.1415\n \n #print \" intersection:\", pt\n return pt + Point(r, 0), ang-norm\n\n \nclass Ray(pg.GraphicsObject, ParamObj):\n \"\"\"Represents a single straight segment of a ray\"\"\"\n \n sigStateChanged = QtCore.Signal()\n \n def __init__(self, **params):\n ParamObj.__init__(self)\n defaults = {\n 'ior': 1.0,\n 'wl': 500,\n 'end': None,\n 'dir': Point(1,0),\n }\n self.params = {}\n pg.GraphicsObject.__init__(self)\n self.children = []\n parent = params.get('parent', None)\n if parent is not None:\n defaults['start'] = parent['end']\n defaults['wl'] = parent['wl']\n self['ior'] = parent['ior']\n self['dir'] = parent['dir']\n parent.addChild(self)\n \n defaults.update(params)\n defaults['dir'] = Point(defaults['dir'])\n self.setParams(**defaults)\n self.mkPath()\n \n def clearChildren(self):\n for c in self.children:\n c.clearChildren()\n c.setParentItem(None)\n self.scene().removeItem(c)\n self.children = []\n \n def paramStateChanged(self):\n pass\n \n def addChild(self, ch):\n self.children.append(ch)\n ch.setParentItem(self)\n \n def currentState(self, relativeTo=None):\n 
pos = self['start']\n dir = self['dir']\n if relativeTo is None:\n return pos, dir\n else:\n trans = self.itemTransform(relativeTo)[0]\n p1 = trans.map(pos)\n p2 = trans.map(pos + dir)\n return Point(p1), Point(p2-p1)\n \n def setEnd(self, end):\n self['end'] = end\n self.mkPath()\n\n def boundingRect(self):\n return self.path.boundingRect()\n \n def paint(self, p, *args):\n #p.setPen(pg.mkPen((255,0,0, 150)))\n p.setRenderHints(p.renderHints() | p.Antialiasing)\n p.setCompositionMode(p.CompositionMode_Plus)\n p.setPen(wlPen(self['wl']))\n p.drawPath(self.path)\n \n def mkPath(self):\n self.prepareGeometryChange()\n self.path = QtGui.QPainterPath()\n self.path.moveTo(self['start'])\n if self['end'] is not None:\n self.path.lineTo(self['end'])\n else:\n self.path.lineTo(self['start']+500*self['dir'])\n\n\ndef trace(rays, optics):\n if len(optics) < 1 or len(rays) < 1:\n return\n for r in rays:\n r.clearChildren()\n o = optics[0]\n r2 = o.propagateRay(r)\n trace(r2, optics[1:])\n\n\nclass Tracer(QtCore.QObject):\n \"\"\"\n Simple ray tracer. \n \n Initialize with a list of rays and optics; \n calling trace() will cause rays to be extended by propagating them through\n each optic in sequence.\n \"\"\"\n def __init__(self, rays, optics):\n QtCore.QObject.__init__(self)\n self.optics = optics\n self.rays = rays\n for o in self.optics:\n o.sigStateChanged.connect(self.trace)\n self.trace()\n \n def trace(self):\n trace(self.rays, self.optics)\n\n"
] | [
[
"numpy.arctan2",
"numpy.arcsin",
"numpy.cos",
"numpy.clip",
"numpy.sqrt",
"numpy.sin"
]
] |
XinyueZ/some-python-codes | [
"2d7296a4deebb0cd086be34ad7d66f5042cdf6e6"
] | [
"machine_learning/tf_notMNIST_Training_Gradient_Descent.py"
] | [
"#\n# Run NN, multinomial logistic regression using simple gradient descent.\n#\nimport config\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import (Variable, constant, global_variables_initializer,\n truncated_normal, zeros)\n\nfrom tf_training_helper import TrainingHelper\n\n\nclass TF_notMNIST_Training_Gradient_Descent:\n def __init__(self, each_object_size_width=config.TRAIN_OBJECT_WIDTH, each_object_size_height=config.TRAIN_OBJECT_HEIGHT, train_batch=10000, train_steps=800, train_learning_rate=0.5):\n \"\"\"\n Constructor.\n \"\"\"\n self.each_object_size_width = each_object_size_width\n self.each_object_size_height = each_object_size_height\n self.train_batch = train_batch\n self.train_steps = train_steps\n self.train_learning_rate = train_learning_rate\n\n helper = TrainingHelper()\n self.__print_predications__ = helper.print_predications\n self.__print_test_accuracy__ = helper.print_test_accuracy\n self.__activation__ = helper.activation\n self.__loss_optimizer__ = helper.loss_optimizer\n\n def start_with(self, train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels, count_classes, beta_for_regularizer=0.01):\n \"\"\"\n Start multinomial logistic regression using simple gradient descent.\n \"\"\"\n #\n # Fixed values while training\n #\n tf_train_dataset = constant(train_dataset[:self.train_batch, :])\n tf_train_labels = constant(train_labels[:self.train_batch])\n tf_valid_dataset = constant(valid_dataset)\n tf_test_dataset = constant(test_dataset)\n\n #\n # Variables should be trained.\n # Classical weight and biases.\n #\n tf_weights = Variable(truncated_normal(\n [self.each_object_size_width * self.each_object_size_height, count_classes]))\n tf_biases = Variable(zeros([count_classes]))\n\n logits = self.__activation__(tf_train_dataset, tf_weights, tf_biases)\n loss, optimizer = self.__loss_optimizer__(\n tf_train_labels, logits, self.train_learning_rate, beta_for_regularizer, [tf_weights])\n\n #\n # Convert dataset to predication\n # The actual problem is transformed into a probabilistic problem.\n #\n predication_for_train = tf.nn.softmax(logits)\n predication_for_valid = tf.nn.softmax(\n self.__activation__(tf_valid_dataset, tf_weights, tf_biases))\n predication_for_test = tf.nn.softmax(\n self.__activation__(tf_test_dataset, tf_weights, tf_biases))\n\n #\n # Training\n #\n print(\"\\n\")\n with tf.Session() as sess:\n init = global_variables_initializer()\n sess.run(init)\n for step in range(self.train_steps):\n _, ls, predications = sess.run(\n [optimizer, loss, predication_for_train])\n self.__print_predications__(\n step, ls, predications, train_labels[:self.train_batch, :], predication_for_valid, valid_labels) \n \n self.__print_test_accuracy__(predication_for_test, test_labels)\n"
] | [
[
"tensorflow.zeros",
"tensorflow.global_variables_initializer",
"tensorflow.truncated_normal",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.nn.softmax"
]
] |
mone27/fastai | [
"af8dfc07ca3f333f8c1bdbea1803af669a53738f"
] | [
"fastai/callback/tensorboard.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/71_callback.tensorboard.ipynb (unless otherwise specified).\n\n__all__ = ['TensorBoardCallback']\n\n# Cell\nfrom ..basics import *\n\n# Cell\nimport tensorboard\nfrom torch.utils.tensorboard import SummaryWriter\nfrom .fp16 import ModelToHalf\n\n# Cell\nclass TensorBoardCallback(Callback):\n \"Saves model topology, losses & metrics\"\n def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9):\n store_attr(self, 'log_dir,trace_model,log_preds,n_preds')\n\n def before_fit(self):\n self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, \"gather_preds\") and rank_distrib()==0\n self.writer = SummaryWriter(log_dir=self.log_dir)\n if self.trace_model:\n if hasattr(self.learn, 'mixed_precision'):\n raise Exception(\"Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.\")\n b = self.dls.one_batch()\n self.learn._split(b)\n self.writer.add_graph(self.model, *self.xb)\n\n def after_batch(self):\n self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter)\n for i,h in enumerate(self.opt.hypers):\n for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter)\n\n def after_epoch(self):\n for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]):\n self.writer.add_scalar(n, v, self.train_iter)\n if self.log_preds:\n b = self.dls.valid.one_batch()\n self.learn.one_batch(0, b)\n preds = getattr(self.loss_func, 'activation', noop)(self.pred)\n out = getattr(self.loss_func, 'decodes', noop)(preds)\n x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds)\n tensorboard_log(x, y, its, outs, self.writer, self.train_iter)\n\n def after_fit(self): self.writer.close()\n\n# Cell\nfrom ..vision.data import *\n\n# Cell\n@typedispatch\ndef tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step):\n fig,axs = get_grid(len(samples), add_vert=1, return_fig=True)\n for i in range(2):\n axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)]\n axs = [r.show(ctx=c, color='green' if b==r else 'red')\n for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)]\n writer.add_figure('Sample results', fig, step)\n\n# Cell\nfrom ..vision.core import TensorPoint,TensorBBox\n\n# Cell\n@typedispatch\ndef tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step):\n fig,axs = get_grid(len(samples), add_vert=1, return_fig=True, double=True)\n for i in range(2):\n axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])]\n for x in [samples,outs]:\n axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])]\n writer.add_figure('Sample results', fig, step)"
] | [
[
"torch.utils.tensorboard.SummaryWriter"
]
] |
alarca94/senti-transfer | [
"da83a072c8d471bc74aa25b237b5e301502db869"
] | [
"utils/inout.py"
] | [
"import os\nimport yaml\n\nimport pandas as pd\nimport xml.etree.ElementTree as ET\n\nfrom types import SimpleNamespace\nfrom sklearn.model_selection import train_test_split\n\nfrom utils.experiment_utils import create_linspace\nfrom utils.preprocess import *\n\n\nSOURCE_PATH = './source_data'\nDATA_PATH = './data'\nCONFIG_PATH = './conf'\nDATASETS = ['ami', 'emoevent', 'haternet', 'hateval2019', 'mex-a3t', 'universal_joy', 'tass2019', 'detoxis']\n\n\nclass Colors:\n BLACK = '\\033[1;30m'\n RED = '\\033[1;31m'\n GREEN = '\\033[1;32m'\n YELLOW = '\\033[1;33m'\n BLUE = '\\033[1;34m'\n PURPLE = '\\033[1;35m'\n CYAN = '\\033[1;36m'\n WHITE = '\\033[1;37m'\n ENDC = '\\033[0m'\n\n\ndef colored(text, color):\n return f'{color}{text}{Colors.ENDC}'\n\n\ndef write_split_files(dataset, trn, dev, tst):\n trn.to_csv(os.path.join(DATA_PATH, dataset, 'train_es.tsv'), index=False, sep='\\t', mode='w')\n dev.to_csv(os.path.join(DATA_PATH, dataset, 'dev_es.tsv'), index=False, sep='\\t', mode='w')\n tst.to_csv(os.path.join(DATA_PATH, dataset, 'test_es.tsv'), index=False, sep='\\t', mode='w')\n\n\ndef prepare_files():\n seed = 100\n test_ratio = 0.2\n\n # EmoEvent and HaterNet\n filename = 'original_es.tsv'\n data = {'emoevent': pd.read_csv(os.path.join(SOURCE_PATH, 'emoevent', filename), sep='\\t'),\n 'haternet': pd.read_csv(os.path.join(SOURCE_PATH, 'haternet', filename), sep=';\\\\|\\\\|;',\n names=['id', 'text', 'hateful'],\n header=None,\n engine=\"python\")}\n labels = {'emoevent': 'offensive',\n 'haternet': 'hateful'}\n\n for dataset in data:\n data[dataset].text = basic_text_normalization(data[dataset].text)\n y = data[dataset][labels[dataset]]\n trn, tst = train_test_split(data[dataset], shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)\n y = trn[labels[dataset]]\n trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)\n write_split_files(dataset, trn, dev, tst)\n print(f'Dataset: {dataset} --> N. Instances: {data[dataset].shape[0]} --> Train, Dev., Test: '\n f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')\n\n # HatEval 2019\n dataset = 'hateval2019'\n n_instances = {}\n\n for phase in ['train', 'dev', 'test']:\n data = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'original_{phase}_es.csv'), sep=',')\n data.text = basic_text_normalization(data.text)\n data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\\t', mode='w')\n n_instances[phase] = data.shape[0]\n\n print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '\n f'{n_instances[\"train\"]}, {n_instances[\"dev\"]}, {n_instances[\"test\"]}')\n\n # MEX-A3T\n dataset = 'mex-a3t'\n columns = ['text', 'aggressiveness']\n trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_train.tsv'), sep='\\t', names=columns)\n tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_test.tsv'), sep='\\t', names=columns)\n\n trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.aggressiveness, random_state=seed)\n for subset in [trn, dev, tst]:\n subset.text = basic_text_normalization(subset.text)\n write_split_files(dataset, trn, dev, tst)\n print(f'Dataset: {dataset} --> N. 
Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '\n f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')\n\n # TASS 2019\n dataset = 'tass2019'\n n_instances = {}\n for phase in ['train', 'dev', 'test']:\n phase_data = pd.DataFrame()\n for country in ['ES', 'CR', 'MX', 'PE', 'UY']:\n root = ET.parse(os.path.join(SOURCE_PATH, dataset, f'TASS2019_country_{country}_{phase}.xml')).getroot()\n tweets = []\n for item in root.iter('tweet'):\n tweet = {'country': country}\n for tweet_field in item.iter():\n if tweet_field.tag not in ['tweet', 'sentiment', 'polarity']:\n tweet[tweet_field.tag] = tweet_field.text\n tweets.append(tweet)\n phase_data = phase_data.append(tweets)\n new_cols = {'tweetid': 'tweet_id', 'content': 'text', 'user': 'user_id', 'value': 'polarity'}\n phase_data.rename(columns=new_cols, inplace=True)\n phase_data = phase_data[['tweet_id', 'user_id', 'country', 'date', 'text', 'polarity']]\n phase_data.text = basic_text_normalization(phase_data.text)\n phase_data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\\t', mode='w')\n n_instances[phase] = phase_data.shape[0]\n\n print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '\n f'{n_instances[\"train\"]}, {n_instances[\"dev\"]}, {n_instances[\"test\"]}')\n\n # Universal Joy\n dataset = 'universal_joy'\n trn_data = {}\n for filename in ['small', 'large', 'combi']:\n trn_data[filename] = pd.read_csv(os.path.join(SOURCE_PATH, dataset, filename + '.csv'))\n trn_data[filename] = trn_data[filename][trn_data[filename].language == 'es']\n trn_data[filename].text = trn_data[filename].text.apply(universal_joy_cleaning)\n\n # Apparently, spanish comments in 'large' and 'combi' are the same and 'small' is created using a subset of those\n trn = pd.concat(trn_data.values(), axis=0, ignore_index=True)\n trn.drop_duplicates(inplace=True, subset='text')\n\n # There is no overlapping between training, validation and test (also, they do not contain duplicates)\n dev = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'val.csv'))\n dev.drop_duplicates(inplace=True, subset='text')\n tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'test.csv'))\n tst.drop_duplicates(inplace=True, subset='text')\n # The test set approximately represents 12.5% of the total data\n # print(tst.shape[0]/(trn.shape[0] + dev.shape[0] + tst.shape[0]))\n\n # DETOXIS\n dataset = 'detoxis'\n\n trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'train.csv'), sep=',')\n tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'test.csv'), sep=',')\n\n trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.toxicity_level, random_state=seed)\n for subset in [trn, dev, tst]:\n subset.rename(columns={'comment': 'text'}, inplace=True)\n subset.text = basic_text_normalization(subset.text)\n write_split_files(dataset, trn, dev, tst)\n print(f'Dataset: {dataset} --> N. 
Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '\n f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')\n\n\ndef read_datasets(datasets, tasks, lang='es'):\n data = {}\n for dataset in datasets:\n if dataset not in DATASETS:\n raise Exception(f'Dataset {dataset} is not in the list of available datasets!')\n\n data[dataset] = {\n 'trn': pd.read_csv(os.path.join(DATA_PATH, dataset, f'train_{lang}.tsv'), sep='\\t'),\n 'dev': pd.read_csv(os.path.join(DATA_PATH, dataset, f'dev_{lang}.tsv'), sep='\\t'),\n 'tst': pd.read_csv(os.path.join(DATA_PATH, dataset, f'test_{lang}.tsv'), sep='\\t')\n }\n\n for phase in data[dataset]:\n data[dataset][phase] = data[dataset][phase][['text'] + tasks[dataset]]\n\n return data\n\n\ndef create_namespace_from_dict(dic, name=None):\n for k, v in dic.items():\n if isinstance(v, dict):\n dic[k] = create_namespace_from_dict(v, k)\n ns = SimpleNamespace(**dic)\n ns.__name__ = name\n return ns\n\n\ndef process_config(dic, name=None):\n for k, v in dic.items():\n if k not in ['transfer_learning', 'optimization']:\n if isinstance(v, dict):\n dic[k] = process_config(v, k)\n elif isinstance(v, list):\n for vi in v:\n if isinstance(vi, dict):\n dic[k] += create_linspace(vi)\n dic[k] = dic[k][1:]\n else:\n dic[k] = [v]\n return dic\n\n\ndef load_config(config_file):\n with open(os.path.join(CONFIG_PATH, config_file), 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n return process_config(config) # create_namespace_from_dict(config)\n\n\ndef log(string, indent=0):\n start = '\\t' * indent\n print(f'{start}{string}')\n"
] | [
[
"pandas.DataFrame",
"sklearn.model_selection.train_test_split"
]
] |
cafltar/CAF_EC_Column_Rename | [
"7375678081d8931f34e7ab8b4a6e02eca112e721"
] | [
"LTAR_Flux_QC.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 21 15:53:46 2018\n\n@author: Eric S. Russell\nLaboratory for Atmospheric Research\nDept. of Civil and Environmental Engineering\nWashington State University\[email protected]\n\nNot all of these functions are used in the column rename script; these are potentially to be used with this processing \ndepending on other's thoughts. This is a trial run of dealing with code across sites.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport datetime\n\n\"\"\" \nQA/QC processing for flux data:\n Inputs:\n data: Full input data\n grade: Maximum QA/QC grade as assigned by the flux calculation code\n LE_B: Two number array with the highest (LE_B[1]) and lowest (LE_B[0]) hard limit LE value\n H_B: Same as LE_B but for H\n F_B: Same as LE-B but for Fc\n cls:\n gg:\n Outputs:\n data: Dataframe with the filtered data; does not track reason for removing data.\n \n Conditional for door_is_open_Hst since not all sites will/do have enclosure door sensors installed\n\"\"\" \n# This function not implemented into the script; still thinking about how I want to format this and integrate so user doesn't have to do a lot to make work\n\ndef Grade_cs(df,info, Site, site=False):\n if site == True: \n grade = int(info['grade'][Site])\n LE_B = [float(info['LEL'][Site]),float(info['LEU'][Site])]\n H_B = [float(info['HL'][Site]),float(info['HU'][Site])]\n F_B = [float(info['FCL'][Site]),float(info['FCU'][Site])]\n T_B = [float(info['TL'][Site]),float(info['TU'][Site])]\n elif site == False:\n grade = int(info['Val_L']['grade'])\n LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])]\n H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])]\n F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])]\n T_B = [float(info['Val_L']['T_B']),float(info['Val_U']['T_B'])]\n gg = ['H_SSITC_TEST','LE_SSITC_TEST','FC_SSITC_TEST','TAU_SSITC_TEST']\n cls =['H','LE','FC', 'TAU']\n# var = ['H_Flags','LE_Flags','Fc_Flags'] Needs flagging system for QC\n pd.options.mode.chained_assignment = None \n if (grade >9) | (grade<1):\n print('Grade number must be between 0-9.')\n return # 'exit' function and return error \n Good = None\n data = []; data=pd.DataFrame(data,index=df.index)\n if cls[1] in df.columns:\n HL = (df[cls[1]].astype(float) < LE_B[0]) | (df[cls[1]].astype(float)>LE_B[1]) | df[cls[1]].astype(float).isnull()\n if gg[1] in df.columns:\n Grade = (df[gg[1]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n df[cls[1]][~Grade] = np.NaN\n data[cls[1]+'_Flag'] = 0\n data[cls[1]+'_Flag'][~Grade] = 1\n if cls[0] in df.columns:\n HL = (df[cls[0]].astype(float) < H_B[0]) | (df[cls[0]].astype(float)> H_B[1]) | df[cls[0]].astype(float).isnull()\n if gg[0] in df.columns:\n Grade = (df[gg[0]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n df[cls[0]][~Grade] = np.NaN\n data[cls[0]+'_Flag'] = 0\n data[cls[0]+'_Flag'][~Grade] = 1\n if cls[2] in df.columns:\n HL = (df[cls[2]].astype(float) < F_B[0])|(df[cls[2]].astype(float) > F_B[1]) | df[cls[2]].astype(float).isnull()\n if gg[2] in df.columns:\n Grade = (df[gg[2]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n df[cls[2]][~Grade] = np.NaN\n data[cls[2]+'_Flag'] = 0\n data[cls[2]+'_Flag'][~Grade] = 1\n if cls[3] in df.columns:\n HL = (df[cls[3]].astype(float) < T_B[0])|(df[cls[3]].astype(float) > T_B[1]) | df[cls[3]].astype(float).isnull()\n if gg[3] in df.columns:\n Grade = (df[gg[3]].astype(float) <= grade) & (~HL)\n else: Grade = ~HL\n data[cls[3]+'_Flag'] = 0\n data[cls[3]+'_Flag'][~Grade] 
= 1\n # Rain Mask\n if 'P' in df.columns:\n Precip = (df['P'].astype(float) == 0) | (df['P'].astype(float) == -9999)\n precip = True\n data['P_Flag'] = 0\n data['P_Flag'][~Precip] = 1\n else: precip = False \n if 'CO2_sig_strgth_Min' in df.columns:\n c_sig_strength = df['CO2_sig_strgth_Min'] > 0.7\n data['CO2_Signal_Strength'] = 0\n data['CO2_Signal_Strength'][~c_sig_strength] = 1\n if 'H2O_sig_strgth_Min' in df.columns:\n w_sig_strength = df['H2O_sig_strgth_Min'] > 0.7\n data['H2O_Signal_Strength'] = 0\n data['H2O_Signal_Strength'][~w_sig_strength] = 1\n if 'CO2_samples_Tot' in df.columns:\n Samp_Good_IRGA = df['CO2_samples_Tot'].astype(float)>14400\n data['CO2_Samples_Flag'] = 0\n data['CO2_Samples_Flag'][~Samp_Good_IRGA] = 1\n irga = True\n else: irga=False\n if 'sonic_samples_Tot' in df.columns:\n Samp_Good_Sonic = df['sonic_samples_Tot'].astype(float) > 14400\n data['Sonic_Samples_Flag'] = 0\n data['Sonic_Samples_Flag'][~Samp_Good_Sonic] = 1\n sonic = True\n else: sonic=False\n if 'used_records' in df.columns: \n Samp_Good_Sonic = df['used_records'].astype(float)>14400\n sonic = True\n else: sonic=False\n if 'door_is_open_Hst' in df.columns:\n Door_Closed = df['door_is_open_Hst'].astype(float) == 0\n pc = True\n else: pc = False\n if precip&irga&sonic&pc:\n Good = Door_Closed &Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength\n elif precip&irga&sonic&~pc:\n Good = Samp_Good_Sonic&Samp_Good_IRGA&Precip&w_sig_strength&c_sig_strength\n elif precip&~irga&~sonic&~pc:\n Good = Precip&w_sig_strength&c_sig_strength\n elif precip&~irga&sonic&~pc:\n Good = Samp_Good_Sonic&Precip&w_sig_strength&c_sig_strength\n elif ~precip&~irga&sonic&~pc:\n Good = Samp_Good_Sonic&w_sig_strength&c_sig_strength\n elif ~precip&irga&sonic&pc:\n Good = Samp_Good_Sonic&Samp_Good_IRGA&w_sig_strength&c_sig_strength\n if Good is not None:\n if cls[3] in df.columns:\n df[cls[3]][~Good] = np.NaN\n if cls[2] in df.columns:\n df[cls[2]][~Good] = np.NaN\n if cls[1] in df.columns:\n df[cls[1]][~Good] = np.NaN\n if cls[0] in df.columns:\n df[cls[0]][~Good] = np.NaN\n return df, data\n\n\n#Fills in the blanks spaces with NaN's so the time index is continuous\ndef indx_fill(df, time): \n df.index = pd.to_datetime(df.index)\n # Sort index in case it came in out of order, a possibility depending on filenames and naming scheme\n df = df.sort_index()\n # Remove any duplicate times, can occur if files from mixed sources and have overlapping endpoints\n df = df[~df.index.duplicated(keep='first')]\n for k in range (0,len(df)):\n if str(df.index[k])=='NaT':\n df = df.drop(df.index[k])\n # Fill in missing times due to tower being down and pad dataframe to midnight of the first and last day\n idx = pd.date_range(df.index[0].floor('D'),df.index[len(df.index)-1].ceil('D'),freq = time)\n df = df.reindex(idx, fill_value=np.NaN)\n return df\n\n# Used to format EddyPro data by combining the date and time into a common index and dropping the filename column\ndef format_ep(df):\n df.index = df['date']+' '+df['time']\n df = df.drop(['filename'],1)\n df.index = pd.to_datetime(df.index)\n return df\n\n# This function not used in main script; potential to be used with QC function\ndef ReadIn_Initial(info):\n # Values pulled in from a separate *.csv file because easier and flexible\n grade = int(info['Val_L']['grade'])\n LE_B = [float(info['Val_L']['LE_B']),float(info['Val_U']['LE_B'])]\n H_B = [float(info['Val_L']['H_B']),float(info['Val_U']['H_B'])]\n F_B = [float(info['Val_L']['F_B']),float(info['Val_U']['F_B'])]\n gg = 
[(info['Val_L']['gg']),(info['Val_U']['gg']),(info['Val_3']['gg'])]\n cls = [(info['Val_L']['cls']),(info['Val_U']['cls']),(info['Val_3']['cls']), (info['Val_4']['cls'])]\n return grade, LE_B,H_B,F_B,gg,cls\n\n# Reads in a directory of files based on the format for either EddyPro or EasyFlux\ndef Fast_Read(filenames, time, form):\n if len(filenames) == 0:\n print('No Files in directory, check the path name.')\n return # 'exit' function and return error\n else:\n #Initialize dataframe used within function\n Final = [];Final = pd.DataFrame(Final)\n if form == 'EF':\n for k in range (0,len(filenames)):\n df = pd.read_csv(filenames[k],index_col = 'TIMESTAMP',header= 1,skiprows=[2,3],low_memory=False)\n Final = pd.concat([Final,df], sort = False)\n elif form == 'EP':\n for k in range (0,len(filenames)):\n df = pd.read_csv(filenames[k],header= 1,skiprows=[2],sep=',',low_memory=False)\n Final = pd.concat([Final,df])\n Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns\n Final =Final.drop(['filename'],1) # not needed string-based column; gets in the way of converting to floating point\n elif form == 'Biomet':\n for k in range (0,len(filenames)):\n df = pd.read_csv(filenames[k],header= 0,skiprows=[1],sep=',',low_memory=False)\n Final = pd.concat([Final,df])\n Final.index = Final['date']+' '+Final['time'] # Eddypro outputs both time and date as separate columns\n else: \n print('Format must be either EF or EP')\n return\n # Convert time index\n Final = Final.sort_index()\n Out = indx_fill(Final, time)\n return Out # Return dataframe to main function. \n\ndef Despike_7(s,ss,x,lab,delta_time, multi):\n an,Tim = [],[]\n while ss < x.index[-1]:\n x_m = np.nanmean(x[ss:s])\n x_s = np.nanstd(x[ss:s])\n x_d = x[ss:s]\n an.append((x_d > (x_m-(multi*x_s))) & (x_d < (x_m+(multi*x_s))))\n ss+= datetime.timedelta(days=delta_time)\n Tim.append((x_d.index))\n s+= datetime.timedelta(days=delta_time)\n qq = np.hstack(an)\n an = pd.DataFrame(qq, columns = [lab])\n an.index = np.hstack(Tim)\n an = an[~an.index.duplicated(keep='first')]\n# x[an[lab]==False] = np.NaN\n return an\n\ndef Met_QAQC(**kwargs):\n Q = None\n if 'Tair' in kwargs.keys():\n Tair = pd.DataFrame(kwargs['Tair'])\n Q = Tair; Q = pd.DataFrame(Q); \n Q['Tair_Hard_Limit'] = (Q[Tair.columns[0]].astype(float) <= 50) & (Q[Tair.columns[0]].astype(float) >= -40)\n Q['Tair_Change'] = ~(np.abs(Q[Tair.columns[0]].diff() >= 25)) & (np.abs(Q[Tair.columns[0]].diff() != 0)) # (~np.isnan(Q[Tair.columns[0]].diff())) & \n Q['Tair_Day_Change'] = (Tair.resample('D').mean().diff !=0)\n Q['Tair_Filtered'] = Q[Tair.columns[0]][Q['Tair_Hard_Limit'] & Q['Tair_Change'] & Q['Tair_Day_Change']]\n else:\n print('**** Temperature not present ****')\n \n if 'RH' in kwargs.keys():\n RH = pd.DataFrame(kwargs['RH']) \n if Q is None:\n Q = RH; Q = pd.DataFrame(Q)\n else: Q= Q.join(RH)\n Q['RH_Hard_Limit'] = (Q[RH.columns[0]].astype(float) <= 100) & (Q[RH.columns[0]].astype(float) >= 0)\n Q['RH_gt_100'] = (Q[RH.columns[0]].astype(float) >= 100) & (Q[RH.columns[0]].astype(float) <= 110)\n Q['RH_Change'] = (np.abs(Q[RH.columns[0]].astype(float).diff() <= 50)) & (np.abs(Q[RH.columns[0]].diff() != 0)) # & (~np.isnan(Q[RH.columns[0]].astype(float).diff()))\n Q['RH_Day_Change'] = (RH.resample('D').mean().diff !=0) \n Q['RH_Filtered'] = Q[RH.columns[0]][Q['RH_Hard_Limit']&Q['RH_Change']& Q['RH_Day_Change']]\n Q['RH_Filtered'] = Q['RH_Filtered'].replace(to_replace=Q['RH_Filtered'][Q['RH_gt_100']], value = 100)\n# 
Q['RH_Filtered'][Q['RH_gt_100']]=100\n else:\n print('**** RH not present ****')\n\n if 'P' in kwargs.keys():\n P = pd.DataFrame(kwargs['P']); \n if Q is None:\n Q = P; Q = pd.DataFrame(Q)\n else: Q= Q.join(P) \n Q['P_Hard_Limit'] = (Q[P.columns[0]].astype(float) <= 100) &(Q[P.columns[0]].astype(float) >= 70)\n Q['P_Change'] = (np.abs(Q[P.columns[0]].diff() <= 3.1)) & (np.abs(Q[P.columns[0]].diff() != 0)) # & (~np.isnan(Q[P.columns[0]].diff())) \n Q['P_Filtered'] = Q[P.columns[0]][Q['P_Hard_Limit'] & Q['P_Change']]\n if ('Tair' in kwargs.keys()) & ('z' in kwargs.keys()):\n MSLP = []; \n H = pd.DataFrame((8.314*(Tair[Tair.columns[0]]+273.15))/(0.029*9.81)/1000) # Scale height\n x = pd.DataFrame(-kwargs['z']/H[H.columns[0]]); \n MSLP = P[P.columns[0]]/np.exp(x[x.columns[0]]) # Mean Sea Level Pressure\n MSLP = pd.DataFrame(MSLP);MSLP = MSLP.rename(columns={MSLP.columns[0]:\"MSLP\"})\n Q= Q.join(MSLP)\n Q['MSLP_Hard_Limit'] = (Q[MSLP.columns[0]].astype(float) <= 110) &(Q[MSLP.columns[0]].astype(float) >= 80)\n Q['MSLP_Change'] = (np.abs(Q[MSLP.columns[0]].diff() <= 31)) & (np.abs(Q[MSLP.columns[0]].diff() != 0)) #& (~np.isnan(Q[MSLP.columns[0]].diff())) \n Q['MSLP_Filtered'] = Q[MSLP.columns[0]][Q['MSLP_Hard_Limit'] & Q['MSLP_Change']]\n else:\n print('**** Mean sea level pressure not present ****')\n else:\n print('**** Pressure not present ****')\n \n if 'WS' in kwargs.keys():\n WS = pd.DataFrame(kwargs['WS'])\n if Q is None:\n Q = WS; Q = pd.DataFrame(Q)\n else: Q= Q.join(WS)\n Q['WS_Hard_Limit'] = (Q[WS.columns[0]].astype(float) < 60) & (Q[WS.columns[0]].astype(float) >= 0)\n Q['WS_Change'] = (np.abs(Q[WS.columns[0]].diff() <= 15)) & (np.abs(Q[WS.columns[0]].diff() != 0)) #& (~np.isnan(Q[WS.columns[0]].diff())) \n Q['WS_Day_Change'] = (WS.resample('D').mean().diff !=0) \n Q['WS_Filtered'] = Q[WS.columns[0]][Q['WS_Hard_Limit']&Q['WS_Change']&Q['WS_Day_Change']]\n else:\n print('**** Wind Speed not present ****')\n \n if 'WD' in kwargs.keys():\n WD = pd.DataFrame(kwargs['WD'])\n if Q is None:\n Q = WD; Q = pd.DataFrame(Q)\n else: Q= Q.join(WD)\n Q['WD_Hard_Limit'] = (Q[WD.columns[0]].astype(float) < 360) & (Q[WD.columns[0]].astype(float) >= 0)\n Q['WD_Change'] = (np.abs(Q[WD.columns[0]].diff() != 0)) # (~np.isnan(Q[WD.columns[0]].diff())) &\n Q['WD_Filtered'] = Q[WD.columns[0]][Q['WD_Hard_Limit']&Q['WD_Change']]\n else:\n print('**** Wind Direction not present ****')\n \n if 'PAR' in kwargs.keys():\n PAR = pd.DataFrame(kwargs['PAR']); \n if Q is None:\n Q = PAR; Q = pd.DataFrame(Q)\n else: Q= Q.join(PAR)\n Q['PAR_Hard_Limit'] = (Q[PAR.columns[0]].astype(float) >= 0) & (Q[PAR.columns[0]].astype(float) < 5000)\n Q['PAR_Change'] = (np.abs(Q[PAR.columns[0]].diff() <= 1500))# & (~np.isnan(Q[PAR.columns[0]].diff()))\n Q['PAR_Day_Change'] = (PAR.resample('D').mean().diff != 0) # Causing problems for some reason\n Q['PAR_Filtered'] = Q[PAR.columns[0]][Q['PAR_Hard_Limit']&Q['PAR_Change']&Q['PAR_Day_Change']]\n else:\n print('**** PAR not present ****')\n \n if 'Rn' in kwargs.keys():\n Rn = pd.DataFrame(kwargs['Rn']) \n if Q is None:\n Q = Rn; Q = pd.DataFrame(Q)\n else: Q= Q.join(Rn)\n Q['Rn_Hard_Limit'] = (Q[Rn.columns[0]].astype(float) >= -150) & (Q[Rn.columns[0]].astype(float) <= 1500) \n Q['Rn_Change'] = (np.abs(Q[Rn.columns[0]].astype(float).diff() <= 500)) & (np.abs(Q[Rn.columns[0]].diff() != 0)) #& (~np.isnan(Q[Rn.columns[0]].astype(float).diff())) \n Q['Rn_Day_Change'] = (Rn.resample('D').mean().diff !=0) \n Q['Rn_Filtered'] = 
Q[Rn.columns[0]][Q['Rn_Hard_Limit']&Q['Rn_Change']&Q['Rn_Day_Change']]\n else:\n print('**** Net Radiations not present ****')\n \n if 'Precip' in kwargs.keys():\n Precip = pd.DataFrame(kwargs['Precip'])\n if Q is None:\n Q = P; Q = pd.DataFrame(Q)\n else: Q= Q.join(Precip)\n Q['Precip_Hard_Limit'] = (Q[Precip.columns[0]].astype(float) < 100) & (Q[Precip.columns[0]].astype(float) >= 0)\n Z_Precip = Q[Precip.columns[0]].astype(float) ==0\n# if ('RH' in kwargs.keys()) & ('Tair' in kwargs.keys()):\n# Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90)\n# Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0)\n# Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH_gt_90']&~Q['Precip_Tair_lt_Zero']]\n# Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n# elif ('RH' in kwargs.keys()) & ('Tair' not in kwargs.keys()):\n# Q['Precip_RH_gt_90'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['RH_Filtered'].astype(float) >= 90)\n# Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']&Q['Precip_RH']]\n# Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n if 'Tair' in kwargs.keys():\n Q['Precip_Tair_lt_Zero'] = (Q[Precip.columns[0]].astype(float) > 0) & (Q['Tair_Filtered'] < 0)\n Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']& ~Q['Precip_Tair_lt_Zero']]\n Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n else:\n Q['Precip_Filtered'] = Q[Precip.columns[0]][Q['Precip_Hard_Limit']]\n Q['Precip_Filtered'] = Q['Precip_Filtered'].replace(to_replace=Q['Precip_Filtered'][Z_Precip], value = 0)\n else:\n print('**** Precipitation not present ****')\n \n if 'VPD' in kwargs.keys():\n VPD = pd.DataFrame(kwargs['VPD'])\n if Q is None:\n Q = VPD; Q = pd.DataFrame(Q)\n else: Q= Q.join(VPD)\n Q['VPD_Hard_Limit'] = (Q[VPD.columns[0]].astype(float) < 50) & (Q[VPD.columns[0]].astype(float) >= 0)\n Q['VPD_Change'] = (np.abs(Q[VPD.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[VPD.columns[0]].diff() != 0)) \n Q['VPD_Day_Change'] = (VPD.resample('D').mean().diff !=0) \n Q['VPD_Filtered'] = Q[VPD.columns[0]][Q['VPD_Hard_Limit']&Q['VPD_Change']&Q['VPD_Day_Change']]\n\n if 'e' in kwargs.keys():\n e = pd.DataFrame(kwargs['e'])\n if Q is None:\n Q = e; Q = pd.DataFrame(Q)\n else: Q= Q.join(e)\n Q['e_Hard_Limit'] = (Q[e.columns[0]].astype(float) < 50) & (Q[e.columns[0]].astype(float) >= 0)\n Q['e_Change'] = (np.abs(Q[e.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e.columns[0]].diff() != 0)) \n Q['e_Day_Change'] = (e.resample('D').mean().diff !=0) \n Q['e_Filtered'] = Q[e.columns[0]][Q['e_Hard_Limit']&Q['e_Change']&Q['e_Day_Change']]\n \n if 'e_s' in kwargs.keys():\n e_s = pd.DataFrame(kwargs['e_s'])\n if Q is None:\n Q = e_s; Q = pd.DataFrame(Q)\n else: Q= Q.join(e_s)\n Q['e_s_Hard_Limit'] = (Q[e_s.columns[0]].astype(float) < 50) & (Q[e_s.columns[0]].astype(float) >= 0)\n Q['e_s_Change'] = (np.abs(Q[e_s.columns[0]].astype(float).diff() <= 10)) & (np.abs(Q[e_s.columns[0]].diff() != 0)) \n Q['e_s_Day_Change'] = (e_s.resample('D').mean().diff !=0) \n Q['e_s_Filtered'] = Q[e_s.columns[0]][Q['e_s_Hard_Limit']&Q['e_s_Change']&Q['e_s_Day_Change']] \n return Q\n "
] | [
[
"numpy.nanmean",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.nanstd",
"numpy.exp",
"numpy.hstack",
"pandas.to_datetime",
"pandas.concat"
]
] |
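The row above collects eddy-flux QA/QC helpers that combine hard physical limits, per-variable change tests, and a moving-window mean ± k*std spike test (`Despike_7`). Below is a minimal pandas sketch of the hard-limit and despike ideas only; the temperature limits, window length, and multiplier are illustrative assumptions, not values taken from any site configuration file.

```python
# Minimal sketch of the hard-limit + windowed despike pattern used above.
# The -40..50 degC limits, 7-day window, and k = 3.5 are illustrative
# assumptions, not values from a site configuration.
import numpy as np
import pandas as pd

idx = pd.date_range("2018-08-01", periods=30 * 48, freq="30min")
rng = np.random.default_rng(0)
tair = pd.Series(20 + 5 * rng.standard_normal(len(idx)), index=idx)
tair.iloc[100] = 75.0   # inject an out-of-range spike

# 1) Hard limits, as in the *_Hard_Limit flags of Met_QAQC
hard_ok = (tair >= -40) & (tair <= 50)

# 2) Windowed mean +/- k*std test, the idea behind Despike_7
window = 48 * 7          # 7 days of 30-minute records
k = 3.5                  # analogous to the `multi` argument
roll_mean = tair.rolling(window, min_periods=48, center=True).mean()
roll_std = tair.rolling(window, min_periods=48, center=True).std()
spike_ok = (tair > roll_mean - k * roll_std) & (tair < roll_mean + k * roll_std)

tair_filtered = tair.where(hard_ok & spike_ok)
print(int(tair_filtered.isna().sum()), "records flagged")
```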
n1ckfg/RoutedFusion | [
"1733911c7fe025b461b75e48461658709996e39c"
] | [
"voxelgrid/tsdf/run_tsdf_fusion.py"
] | [
"#!/scratch_net/nudel/esandstroem/venvs/tsdf_fusion_env/bin/python\nimport os\napp_path = '/scratch_net/nudel/esandstroem/venvs/tsdf_fusion_env/bin'\nos.environ[\"PATH\"] = app_path + os.pathsep + os.environ[\"PATH\"]\n\nfrom TSDFHandle import *\nimport numpy as np\nimport cv2\nfrom utils import extract_mesh_marching_cubes\nfrom visualization import plot_mesh\nimport plyfile\nfrom sys import argv\nimport pathlib\n\nif (len(argv) < 3):\n\tprint('Usage: {0} <name of depth directory> <save mode>'.format(argv[0]))\n\texit(0)\n\n\nCURRENT_DIR = str(pathlib.Path().absolute())\ndepth_path = CURRENT_DIR + '/' + argv[1]\ncampose_path = CURRENT_DIR + '/' + 'left_camera_matrix'\n\n\nbox = np.array([[-4,4],[-4,4],[-4,4]]) # each cell depicts the interval where we will reconstruct the shape i.e.\n# [[-xmin,xmax],[-ymin,ymax],[-zmin,zmax]]\ntsdf = TSDF(bbox=box, resolution=0.025, resolution_factor=1)\n\ndepth_dir = os.listdir(depth_path)\nsortOrder_depth = [int(x[:-4]) for x in depth_dir]\ndepth_dir = [x for _, x in sorted(zip(sortOrder_depth, depth_dir))]\n\ncampose_dir = os.listdir(campose_path)\nsortOrder_pose = [int(x[:-4]) for x in campose_dir]\ncampose_dir = [x for _, x in sorted(zip(sortOrder_pose, campose_dir))]\n\ncamera_intrinsics = np.array([[256, 0, 256], [0, 256, 256], [0, 0, 1]]).astype(np.float32)\n# apparently, the tsdf fusion code expects that the camera coordinate system is such that z is in the\n# camera viewing direction, y is down and x is to the right. This is achieved by a serie of rotations\nrot_180_around_y = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]).astype(np.float32)\nrot_180_around_z = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]).astype(np.float32)\nrotation = np.matmul(rot_180_around_z, rot_180_around_y)\n\nfor i in range(len(depth_dir)):\n\tdepth = cv2.imread(depth_path + '/' + depth_dir[i], -1)\n\tdepth = depth / 1000\n\tweight_map = np.ones(depth.shape)\n\tcampose = np.linalg.inv(np.loadtxt(campose_path + '/' + campose_dir[i]).astype(np.float32))\n\tcampose = np.matmul(camera_intrinsics, np.matmul(rotation,campose[0:3, 0:4]))\n\ttsdf.fuse(campose, depth.astype(np.float32), weight_map.astype(np.float32))\n\n\nmesh = extract_mesh_marching_cubes(tsdf.get_volume()[:, :, :, 0])\nif argv[2]:\n\tmesh.write('tsdf_fusion_' + argv[1] + '.ply')\nplot_mesh(mesh)\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.matmul",
"numpy.loadtxt"
]
] |
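The fusion loop above composes a 3x4 projection for `tsdf.fuse` from the intrinsics, a fixed axis-flip rotation (so z points along the viewing direction, y down, x right), and the inverse of the loaded camera pose. A standalone numpy sketch of that composition follows; the pose is a dummy value and the `TSDFHandle` bindings are not reproduced.

```python
# Sketch of the pose composition used above: K @ (R_flip @ inv(T_wc)[:3, :4]).
# The camera-to-world pose T_wc below is a dummy, purely for illustration.
import numpy as np

K = np.array([[256, 0, 256],
              [0, 256, 256],
              [0,   0,   1]], dtype=np.float32)

# 180-degree rotations about y and z, combined as in the script
rot_y = np.diag([-1.0, 1.0, -1.0]).astype(np.float32)
rot_z = np.diag([-1.0, -1.0, 1.0]).astype(np.float32)
R_flip = rot_z @ rot_y

T_wc = np.eye(4, dtype=np.float32)      # dummy camera-to-world pose
T_wc[:3, 3] = [0.1, 0.0, 1.5]

T_cw = np.linalg.inv(T_wc)              # world-to-camera
P = K @ (R_flip @ T_cw[0:3, 0:4])       # 3x4 matrix handed to fuse()
print(P.shape)                          # (3, 4)
```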
xiangze/edward | [
"6419751d1d849c84c502e5ff3f7249b9bbc7b3aa"
] | [
"tests/util/test_get_descendants.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom edward.models import Bernoulli, Normal\nfrom edward.util import get_descendants\n\n\nclass test_get_descendants_class(tf.test.TestCase):\n\n def test_v_structure(self):\n \"\"\"a -> b -> e <- d <- c\"\"\"\n with self.test_session():\n a = Normal(0.0, 1.0)\n b = Normal(a, 1.0)\n c = Normal(0.0, 1.0)\n d = Normal(c, 1.0)\n e = Normal(b * d, 1.0)\n self.assertEqual(set(get_descendants(a)), set([b, e]))\n self.assertEqual(get_descendants(b), [e])\n self.assertEqual(set(get_descendants(c)), set([d, e]))\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\n def test_a_structure(self):\n \"\"\"e <- d <- a -> b -> c\"\"\"\n with self.test_session():\n a = Normal(0.0, 1.0)\n b = Normal(a, 1.0)\n c = Normal(b, 1.0)\n d = Normal(a, 1.0)\n e = Normal(d, 1.0)\n self.assertEqual(set(get_descendants(a)), set([b, c, d, e]))\n self.assertEqual(get_descendants(b), [c])\n self.assertEqual(get_descendants(c), [])\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\n def test_chain_structure(self):\n \"\"\"a -> b -> c -> d -> e\"\"\"\n with self.test_session():\n a = Normal(0.0, 1.0)\n b = Normal(a, 1.0)\n c = Normal(b, 1.0)\n d = Normal(c, 1.0)\n e = Normal(d, 1.0)\n self.assertEqual(set(get_descendants(a)), set([b, c, d, e]))\n self.assertEqual(set(get_descendants(b)), set([c, d, e]))\n self.assertEqual(set(get_descendants(c)), set([d, e]))\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\n def test_tensor(self):\n with self.test_session():\n a = Normal(0.0, 1.0)\n b = tf.constant(2.0)\n c = a + b\n d = Normal(c, 1.0)\n self.assertEqual(get_descendants(a), [d])\n self.assertEqual(get_descendants(b), [d])\n self.assertEqual(get_descendants(c), [d])\n self.assertEqual(get_descendants(d), [])\n\n def test_control_flow(self):\n with self.test_session():\n a = Bernoulli(0.5)\n b = Normal(0.0, 1.0)\n c = tf.constant(0.0)\n d = tf.cond(tf.cast(a, tf.bool), lambda: b, lambda: c)\n e = Normal(d, 1.0)\n self.assertEqual(get_descendants(a), [e])\n self.assertEqual(get_descendants(b), [e])\n self.assertEqual(get_descendants(c), [e])\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\n def test_scan(self):\n \"\"\"copied from test_chain_structure\"\"\"\n def cumsum(x):\n return tf.scan(lambda a, x: a + x, x)\n\n with self.test_session():\n a = Normal(tf.ones([3]), tf.ones([3]))\n b = Normal(cumsum(a), tf.ones([3]))\n c = Normal(cumsum(b), tf.ones([3]))\n d = Normal(cumsum(c), tf.ones([3]))\n e = Normal(cumsum(d), tf.ones([3]))\n self.assertEqual(set(get_descendants(a)), set([b, c, d, e]))\n self.assertEqual(set(get_descendants(b)), set([c, d, e]))\n self.assertEqual(set(get_descendants(c)), set([d, e]))\n self.assertEqual(get_descendants(d), [e])\n self.assertEqual(get_descendants(e), [])\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.ones",
"tensorflow.scan",
"tensorflow.cast",
"tensorflow.constant",
"tensorflow.test.main"
]
] |
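The test file above asserts which random variables are descendants of which in chain, V- and A-shaped graphs. Below is a dependency-free sketch of the same descendant relation on a plain adjacency dict; it illustrates the expected behaviour, not Edward's actual TensorFlow graph traversal.

```python
# Plain-Python illustration of "descendants" as asserted in the tests above:
# for the chain a -> b -> c -> d -> e, descendants(a) == {b, c, d, e}, etc.
from collections import deque

def descendants(graph, node):
    """Return all nodes reachable from `node`, excluding `node` itself."""
    seen, queue = set(), deque(graph.get(node, ()))
    while queue:
        child = queue.popleft()
        if child not in seen:
            seen.add(child)
            queue.extend(graph.get(child, ()))
    return seen

chain = {"a": ["b"], "b": ["c"], "c": ["d"], "d": ["e"], "e": []}
assert descendants(chain, "a") == {"b", "c", "d", "e"}
assert descendants(chain, "d") == {"e"}
assert descendants(chain, "e") == set()
```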
dongfangyixi/ParlAI | [
"424a2b3c7086593f699c76612dffd1d925986177"
] | [
"parlai/agents/transformer/mixer.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTransformer Agents.\n\"\"\"\nfrom typing import Optional\nfrom parlai.core.params import ParlaiParser\nfrom parlai.core.opt import Opt\nfrom parlai.core.agents import Agent\nfrom parlai.utils.torch import padded_3d\nfrom parlai.core.torch_classifier_agent import TorchClassifierAgent\nfrom parlai.core.torch_ranker_agent import TorchRankerAgent\nfrom parlai.core.torch_generator_agent import TorchGeneratorAgent\nfrom parlai.utils.misc import recursive_getattr\nfrom parlai.utils.logging import logging\n\nfrom .modules import (\n TransformerMemNetModel,\n TransformerGeneratorModel,\n TransformerLinearWrapper,\n MixerModel,\n MixerGeneratorModel,\n)\n\nimport torch\n\n\ndef add_common_cmdline_args(parser):\n \"\"\"\n Add common command line args.\n \"\"\"\n parser.add_argument(\n '-esz',\n '--embedding-size',\n type=int,\n default=300,\n help='Size of all embedding layers. Must be a multiple of --n-heads.',\n )\n parser.add_argument(\n '-nl', '--n-layers', type=int, default=2, help='Number of transformer layers.'\n )\n parser.add_argument(\n '-hid',\n '--ffn-size',\n type=int,\n default=300,\n help='Hidden size of the FFN layers',\n )\n parser.add_argument(\n '--dropout',\n type=float,\n default=0.0,\n help='Dropout used around embeddings and before layer layer normalizations. '\n 'This is used in Vaswani 2017 and works well on large datasets.',\n )\n parser.add_argument(\n '--attention-dropout',\n type=float,\n default=0.0,\n help='Dropout used after attention softmax. This is not used in Vaswani 2017.',\n )\n parser.add_argument(\n '--relu-dropout',\n type=float,\n default=0.0,\n help='Dropout used after the ReLU in the FFN. Not used in Vaswani 2017, '\n 'but used in Tensor2Tensor.',\n )\n parser.add_argument(\n '--n-heads', type=int, default=2, help='Number of multihead attention heads'\n )\n parser.add_argument(\n '--learn-positional-embeddings',\n type='bool',\n default=False,\n help='If off, sinusoidal embeddings are used. If on, position embeddings are '\n 'learned from scratch.',\n )\n parser.add_argument('--embeddings-scale', type='bool', default=True)\n parser.add_argument(\n '--n-positions',\n type=int,\n default=None,\n hidden=True,\n help='Number of positional embeddings to learn. Defaults '\n 'to truncate or 1024 if not provided.',\n )\n parser.add_argument(\n '--n-segments',\n type=int,\n default=0,\n help='The number of segments that support the model. '\n 'If zero no segment and no langs_embedding.',\n )\n parser.add_argument(\n '--variant',\n choices={'aiayn', 'xlm', 'prelayernorm', 'bart'},\n default='aiayn',\n help='Chooses locations of layer norms, etc. prelayernorm '\n 'is used to match some fairseq models',\n recommended='xlm',\n )\n parser.add_argument(\n '--activation',\n choices={'relu', 'gelu'},\n default='relu',\n help='Nonlinear activation to use. 
AIAYN uses relu, but '\n 'more recent papers prefer gelu.',\n recommended='gelu',\n )\n parser.add_argument(\n '--output-scaling',\n type=float,\n default=1.0,\n help='scale the output of every transformer by this quantity.',\n )\n parser.add_argument(\n '--share-word-embeddings',\n type='bool',\n default=True,\n help='Share word embeddings table for candidate and context'\n 'in the memory network',\n )\n parser.add_argument(\n '-nel',\n '--n-encoder-layers',\n type=int,\n default=-1,\n help='This will overide the n-layers for asymmetrical transformers',\n )\n parser.add_argument(\n '-ndl',\n '--n-decoder-layers',\n type=int,\n default=-1,\n help='This will overide the n-layers for asymmetrical transformers',\n )\n parser.add_argument(\n '--model-parallel',\n type='bool',\n default=False,\n help='Shard the layers across multiple GPUs.',\n )\n\n\nclass Transformer(Agent):\n \"\"\"\n Placeholder Transformer Agent.\n\n Placeholder class, which just throws an error telling the user to specify whether\n they want the ranker or the generator.\n \"\"\"\n\n def __init__(self, opt, shared=None):\n raise RuntimeError(\n \"`--model transformer` is not a valid choice. Please select either \"\n \"`--model transformer/ranker` or `--model transformer/generator\"\n )\n\n\nclass TransformerRankerAgent(TorchRankerAgent):\n \"\"\"\n Transformer Ranker Agent.\n\n Implementation of a TorchRankerAgent, where the model is a Transformer\n \"\"\"\n\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n \"\"\"\n Add command-line arguments specifically for this agent.\n \"\"\"\n super().add_cmdline_args(parser, partial_opt=partial_opt)\n agent = parser.add_argument_group('Transformer Arguments')\n add_common_cmdline_args(agent)\n # memory and knowledge arguments\n agent.add_argument(\n '--use-memories',\n type='bool',\n default=False,\n help='use memories: must implement the function '\n '`_vectorize_memories` to use this',\n )\n agent.add_argument(\n '--wrap-memory-encoder',\n type='bool',\n default=False,\n help='wrap memory encoder with MLP',\n )\n agent.add_argument(\n '--memory-attention',\n type=str,\n default='sqrt',\n choices=['cosine', 'dot', 'sqrt'],\n help='similarity for basic attention mechanism '\n 'when using transformer to encode memories',\n )\n # model specific arguments\n agent.add_argument('--normalize-sent-emb', type='bool', default=False)\n agent.add_argument('--share-encoders', type='bool', default=True)\n parser.add_argument(\n '--share-word-embeddings',\n type='bool',\n default=True,\n help='Share word embeddings table for candidate and context'\n 'in the memory network',\n )\n agent.add_argument(\n '--learn-embeddings', type='bool', default=True, help='learn embeddings'\n )\n agent.add_argument(\n '--data-parallel',\n type='bool',\n default=False,\n help='use model in data parallel, requires ' 'multiple gpus',\n )\n agent.add_argument(\n '--reduction-type',\n type=str,\n default='mean',\n choices=['first', 'max', 'mean'],\n help='Type of reduction at the end of transformer',\n )\n\n parser.set_defaults(learningrate=0.0001, optimizer='adamax', truncate=1024)\n cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt)\n\n return agent\n\n def _score(self, output, cands):\n if cands.dim() == 2:\n return torch.matmul(output, cands.t())\n elif cands.dim() == 3:\n return torch.bmm(output.unsqueeze(1), cands.transpose(1, 2)).squeeze(1)\n else:\n raise RuntimeError(\n 'Unexpected candidate dimensions {}' 
''.format(cands.dim())\n )\n\n def build_model(self, states=None):\n \"\"\"\n Build and return model.\n \"\"\"\n model = MixerModel(self.opt, self.dict)\n if self.opt['embedding_type'] != 'random':\n self._copy_embeddings(model.embeddings.weight, self.opt['embedding_type'])\n return model\n\n def batchify(self, obs_batch, sort=False):\n \"\"\"\n Override so that we can add memories to the Batch object.\n \"\"\"\n batch = super().batchify(obs_batch, sort)\n if self.opt['use_memories']:\n valid_obs = [(i, ex) for i, ex in enumerate(obs_batch) if self.is_valid(ex)]\n valid_inds, exs = zip(*valid_obs)\n mems = None\n if any('memory_vecs' in ex for ex in exs):\n mems = [ex.get('memory_vecs', None) for ex in exs]\n batch.memory_vecs = mems\n return batch\n\n def _vectorize_memories(self, obs):\n # TODO: move this to Torch Ranker Agent\n raise NotImplementedError(\n 'Abstract class: user must implement this function to use memories'\n )\n\n def vectorize(self, *args, **kwargs):\n \"\"\"\n Override to include vectorization of memories.\n \"\"\"\n kwargs['add_start'] = False\n kwargs['add_end'] = False\n obs = super().vectorize(*args, **kwargs)\n if self.opt['use_memories']:\n obs = self._vectorize_memories(obs)\n return obs\n\n def encode_candidates(self, padded_cands):\n \"\"\"\n Encode candidates.\n \"\"\"\n _, cands = self.model(xs=None, mems=None, cands=padded_cands)\n\n return cands\n\n def score_candidates(self, batch, cand_vecs, cand_encs=None):\n \"\"\"\n Score candidates.\n \"\"\"\n # convoluted check that not all memories are empty\n if (\n self.opt['use_memories']\n and batch.memory_vecs is not None\n and sum(len(m) for m in batch.memory_vecs)\n ):\n mems = padded_3d(batch.memory_vecs, pad_idx=self.NULL_IDX)\n else:\n mems = None\n\n if cand_encs is not None:\n # we pre-encoded the candidates, do not re-encode here\n cand_vecs = None\n\n context_h, cands_h = self.model(xs=batch.text_vec, mems=mems, cands=cand_vecs)\n\n if cand_encs is not None:\n cands_h = cand_encs\n scores = self._score(context_h, cands_h)\n\n return scores\n\n\nclass TransformerGeneratorAgent(TorchGeneratorAgent):\n \"\"\"\n TransformerGeneratorAgent.\n\n Implementation of TorchGeneratorAgent, where the model is a Transformer\n \"\"\"\n\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n \"\"\"\n Add command-line arguments specifically for this agent.\n \"\"\"\n agent = parser.add_argument_group('Transformer Arguments')\n add_common_cmdline_args(agent)\n cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt)\n\n super().add_cmdline_args(parser, partial_opt=partial_opt)\n return agent\n\n def build_model(self, states=None):\n \"\"\"\n Build and return model.\n \"\"\"\n model = MixerGeneratorModel(self.opt, self.dict)\n if self.opt['embedding_type'] != 'random':\n self._copy_embeddings(\n model.encoder.embeddings.weight, self.opt['embedding_type']\n )\n return model\n\n def _resize_token_embeddings(self, state_dict, msg=None):\n \"\"\"\n Resize the token embeddings when are adding extra special tokens.\n \"\"\"\n # map extra special tokens carefully\n new_size = self.model.embeddings.weight.size()[0]\n orig_size = state_dict['embeddings.weight'].size()[0]\n logging.info(f'Resizing token embeddings from {orig_size} to {new_size}')\n if new_size <= orig_size:\n # new size should be greater than original size,\n # as we are adding special tokens\n raise RuntimeError(msg)\n\n for emb_weights in [\n 'embeddings.weight',\n 
'encoder.embeddings.weight',\n 'decoder.embeddings.weight',\n ]:\n # get new_embs\n old_embs = state_dict[emb_weights]\n new_embs = recursive_getattr(self.model, emb_weights).to(old_embs.device)\n # copy over old weights\n new_embs.data[:orig_size, :] = old_embs.data[:orig_size, :]\n # reset in state dict\n state_dict[emb_weights] = new_embs\n\n return state_dict\n\n\nclass TransformerClassifierAgent(TorchClassifierAgent):\n \"\"\"\n Classifier based on Transformer.\n \"\"\"\n\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n TransformerRankerAgent.add_cmdline_args(\n parser, partial_opt=partial_opt\n ) # add transformer args\n super().add_cmdline_args(parser, partial_opt=partial_opt)\n parser.add_argument(\n '--load-from-pretrained-ranker',\n type='bool',\n default=False,\n help='load model from base transformer ranking model '\n '(used for pretraining)',\n )\n parser.set_defaults(reduction_type='first')\n return parser\n\n def build_model(self):\n num_classes = len(self.class_list)\n self.base_model = MixerModel(self.opt, self.dict)\n return TransformerLinearWrapper(self.base_model.context_encoder, num_classes)\n\n def vectorize(self, *args, **kwargs):\n \"\"\"\n Add the start and end token to the text.\n \"\"\"\n kwargs['add_start'] = True\n kwargs['add_end'] = True\n obs = super().vectorize(*args, **kwargs)\n return obs\n\n def _set_text_vec(self, *args, **kwargs):\n \"\"\"\n Add the start and end token to the text.\n \"\"\"\n obs = super()._set_text_vec(*args, **kwargs)\n\n if 'text_vec' in obs and 'added_start_end' not in obs:\n obs.force_set(\n 'text_vec', self._add_start_end_tokens(obs['text_vec'], True, True)\n )\n obs['added_start_end'] = True\n\n # check truncation after adding start end tokens\n if obs.get('text_vec') is not None:\n truncated_vec = self._check_truncate(\n obs['text_vec'], self.text_truncate, True\n )\n obs.force_set('text_vec', torch.LongTensor(truncated_vec))\n\n return obs\n\n def score(self, batch):\n return self.model(batch.text_vec)\n\n def load_state_dict(self, state_dict):\n \"\"\"\n Load the state dict into model.\n\n This is easily overridable to facilitate transfer of state dicts.\n \"\"\"\n if self.is_finetune and self.opt['load_from_pretrained_ranker']:\n self.base_model.load_state_dict(state_dict, strict=False)\n else:\n self.model.load_state_dict(state_dict)\n\n\n"
] | [
[
"torch.LongTensor"
]
] |
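`_resize_token_embeddings` above grows the embedding matrices when extra special tokens are added, copying the original rows into the larger table. A minimal torch sketch of that copy follows; the sizes are made up, and the real code walks several named weights in the state dict rather than two standalone layers.

```python
# Sketch of resizing a token-embedding table while keeping the old rows,
# mirroring the copy in _resize_token_embeddings above. Sizes are arbitrary.
import torch
import torch.nn as nn

orig_size, new_size, dim = 100, 104, 16   # e.g. 4 extra special tokens

old = nn.Embedding(orig_size, dim)
new = nn.Embedding(new_size, dim)

with torch.no_grad():
    new.weight[:orig_size, :] = old.weight   # copy over the old weights

assert torch.equal(new.weight[:orig_size], old.weight)
print(new.weight.shape)   # torch.Size([104, 16])
```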
ReyesDeJong/Deep-SVDD-PyTorch | [
"1fc7eae1474556f869d5c5422da74fd4fe2f1aed"
] | [
"src/datasets/hits_dataset.py"
] | [
"import os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data import Subset\nfrom torch.utils.data.dataset import Dataset # For custom datasets\nfrom torchvision import transforms\n\nPROJECT_PATH = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', '..'))\nsys.path.append(PROJECT_PATH)\n\nfrom src.base.torchvision_dataset import TorchvisionDataset\nfrom src.datasets.preprocessing import get_target_label_idx\nfrom src.datasets.data_splitter import DatasetDivider\nfrom src.datasets.data_set_generic import Dataset\n\n\nclass HitsDataset(TorchvisionDataset):\n def __init__(self, root: str, normal_class=1):\n super().__init__(root)\n\n self.n_classes = 2 # 0: normal, 1: outlier\n self.normal_classes = tuple([normal_class])\n self.outlier_classes = list(range(0, 2))\n self.outlier_classes.remove(normal_class)\n\n self.data_dict = pd.read_pickle(self.root)\n # hardcoded selected channel\n images = self.normalize_by_image(self.data_dict['images'])[..., 3][\n ..., np.newaxis]\n labels = np.array(self.data_dict['labels'])\n\n dataset = Dataset(data_array=images, data_label=labels, batch_size=50)\n data_splitter = DatasetDivider(test_size=0.3, validation_size=0.1)\n data_splitter.set_dataset_obj(dataset)\n train_dataset, test_dataset, val_dataset = \\\n data_splitter.get_train_test_val_set_objs()\n\n transform = transforms.Compose([transforms.ToTensor()])\n target_transform = transforms.Lambda(\n lambda x: int(x in self.outlier_classes))\n\n train_set = Hits(train_dataset.data_array, train_dataset.data_label,\n transform=transform, target_transform=target_transform)\n train_idx_normal = get_target_label_idx(\n np.array(train_set.label_arr), self.normal_classes)\n self.train_set = Subset(train_set, train_idx_normal)\n print(self.train_set.__len__())\n\n self.val_all_set = Hits(val_dataset.data_array, val_dataset.data_label,\n transform=transform,\n target_transform=target_transform)\n val_idx_normal = get_target_label_idx(\n np.array(self.val_all_set.label_arr), self.normal_classes)\n self.val_normal_set = Subset(self.val_all_set, val_idx_normal)\n print(self.val_normal_set.__len__())\n\n self.test_set = Hits(test_dataset.data_array, test_dataset.data_label,\n transform=transform,\n target_transform=target_transform)\n\n def normalize_by_image(self, images):\n images -= np.nanmin(images, axis=(1, 2))[:, np.newaxis, np.newaxis, :]\n images = images / np.nanmax(images, axis=(1, 2))[\n :, np.newaxis, np.newaxis, :]\n return images\n\n\nclass Hits(Dataset):\n def __init__(self, images, labels, transform, target_transform):\n \"\"\"\n \"\"\"\n # Transforms\n self.transform = transform\n self.target_transform = target_transform\n\n self.image_arr = images\n self.label_arr = labels\n print(self.image_arr.shape)\n self.data_len = self.label_arr.shape[0]\n\n def __getitem__(self, index):\n single_image = self.image_arr[index]\n single_image_label = self.label_arr[index]\n\n if self.transform is not None:\n img = self.transform(single_image)\n\n if self.target_transform is not None:\n target = self.target_transform(single_image_label)\n\n return img, target, index # only line changed\n\n def __len__(self):\n return self.data_len\n"
] | [
[
"pandas.read_pickle",
"numpy.nanmax",
"torch.utils.data.Subset",
"numpy.nanmin",
"numpy.array"
]
] |
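`normalize_by_image` above rescales each image and channel to [0, 1] with `nanmin`/`nanmax` over the spatial axes. A small numpy check of that broadcasting on random data; the (N, H, W, C) shape is illustrative only.

```python
# Per-image, per-channel min-max scaling over the spatial axes, as in
# normalize_by_image above. The (N, H, W, C) shape here is illustrative.
import numpy as np

rng = np.random.default_rng(1)
images = rng.normal(size=(5, 21, 21, 4))

images -= np.nanmin(images, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
images /= np.nanmax(images, axis=(1, 2))[:, np.newaxis, np.newaxis, :]

print(images.min(), images.max())   # 0.0 and 1.0
```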
Amitdedhia6/DrugDiscovery | [
"c70dec96cee4d0d643a8b9de30530b6871fdf05e"
] | [
"generate_embeddings.py"
] | [
"import torch\nimport torch.nn as nn\nimport os\nfrom common import base_data_path\nfrom typing import List\nimport pandas as pd\n\n\nCONTEXT_SIZE = 1 # 1 words to the left, 1 to the right\nEMDEDDING_DIM = 3\nword_to_ix = {}\nix_to_word = {}\n\n\ndef make_context_vector(context, word_to_ix):\n idxs = [word_to_ix[w] for w in context]\n return torch.tensor(idxs, dtype=torch.long)\n\n\ndef get_index_of_max(input):\n index = 0\n for i in range(1, len(input)):\n if input[i] > input[index]:\n index = i\n return index\n\n\ndef get_max_prob_result(input, ix_to_word):\n return ix_to_word[get_index_of_max(input)]\n\n\ndef split_smiles_repr(smile_repr: str) -> List[str]:\n element_list = []\n skip_next = False\n for i in range(len(smile_repr)):\n if skip_next:\n skip_next = False\n continue\n\n element = smile_repr[i]\n if (i < (len(smile_repr) - 1)) and (smile_repr[i].isalpha()):\n possible_element = element + smile_repr[i+1]\n if possible_element in word_to_ix:\n element = possible_element\n skip_next = True\n\n if element in word_to_ix:\n element_list.append(element)\n else:\n raise ValueError('Inappropriate argument to function get_elements_from_smiles_data of Vocab class')\n return element_list\n\n\ndef get_data(sequence_list: List[str]):\n _sequence_list = []\n sequence_elements_list = []\n\n for s in sequence_list:\n split_elements = split_smiles_repr(s)\n _sequence_list.append(s)\n sequence_elements_list.append(split_elements)\n\n return sequence_elements_list\n\n\nfilepath = os.path.join(base_data_path, \"vocab.txt\")\nf = open(filepath, \"r\")\nelements_list = f.read().splitlines()\nelements_list.append(' ')\nf.close()\n\nvocab = elements_list\nvocab_size = len(elements_list)\n\nfor i, word in enumerate(vocab):\n word_to_ix[word] = i\n ix_to_word[i] = word\n\nfilepath = os.path.join(base_data_path, \"dataset_v1.csv\")\ndf = pd.read_csv(filepath, sep=\",\", header=0)\nsmiles_data = get_data(df.SMILES.tolist())\n\n\nclass CBOW(torch.nn.Module):\n def __init__(self, vocab_size, embedding_dim):\n super(CBOW, self).__init__()\n\n self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.linear1 = nn.Linear(embedding_dim, 128)\n self.activation_function1 = nn.ReLU()\n self.linear2 = nn.Linear(128, vocab_size)\n self.activation_function2 = nn.LogSoftmax(dim=-1)\n\n def forward(self, inputs):\n embeds = sum(self.embeddings(inputs)).view(1, -1)\n out = self.linear1(embeds)\n out = self.activation_function1(out)\n out = self.linear2(out)\n out = self.activation_function2(out)\n return out\n\n def get_word_emdedding(self, word):\n word = torch.LongTensor([word_to_ix[word]])\n return self.embeddings(word).view(1, -1)\n\n\nmodel = CBOW(vocab_size, EMDEDDING_DIM)\nloss_function = nn.NLLLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n\n\nfor epoch in range(50):\n total_loss = 0\n for smiles_element_list in smiles_data:\n for i in range(1, len(smiles_element_list) - 1):\n context = [smiles_element_list[i - 1], smiles_element_list[i + 1]]\n target = smiles_element_list[i]\n context_vector = make_context_vector(context, word_to_ix)\n model.zero_grad()\n log_probs = model(context_vector)\n loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))\n total_loss += loss.item()\n loss.backward()\n optimizer.step()\n print(f\"Epoch - {epoch}, Loss - {total_loss}\")\n"
] | [
[
"torch.nn.NLLLoss",
"torch.nn.Linear",
"pandas.read_csv",
"torch.tensor",
"torch.nn.Embedding",
"torch.nn.LogSoftmax",
"torch.LongTensor",
"torch.nn.ReLU"
]
] |
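The CBOW model above looks up the left and right neighbours of each SMILES element, sums their embeddings, and predicts the centre element. A compact sketch of the context-vector lookup and the embedding sum with a toy vocabulary; the vocabulary and dimensions are made-up values, not the contents of `vocab.txt`.

```python
# Toy version of the CBOW input path above: map a (left, right) context to
# indices, embed, and sum. Vocabulary and dimensions are illustrative only.
import torch
import torch.nn as nn

vocab = ["C", "O", "N", "(", ")", "=", " "]
word_to_ix = {w: i for i, w in enumerate(vocab)}

def make_context_vector(context, word_to_ix):
    return torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)

embeddings = nn.Embedding(len(vocab), 3)     # EMDEDDING_DIM = 3 in the script
context = ["C", "O"]                          # left and right neighbours
ctx_vec = make_context_vector(context, word_to_ix)
summed = embeddings(ctx_vec).sum(dim=0).view(1, -1)   # input to the linear layers
print(summed.shape)   # torch.Size([1, 3])
```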
ehsanul/brick | [
"291c0783f3b062cf73887cb3581dd92342891165"
] | [
"heuristic/train/nn/train-nn.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport sys\nimport pathlib\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nEPOCHS = 1000\n\n# The patience parameter is the amount of epochs to check for improvement\nEARLY_STOP = keras.callbacks.EarlyStopping(monitor='val_loss', patience=30)\n\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [cost]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label = 'Val Error')\n plt.ylim([0,5])\n plt.legend()\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$cost^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n plt.ylim([0,20])\n plt.legend()\n plt.show()\n\n# we hard-code the values instead of using stats so that integration with\n# predictor using the model is easier\nscaling = pd.DataFrame(data={\n 'min': [-10000, -10000, -10000, -2300, -2300, -2300, -6.0, -6.0, -6.0, -3.2, -3.2, -3.2],\n 'max': [ 10000, 10000, 10000, 2300, 2300, 2300, 6.0, 6.0, 6.0, 3.2, 3.2, 3.2],\n}, index=[ 'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw'])\n\n# scale to range [0, 1]\n# TODO try polar coordinates. for velocity: https://math.stackexchange.com/questions/2444965/relationship-between-cartesian-velocity-and-polar-velocity\ndef scale(x):\n return (x - scaling['min']) / (scaling['max'] - scaling['min'])\n\ndef build_model():\n model = keras.Sequential([\n layers.Dense(128, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),\n layers.Dense(128, activation=tf.nn.relu),\n\n # these extra layers seem to hurt more than they help!\n #layers.Dropout(0.01),\n #layers.Dense(64, activation=tf.nn.relu),\n\n # this doesn't work as well as a single 64-wide layer\n #layers.Dense(12, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),\n #layers.Dense(12, activation=tf.nn.relu),\n #layers.Dense(12, activation=tf.nn.relu),\n #layers.Dense(12, activation=tf.nn.relu),\n #layers.Dense(12, activation=tf.nn.relu),\n\n layers.Dense(1)\n ])\n #optimizer = tf.keras.optimizers.RMSprop(0.001)\n optimizer = tf.train.AdamOptimizer(0.001)\n model.compile(loss='mean_squared_error',\n optimizer=optimizer,\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\n\n# should be the time.csv from generate-data's time binary\ndataset_path = sys.argv[1]\n\ncolumn_names = ['cost', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'avx', 'avy', 'avz', 'roll', 'pitch', 'yaw']\nraw_dataset = pd.read_csv(dataset_path, names=column_names,\n na_values = \"\", #comment='\\t',\n sep=\",\", skipinitialspace=True)\n\n\n# visualize the data!\npos_plot = sns.pairplot(raw_dataset[[\"cost\", \"x\", \"y\", \"z\"]], diag_kind=\"kde\")\npos_plot.savefig(\"./pos.fig.png\")\nvel_plot = sns.pairplot(raw_dataset[[\"cost\", \"vx\", \"vy\", \"vz\"]], diag_kind=\"kde\")\nvel_plot.savefig(\"./vel.fig.png\")\navel_plot = sns.pairplot(raw_dataset[[\"cost\", \"avx\", \"avy\", \"avz\"]], diag_kind=\"kde\")\navel_plot.savefig(\"./avel.fig.png\")\nrot_plot = 
sns.pairplot(raw_dataset[[\"cost\", \"roll\", \"pitch\", \"yaw\"]], diag_kind=\"kde\")\nrot_plot.savefig(\"./rot.fig.png\")\npos_rot_plot = sns.pairplot(raw_dataset[[\"cost\", \"x\", \"y\", \"yaw\"]], diag_kind=\"kde\")\npos_rot_plot.savefig(\"./pos_rot.fig.png\")\n\ndataset = raw_dataset.copy()\ndataset.tail()\n\n# we don't have missing data\n# dataset.isna().sum()\n# dataset = dataset.dropna()\n\n# split into training vs test datasets\ntrain_dataset = dataset.sample(frac=0.95,random_state=0)\ntest_dataset = dataset.drop(train_dataset.index)\n\n# using stats from full dataset\nstats = raw_dataset.describe()\nstats.pop(\"cost\")\nstats = stats.transpose()\nstats\n\ntrain_labels = train_dataset.pop('cost')\ntest_labels = test_dataset.pop('cost')\n\nscaled_train_dataset = scale(train_dataset)\nscaled_test_dataset = scale(test_dataset)\n\n# build and train moddel\nmodel = build_model()\nmodel.summary()\nhistory = model.fit(scaled_train_dataset, train_labels, epochs=EPOCHS,\n validation_split = 0.2, verbose=0, callbacks=[EARLY_STOP, PrintDot()])\nplot_history(history)\n\n# check against test set\nloss, mae, mse = model.evaluate(scaled_test_dataset, test_labels, verbose=0)\nprint(\"Testing set Mean Abs Error: {:5.2f} cost\".format(mae))\n\n# plot all test predictions\ntest_predictions = model.predict(scaled_test_dataset).flatten()\nplt.scatter(test_labels, test_predictions)\nplt.xlabel('True Values [cost]')\nplt.ylabel('Predictions [cost]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0,plt.xlim()[1]])\nplt.ylim([0,plt.ylim()[1]])\nplt.plot([-100, 100], [-100, 100])\nplt.show()\n\n# error distribution\nerror = test_predictions - test_labels\nplt.hist(error, bins = 25)\nplt.xlabel(\"Prediction Error [cost]\")\nplt.ylabel(\"Count\")\nplt.show()\n\nmodel.save('./simple_throttle_cost_model.h5')\nsaved_model_path = tf.contrib.saved_model.save_keras_model(model, \"./simple_throttle_cost_saved_model\")\n\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"pandas.DataFrame",
"tensorflow.contrib.saved_model.save_keras_model",
"tensorflow.train.AdamOptimizer",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.hist",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
]
] |
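The training script above scales every input column to [0, 1] against hard-coded bounds so the predictor can reuse the same constants. A short pandas check of that transform on a three-column subset; the sample rows are arbitrary values inside the documented ranges.

```python
# The [0, 1] scaling used above: (x - min) / (max - min) with fixed bounds.
# Only three of the twelve columns are shown; sample values are arbitrary.
import pandas as pd

scaling = pd.DataFrame(
    {"min": [-10000, -2300, -3.2], "max": [10000, 2300, 3.2]},
    index=["x", "vx", "yaw"],
)

def scale(df):
    return (df - scaling["min"]) / (scaling["max"] - scaling["min"])

samples = pd.DataFrame({"x": [0.0, 5000.0], "vx": [-1150.0, 2300.0], "yaw": [0.0, 3.2]})
print(scale(samples))   # every value falls in [0, 1]
```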
juanpablordz/moog.github.io | [
"d7995d3563492378d0877ce8d16f5ca9a8031794"
] | [
"moog/action_spaces/joystick.py"
] | [
"\"\"\"Joystick action space for controlling agent avatars.\"\"\"\n\nfrom . import abstract_action_space\nfrom dm_env import specs\nimport numpy as np\n\n\nclass Joystick(abstract_action_space.AbstractActionSpace):\n \"\"\"Joystick action space.\"\"\"\n\n def __init__(self, scaling_factor=1., action_layers='agent',\n constrained_lr=False, control_velocity=False, momentum=0.):\n \"\"\"Constructor.\n \n Args:\n scaling_factor: Scalar. Scaling factor multiplied to the action.\n agent_layer: String or iterable of strings. Elements (or itself if\n string) must be keys in the environment state. All sprites in\n these layers will be acted upon by this action space.\n control_velocity: Bool. Whether to control velocity (True) or force\n (False).\n constrained_lr: Bool. If True, joystick is contrained to actions\n parallel to the x-axis, by zeroing out the y-axis (component 1)\n of the action.\n momentum: Float in [0, 1]. Discount factor for previous action. This\n should be zero if control_velocity is False, because imparting\n forces automatically gives momentum to the agent(s) being\n controlled. If control_velocity is True, setting this greater\n than zero gives the controlled agent(s) momentum. However, the\n velocity is clipped at scaling_factor, so the agent only retains\n momentum when stopping or changing direction and does not\n accelerate.\n \"\"\"\n self._scaling_factor = scaling_factor\n if not isinstance(action_layers, (list, tuple)):\n action_layers = (action_layers,)\n self._action_layers = action_layers\n self._constrained_lr = constrained_lr\n self._control_velocity = control_velocity\n self._momentum = momentum\n\n self._action_spec = specs.BoundedArray(\n shape=(2,), dtype=np.float32, minimum=-1, maximum=1)\n\n def step(self, state, action):\n \"\"\"Apply action to environment state.\n\n Args:\n state: OrderedDict. Environment state.\n action: Numpy float array of size (2) in [-1, 1]. Force to apply.\n \"\"\"\n if self._constrained_lr:\n action[1] = 0.\n\n self._action *= self._momentum\n self._action += self._scaling_factor * action\n self._action = np.clip(\n self._action, -self._scaling_factor, self._scaling_factor)\n \n for action_layer in self._action_layers:\n for sprite in state[action_layer]:\n if self._control_velocity:\n sprite.velocity = self._action / sprite.mass\n else:\n sprite.velocity += self._action / sprite.mass\n\n def reset(self, state):\n \"\"\"Reset action space at start of new episode.\"\"\"\n del state\n self._action = np.zeros(2)\n \n def random_action(self):\n \"\"\"Return randomly sampled action.\"\"\"\n return np.random.uniform(-1., 1., size=(2,))\n \n def action_spec(self):\n return self._action_spec\n"
] | [
[
"numpy.random.uniform",
"numpy.clip",
"numpy.zeros"
]
] |
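`Joystick.step` above decays the previous action by `momentum`, adds the scaled joystick input, and clips the result to the scaling factor before applying it as a force or velocity. A numpy-only sketch of that accumulator outside the class; the parameter values are arbitrary.

```python
# The momentum/clip accumulator from Joystick.step above, outside the class.
# scaling_factor and momentum values are arbitrary illustrative choices.
import numpy as np

scaling_factor, momentum = 0.5, 0.9
state_action = np.zeros(2)                    # reset() starts from zero

for raw in ([1.0, 0.0], [1.0, 0.0], [-1.0, 1.0]):   # joystick inputs in [-1, 1]
    state_action *= momentum
    state_action += scaling_factor * np.asarray(raw)
    state_action = np.clip(state_action, -scaling_factor, scaling_factor)
    print(state_action)
```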
yyliu01/TraCoCo | [
"eecbc92c961d393deaa31726739a94b7f495d893"
] | [
"Code/VnetLA/validate.py"
] | [
"import os\nimport math\nimport torch\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom medpy import metric\nimport torch.nn.functional as F\nfrom Configs.config import config\nfrom Model.Vnet import VNet as Vnet\nfrom cc3d import connected_components\nfrom Dataloader.dataset import LAHeartDataset\n\n\"\"\"\n# https://github.com/kleinzcy/SASSnet/blob/master/code/test_util.py\ndef getLargestCC(segmentation):\n # from skimage.measure import label as sm_label\n labels = sm_label(segmentation)\n assert (labels.max() != 0) # assume at least 1 CC\n largestCC = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1\n return largestCC\n\"\"\"\n\n\ndef cct(pseudo_label):\n labels_out, N = connected_components(pseudo_label, connectivity=26, return_N=True)\n for segid in range(1, N + 1):\n extracted_image = labels_out * (labels_out == segid)\n if extracted_image.sum() < 8000:\n pseudo_label[labels_out == segid] = 0\n return pseudo_label\n\n\ndef test_all_case(net, val_set, num_classes, patch_size=(112, 112, 80), stride_xy=18, stride_z=4,\n post_process=False, visual=False):\n\n total_metric = 0.0\n assert val_set.aug is False, \">> no augmentation for test set\"\n dataloader = iter(val_set)\n tbar = range(len(val_set))\n tbar = tqdm(tbar, ncols=135)\n for (idx, _) in enumerate(tbar):\n image, label = next(dataloader)\n prediction, score_map = test_single_case(net, image, stride_xy, stride_z, patch_size,\n num_classes=num_classes,\n post_process=post_process)\n\n if np.sum(prediction) == 0:\n single_metric = (0, 0, 0, 0)\n else:\n single_metric = calculate_metric_percase(np.array(prediction),\n np.array(label[:]))\n\n total_metric += np.asarray(single_metric)\n\n if visual:\n # import nibabel as nib\n # struggle for where to save; modify it if you need.\n raise NotImplementedError\n\n avg_metric = total_metric / len(val_set)\n print(\"|dice={:.4f}|mIoU={:.4f}|95HD={:.4f}|ASD={:.4f}|\".format(avg_metric[0], avg_metric[1],\n avg_metric[3], avg_metric[2]))\n\n return avg_metric\n\n\ndef test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1,\n post_process=False):\n\n image = image.squeeze()\n w, h, d = image.shape\n\n # if the size of image is less than patch_size, then padding it\n add_pad = False\n if w < patch_size[0]:\n w_pad = patch_size[0] - w\n add_pad = True\n else:\n w_pad = 0\n if h < patch_size[1]:\n h_pad = patch_size[1] - h\n add_pad = True\n else:\n h_pad = 0\n if d < patch_size[2]:\n d_pad = patch_size[2] - d\n add_pad = True\n else:\n d_pad = 0\n wl_pad, wr_pad = w_pad // 2, w_pad - w_pad // 2\n hl_pad, hr_pad = h_pad // 2, h_pad - h_pad // 2\n dl_pad, dr_pad = d_pad // 2, d_pad - d_pad // 2\n if add_pad:\n image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad), (dl_pad, dr_pad)], mode='constant',\n constant_values=0)\n ww, hh, dd = image.shape\n\n sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1\n sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1\n sz = math.ceil((dd - patch_size[2]) / stride_z) + 1\n score_map = np.zeros((num_classes,) + image.shape).astype(np.float32)\n cnt = np.zeros(image.shape).astype(np.float32)\n\n for x in range(0, sx):\n xs = min(stride_xy * x, ww - patch_size[0])\n for y in range(0, sy):\n ys = min(stride_xy * y, hh - patch_size[1])\n for z in range(0, sz):\n zs = min(stride_z * z, dd - patch_size[2])\n test_patch = image[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]]\n test_patch = np.expand_dims(np.expand_dims(test_patch, axis=0), axis=0).astype(np.float32)\n test_patch = 
torch.from_numpy(test_patch).cuda(non_blocking=True)\n y1, _ = net(test_patch)\n y = F.softmax(y1, dim=1)\n y = y.cpu().data.numpy()\n y = y[0, :, :, :, :]\n score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] \\\n = score_map[:, xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + y\n cnt[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] \\\n = cnt[xs:xs + patch_size[0], ys:ys + patch_size[1], zs:zs + patch_size[2]] + 1\n score_map = score_map / np.expand_dims(cnt, axis=0)\n label_map = np.argmax(score_map, axis=0)\n\n if post_process:\n label_map = cct(label_map)\n # label_map = getLargestCC(label_map) feel free to change the post-process approach\n\n if add_pad:\n label_map = label_map[wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d]\n score_map = score_map[:, wl_pad:wl_pad + w, hl_pad:hl_pad + h, dl_pad:dl_pad + d]\n\n return label_map, score_map\n\n\ndef calculate_metric_percase(pred, gt):\n dice = metric.binary.dc(pred, gt)\n jc = metric.binary.jc(pred, gt)\n hd = metric.binary.hd95(pred, gt)\n asd = metric.binary.asd(pred, gt)\n\n return dice, jc, hd, asd\n\n\ndef test_calculate_metric(ckpt_path, vis=False, post=False):\n net = Vnet(n_channels=1, n_classes=2,\n normalization='batchnorm', has_dropout=True).cuda()\n net.load_state_dict(torch.load(ckpt_path))\n net.eval()\n val_dataset = LAHeartDataset(os.path.join(config.code_path, \"Dataloader\"),\n config.data_path,\n split=\"eval\", config=config)\n\n # follows the previous works' setting\n avg_metric = test_all_case(net, val_dataset, num_classes=2,\n patch_size=(112, 112, 80), stride_xy=18, stride_z=4,\n post_process=post, visual=vis)\n\n return avg_metric\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Medical Semi-supervised Semantic Segmentation (valid)')\n parser.add_argument(\"--env_name\", default=\"traCoCo(8-label,spatial_weight(kl)=0.3,hyp=0.1,iters=9000)\",\n type=str, help=\"your environment folder name for training\")\n\n parser.add_argument(\"--visual\", action=\"store_true\",\n help=\"your environment folder name for training\")\n\n parser.add_argument(\"--post\", action=\"store_true\",\n help=\"implement post process or not\")\n\n cmd_line = parser.parse_args()\n default_path = os.path.join(config.code_path, \"saved\", cmd_line.env_name)\n ckpt = os.listdir(default_path)\n ckpt = [i for i in ckpt if \".pth\" in str(i)][0]\n print(\"validate {} for LA dataset ...\".format(str(ckpt)))\n metric = test_calculate_metric(os.path.join(default_path, ckpt), vis=cmd_line.visual,\n post=cmd_line.post)\n"
] | [
[
"numpy.sum",
"torch.load",
"numpy.zeros",
"torch.nn.functional.softmax",
"numpy.asarray",
"numpy.argmax",
"numpy.expand_dims",
"torch.from_numpy",
"numpy.array",
"numpy.pad"
]
] |
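`test_single_case` above tiles the padded volume with overlapping patches, accumulates softmax scores, and divides by a per-voxel hit count so overlapping predictions are averaged. A 1-D numpy sketch of the clamped start indices and the count normalisation; lengths and strides are made up.

```python
# 1-D sketch of the overlapping-patch tiling in test_single_case above:
# start indices are clamped so the last patch ends at the boundary, and
# overlaps are averaged by dividing by a hit count. Sizes are made up.
import math
import numpy as np

length, patch, stride = 100, 32, 18
steps = math.ceil((length - patch) / stride) + 1

score = np.zeros(length)
count = np.zeros(length)
for s in range(steps):
    start = min(stride * s, length - patch)
    score[start:start + patch] += 1.0      # stand-in for a softmax score
    count[start:start + patch] += 1

averaged = score / count                    # every position is covered at least once
print(int(count.min()), int(count.max()))
```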
dcstrandberg/aspect-sentiment | [
"0177888d4fe96d49b78e44f5bd24be619c93bf00"
] | [
"aspect_sentiment.py"
] | [
"import spacy\nfrom textblob import TextBlob\nimport pandas as pd\n\n# Import functions from other files\nfrom tweet_handlers import pullTweetsFromCSV, tweetPulls\n\n### Declare functions to standardize, identify, and analyze input text\n# Will ultimately take in a list of tweets and return:\n# - Word counts\n# - Split of positive / negative aspects\n# - Brand identification?\n\n#visualizeText() is a funtion to diagram sentences for help troubleshooting\n# Inputs: \n# - nlp: an NLP object, \n# - txt = a string containing the sentence to be diagramed, \n# - writeFilename: a string containing the filename to write the HTML diagram to\n# Returns:\n# - writeFilename: the path of the file that contains the HTML diagram\ndef visualizeText(nlp, txt, writeFilename):\n doc = nlp(txt)\n html = spacy.displacy.render(doc, style='dep')\n\n filePath = './' + writeFilename + '.html'\n\n with open(filePath, 'w') as f:\n f.write(html)\n \n return filePath\n\n\n#extractDescriptors() is a funtion to pull aspects and descriptors from a list of sentences\n# Inputs: \n# - nlp: an NLP object, \n# - sentenceList: a list of strinsg containing the sentences to be analyzed\n# Outputs: \n# - list of dictionaries containing 'aspect' and 'description' -- not broken by tweet\n\ndef extractDescriptors(nlp, sentenceList):\n #We'll ultimately return this aspects list\n aspects = []\n aspects_lemma = []\n attributes = []\n attributes_lemma = []\n\n\n #We will iterate through the sentences\n for i, aSentence in enumerate( sentenceList ):\n if i % 100 == 0: print(\"Tweet# \", str(i))\n doc = nlp(aSentence)\n \n for token in doc:\n\n ###TODO: \n # Currently there's no standardization that makes it a 1:1 Noun + Adjective, so that needs to be fixed\n # Also need to add in a case that checks for pronoun resolution and sees what we can do about that\n\n # We need to identify each noun, and find its descendants that are (pos_ == 'ADJ' or pos_ == 'VERB') and (dep_ == 'amod' or dep_ == 'acl')\n\n # Modifying rule to examine ALL nouns, not just the subject of the sentence\n #if token.dep_ == 'nsubj' and token.pos_ == 'NOUN':\n if (token.pos_ == 'ADJ' or token.pos_ == 'VERB') and (token.dep_ == 'amod' or token.dep_ == 'acl'):\n\n #Now append the things\n aspects.append (token.head.text)\n aspects_lemma.append(token.head.lemma_)\n\n attributes.append( token.text )\n attributes_lemma.append( token.lemma_ )\n\n\n return ( aspects , attributes, aspects_lemma, attributes_lemma ) \n\n# Need a function that pulls attributes for each keyword in the tweet DF, since we need them to be kept separate\n# extractTweetAttributes: \n# Takes a DF of tweets, keywords, etc. 
and pulls out adjectives for each\n# Inputs:\n# - nlp: an NLP object,\n# - tweet_df: pandas dataframe containing colums:\n# - Tweet \n# - Keyword\n# - Spanish\n# - Date\n# Returns:\n# - attribute_df: dataframe containing the list of...\n# ...aspects & attributes for each keyword / spanish pair\ndef extractTweetAttributes(nlp, tweet_df):\n #define return df\n attribute_df = pd.DataFrame( columns = [\n 'Keyword',\n 'Spanish',\n 'aspect',\n 'attribute',\n 'aspect_lemma',\n 'attribute_lemma'\n ])\n\n # Now create a set for the different keywords and spanish words\n keySet = set( tweet_df['Keyword'] )\n \n for aKey in keySet:\n print(\"Extracting \", aKey)\n spanishWord = tweet_df.loc[ tweet_df['Keyword'] == aKey ]['Spanish'].iloc[0]\n\n # And this is where we actually add the various analyses\n ( aspectList , attributeList, aspectList_lemma, attributeList_lemma ) = extractDescriptors( nlp, tweet_df[ tweet_df['Keyword'] == aKey ]['tweet'] ) \n\n\n # Now that we've got the data, create lookup lists for the Keyword & Spanish words\n keyList = [aKey] * len(aspectList)\n spanishList = [spanishWord] * len(aspectList)\n\n temp_df = pd.DataFrame({\n 'Keyword': keyList,\n 'Spanish': spanishList,\n 'aspect': aspectList,\n 'attribute': attributeList,\n 'aspect_lemma': aspectList_lemma,\n 'attribute_lemma': attributeList_lemma\n })\n\n # Finally, append the data for this keyword to the attribute dataframe\n attribute_df = attribute_df.append( temp_df )\n \n return attribute_df\n\ndef countAttributes( aspect_df ):\n\n temp_df = pd.DataFrame({\n 'Keyword': aspect_df['Keyword'],\n 'Spanish': aspect_df['Spanish'],\n 'aspect': aspect_df['aspect_lemma'],\n 'attribute': aspect_df['attribute_lemma']\n })\n\n return temp_df.value_counts()\n\n# In the main, this is where the tweet files are loaded...\n# ...and routed through the analysis functions\nif __name__ == \"__main__\":\n print(\"In the main\")\n \n # Create the NLP object that will be used for all the text processing\n #nlp = spacy.load(\"en_core_web_sm\")\n # We're actually using a spanish NLP object instead of an English one\n nlp = spacy.load(\"es_core_news_sm\")\n\n # Pull in CSV files that hold all the tweets\n tweetFileList = [\n './tweet_data/tweet_db_08.27.2021.csv'\n ]\n\n # Create the DF of tweets from the CSV File\n tweet_df = pullTweetsFromCSV( tweetFileList )#, fileEncoding='ANSI' )\n\n # Instead of pulling tweets from a file, we're going to get new tweets\n # First we need to designate a list of english + spanish keywords to search for\n keyword_df = pd.read_csv('./keyword_list.csv')\n\n #tweet_df = tweetPulls( keyword_df )\n\n #Save the tweet-df because of errors\n #tweet_df.to_csv('./tweet_data/tweet_db_08.27.2021.csv')#, encoding='ANSI')\n\n # Run the tweets through the attribute extractor\n aspect_df = extractTweetAttributes ( nlp, tweet_df)\n\n \n # Run the aspects & attributes through a modified version of the wordcount function\n count_df = countAttributes( aspect_df )\n # - Not to mention run some sort of pronoun resolution\n \n count_df.to_csv('./tweet_data/aspect_count_08.27.2021.csv')"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
williamhowardsnyder/OnClass | [
"07b2917dbdf01a1de54771de3383bbaa4bb2f283"
] | [
"utils.py"
] | [
"from anndata import read_h5ad\nimport sys\nfrom time import time\nfrom scipy import stats, sparse\nimport numpy as np\nimport collections\nimport pickle\nfrom sklearn.preprocessing import normalize\nimport os\nfrom collections import Counter\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score,accuracy_score,precision_recall_fscore_support, cohen_kappa_score, auc, average_precision_score,f1_score,precision_recall_curve\nimport time\nimport umap\nimport copy\nfrom sklearn import preprocessing\nfrom fbpca import pca\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scanorama import VERBOSE, KNN, ALPHA, APPROX, SIGMA\n#from libs import *\nfrom scanorama import find_alignments,merge_datasets,process_data,transform,vstack\nfrom sklearn.utils.graph_shortest_path import graph_shortest_path\nfrom scipy.sparse.linalg import svds, eigs\n\nnn_nhidden = [1000]\nrsts = [0.5,0.6,0.7,0.8]\ndfs_depth = 1\nco_dim = 5\nkeep_prob = 1.0\nuse_diagonal = True\nmax_iter = 20\nniter = 5\ndef translate_paramter(ps):\n\ts = []\n\tfor p in ps:\n\t\tif isinstance(p, list):\n\t\t\tp = [str(i) for i in p]\n\t\t\tp = '.'.join(p)\n\t\t\ts.append(p)\n\t\telse:\n\t\t\ts.append(str(p))\n\ts = '_'.join(s)\n\treturn s\npname = translate_paramter([max_iter])\n\ndef make_folder(folder):\n\tif not os.path.exists(folder):\n\t\tos.makedirs(folder)\n\treturn folder\n\ndef create_propagate_networks(dname, l2i, onto_net, cls2cls, ontology_nlp_file, rsts = [0.5,0.6,0.7,0.8], diss=[2,3], thress=[1,0.8]):\n\tncls = np.shape(cls2cls)[0]\n\tif dname != 'allen':\n\t\tonto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs = create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file)\n\t\t#network = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls)\n\t\tnetwork = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls, diss = diss, thress = thress)\n\telse:\n\t\tstack_net_bin = np.zeros((ncls,ncls))\n\t\tfor n1 in onto_net:\n\t\t\tfor n2 in onto_net[n1]:\n\t\t\t\tif n1==n2:\n\t\t\t\t\tcontinue\n\t\t\t\tstack_net_bin[n1,n2] = 1\n\t\t\t\tstack_net_bin[n2,n1] = 1\n\t\tnetwork = [RandomWalkRestart(stack_net_bin, rst) for rst in rsts]\n\treturn network\n\n\ndef fine_nearest_co_using_nlp(sentences,co2emb,obo_file,nlp_mapping_cutoff=0.8):\n\tco2name, name2co = get_ontology_name(obo_file = obo_file)\n\tfrom sentence_transformers import SentenceTransformer\n\tmodel = SentenceTransformer('bert-base-nli-mean-tokens')\n\tsentences = np.array([sentence.lower() for sentence in sentences])\n\tsentence_embeddings = model.encode(sentences)\n\tco_embeddings = []\n\tcos = []\n\tfor co in co2emb:\n\t\tco_embeddings.append(co2emb[co])\n\t\tcos.append(co)\n\tco_embeddings = np.array(co_embeddings)\n\tsent2co = {}\n\tfor sentence, embedding, ind in zip(sentences, sentence_embeddings, range(len(sentences))):\n\t\tscs = cosine_similarity(co_embeddings, embedding.reshape(1,-1))\n\n\t\tco_id = np.argmax(scs)\n\t\tsc = scs[co_id]\n\t\tif sc>nlp_mapping_cutoff:\n\t\t\tsent2co[sentence.lower()] = cos[co_id]\n\t\t\tnames = set()\n\t\t\tfor name in name2co:\n\t\t\t\tif name2co[name].upper() == cos[co_id]:\n\t\t\t\t\tnames.add(name)\n\t\t\t#print (sentence, cos[co_id], sc, co2name[cos[co_id]],names)\n\treturn sent2co\n\n\ndef ImputeUnseenCls(y_vec, y_raw, cls2cls, nseen, knn=1):\n\tnclass = np.shape(cls2cls)[0]\n\tseen2unseen_sim = cls2cls[:nseen, nseen:]\n\tnngh 
= np.argsort(seen2unseen_sim*-1, axis = 0)[0,:]\n\tncell = len(y_vec)\n\ty_mat = np.zeros((ncell, nclass))\n\ty_mat[:,:nseen] = y_raw[:, :nseen]\n\tfor i in range(ncell):\n\t\tif y_vec[i] == -1:\n\t\t\t#kngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn]\n\t\t\t#if len(kngh) == 0:\n\t\t\t#\tcontinue\n\t\t\ty_mat[i,nseen:] = y_mat[i,nngh]\n\t\t\ty_mat[i,:nseen] -= 1000000\n\treturn y_mat\n\n\ndef ImputeUnseenCls_Backup(y_vec, y_raw, cls2cls, nseen, knn=1):\n\tnclass = np.shape(cls2cls)[0]\n\tseen2unseen_sim = cls2cls[:nseen, nseen:]\n\tncell = len(y_vec)\n\ty_mat = np.zeros((ncell, nclass))\n\ty_mat[:,:nseen] = y_raw[:, :nseen]\n\tfor i in range(ncell):\n\t\tif y_vec[i] == -1:\n\t\t\tkngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn]\n\t\t\tif len(kngh) == 0:\n\t\t\t\tcontinue\n\t\t\ty_mat[i,:nseen] -= 1000000\n\t\t\ty_mat[i,nseen:] = np.dot(y_raw[i,kngh], seen2unseen_sim[kngh,:])\n\treturn y_mat\n\ndef find_gene_ind(genes, common_genes):\n\tgid = []\n\tfor g in common_genes:\n\t\tgid.append(np.where(genes == g)[0][0])\n\tgid = np.array(gid)\n\treturn gid\n\ndef RandomWalkOntology(onto_net, l2i, ontology_nlp_file, ontology_nlp_emb_file, rst = 0.7):\n\tncls = len(l2i)\n\tonto_net_nlp, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file)\n\tonto_net_nlp = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb)\n\tonto_net_mat = np.zeros((ncls, ncls))\n\tfor n1 in onto_net:\n\t\tfor n2 in onto_net[n1]:\n\t\t\tif n1==n2:\n\t\t\t\tcontinue\n\t\t\tonto_net_mat[n1,n2] = onto_net_nlp[n1, n2]\n\t\t\tonto_net_mat[n2,n1] = onto_net_nlp[n2, n1]\n\tonto_net_rwr = RandomWalkRestart(onto_net_mat, rst)\n\treturn onto_net_rwr\n\ndef process_expression(c2g_list):\n\t#this data process function is motivated by ACTINN, please check ACTINN for more information.\n\tc2g = np.vstack(c2g_list)\n\tc2g = c2g.T\n\t#print ('onclass d0',np.shape(c2g))\n\tc2g = c2g[np.sum(c2g, axis=1)>0, :]\n\t#print (c2g)\n\t#print ('onclass d1',np.shape(c2g))\n\tc2g = np.divide(c2g, np.sum(c2g, axis=0, keepdims=True)) * 10000\n\tc2g = np.log2(c2g+1)\n\texpr = np.sum(c2g, axis=1)\n\t#total_set = total_set[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]\n\n\tc2g = c2g[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]\n\t#print (c2g)\n\t#print ('onclass d2',np.shape(c2g))\n\tcv = np.std(c2g, axis=1) / np.mean(c2g, axis=1)\n\tc2g = c2g[np.logical_and(cv >= np.percentile(cv, 1), cv <= np.percentile(cv, 99)),]\n\t#print (c2g)\n\t#print ('onclass d3',np.shape(c2g))\n\tc2g = c2g.T\n\t#print (c2g)\n\t#print ('onclass d4',np.shape(c2g))\n\tc2g_list_new = []\n\tindex = 0\n\tfor c in c2g_list:\n\t\tncell = np.shape(c)[0]\n\t\tc2g_list_new.append(c2g[index:index+ncell,:])\n\t\tindex = ncell\n\treturn c2g_list_new\n\ndef read_ontology_file(dname, data_folder):\n\tif 'allen' in dname:\n\t\tcell_type_network_file = data_folder + 'allen.ontology'\n\t\tcell_type_nlp_emb_file = None\n\t\tcl_obo_file = None\n\t\tif not os.path.isfile(cell_type_network_file):\n\t\t\tsys.error(cell_type_network_file + ' not found!')\n\telse:\n\t\tcell_type_network_file = data_folder + 'cl.ontology'\n\t\tcell_type_nlp_emb_file = data_folder + 'cl.ontology.nlp.emb'\n\t\tcl_obo_file = data_folder + 'cl.obo'\n\t\tif not os.path.isfile(cell_type_nlp_emb_file):\n\t\t\tsys.exit(cell_type_nlp_emb_file + ' not found!')\n\t\tif not os.path.isfile(cell_type_network_file):\n\t\t\tsys.exit(cell_type_network_file + ' not found!')\n\t\tif not 
os.path.isfile(cl_obo_file):\n\t\t\tsys.exit(cl_obo_file + ' not found!')\n\treturn cell_type_nlp_emb_file, cell_type_network_file, cl_obo_file\n\ndef read_data_file(dname, data_dir):\n\n\tif 'microcebus' in dname:\n\t\ttech = '10x'\n\t\tfeature_file = data_dir + 'Lemur/' + dname +'.h5ad'\n\t\tfilter_key={'method':tech }\n\t\tlabel_file = None\n\t\tgene_file = ''\n\t\tlabel_key = 'cell_ontology_class'\n\telif 'muris' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfeature_file = data_dir + 'Tabula_Muris_Senis/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad'\n\t\tfilter_key = {}\n\t\tlabel_file = None\n\t\tgene_file = ''\n\t\tbatch_key = ''\n\t\tlabel_key = 'cell_ontology_class'\n\telif 'sapiens' in dname:\n\t\tfeature_file = data_dir + 'sapiens/' + 'Pilot1_Pilot2_decontX_Oct2020.h5ad'\n\t\tfilter_key = {}\n\t\tlabel_file = None\n\t\tgene_file = ''\n\t\tbatch_key = ''\n\t\tlabel_key = 'cell_ontology_type'\n\telif 'allen' in dname:\n\t\tfeature_file = data_dir + '/Allen_Brain/features.pkl'\n\t\tlabel_file = data_dir + '/Allen_Brain/labels.pkl'\n\t\tgene_file = data_dir + '/Allen_Brain/genes.pkl'\n\t\tlabel_key = ''\n\t\tfilter_key = {}\n\telif 'krasnow' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfeature_file = data_dir + '/HLCA/'+tech+'_features.pkl'\n\t\tlabel_file = data_dir + '/HLCA/'+tech+'_labels.pkl'\n\t\tgene_file = data_dir + '/HLCA/'+tech+'_genes.pkl'\n\t\tlabel_key = ''\n\t\tfilter_key = {}\n\telse:\n\t\tsys.exit('wrong dname '+dname)\n\tif feature_file.endswith('.pkl'):\n\t\treturn feature_file, filter_key, label_key, label_file, gene_file\n\telif feature_file.endswith('.h5ad'):\n\t\treturn feature_file, filter_key, label_key, label_file, gene_file\n\tsys.exit('wrong file suffix')\n\ndef read_singlecell_data(dname, data_dir, ontology_dir, nsample = 500000000, read_tissue = False, exclude_non_leaf_ontology = True):\n\tif 'microcebus' in dname:\n\t\ttech = '10x'\n\t\t#file = data_dir + 'TMS_official_060520/' + 'tabula-microcebus_smartseq2-10x_combined_annotated_filtered_gene-labels-correct.h5ad'\n\t\tfile = data_dir + 'TMS_official_060520/' + dname +'.h5ad'\n\t\tfilter_key={'method':tech }\n\t\tbatch_key = ''#original_channel\n\t\tontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'\n\t\tontology_file = ontology_dir + '/cell_ontology/cl.ontology'\n\t\tcl_obo_file = ontology_dir + '/cell_ontology/cl.obo'\n\t\tif not read_tissue:\n\t\t\tfeature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\t\telse:\n\t\t\tfeature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\telif 'muris' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfile = data_dir + 'TMS_official_060520/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad'\n\t\tfilter_key = {}\n\t\tbatch_key = ''\n\t\tontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'\n\t\tontology_file = ontology_dir + '/cell_ontology/cl.ontology'\n\t\tcl_obo_file = ontology_dir + '/cell_ontology/cl.obo'\n\t\tif not read_tissue:\n\t\t\tfeature, label, genes = parse_h5ad(file, nsample = nsample, 
read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\t\telse:\n\t\t\tfeature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\telif 'allen_part' in dname:\n\t\tfeature_file = data_dir + 'Allen/matrix_part.csv'\n\t\tlabel_file = data_dir + 'Allen/metadata.csv'\n\t\tontology_file = data_dir + 'Allen/cell_type_ontology'\n\t\tontology_nlp_file = None\n\t\tfeature, label, genes = parse_csv(feature_file, label_file, nsample = nsample, label_key='cell_type_accession_label', exclude_non_ontology = True, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)\n\telif 'allen' in dname:\n\t\tfeature_file = data_dir + 'Allen/features.pkl'\n\t\tlabel_file = data_dir + 'Allen/labels.pkl'\n\t\tgene_file = data_dir + 'Allen/genes.pkl'\n\t\tontology_file = data_dir + 'Allen/cell_type_ontology'\n\t\tontology_nlp_file = None\n\t\tfeature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)\n\telif 'krasnow' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfeature_file = data_dir + 'Krasnow/'+tech+'_features.pkl'\n\t\tlabel_file = data_dir + 'Krasnow/'+tech+'_labels.pkl'\n\t\tgene_file = data_dir + 'Krasnow/'+tech+'_genes.pkl'\n\t\tontology_file = ontology_dir + '/cell_ontology/cl.ontology'\n\t\tontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'\n\t\tcl_obo_file = ontology_dir + '/cell_ontology/cl.obo'\n\t\tfeature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)\n\telse:\n\t\tsys.exit('wrong dname '+dname)\n\tif read_tissue:\n\t\treturn feature, label, genes, tissues, ontology_nlp_file, ontology_file\n\telse:\n\t\treturn feature, label, genes, ontology_nlp_file, ontology_file\n\n\n\ndef parse_krasnow(feature_file, label_file, gene_file, seed = 1, nsample = 1000,exclude_non_leaf_ontology = True, exclude_non_ontology = True, cell_ontology_file=None):\n\tnp.random.seed(seed)\n\n\tif feature_file.endswith('.pkl'):\n\t\tfeatures = pickle.load(open(feature_file, 'rb'))\n\t\tlabels = pickle.load(open(label_file, 'rb'))\n\t\tgenes = pickle.load(open(gene_file, 'rb'))\n\t\tncell, ngene = np.shape(features)\n\t\tassert(ncell == len(labels))\n\t\tassert(ngene == len(genes))\n\t\tindex = np.random.choice(ncell,min(nsample,ncell),replace=False)\n\t\tfeatures = features[index, :]\n\t\tlabels = labels[index]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeatures = features[new_ids, :]\n\t\tlabels = labels[new_ids]\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\treturn features, labels, genes\n\ndef parse_pkl(feature_file, label_file, gene_file, seed = 1, nsample = 10000000,exclude_non_leaf_ontology = True, cell_ontology_file=None):\n\tnp.random.seed(seed)\n\tif feature_file.endswith('.pkl'):\n\t\tfeatures = pickle.load(open(feature_file, 'rb'))\n\t\tlabels = 
pickle.load(open(label_file, 'rb'))\n\t\tgenes = pickle.load(open(gene_file, 'rb'))\n\t\tncell, ngene = np.shape(features)\n\t\tassert(ncell == len(labels))\n\t\tassert(ngene == len(genes))\n\t\tindex = np.random.choice(ncell,ncell,replace=False)\n\t\tfeatures = features[index, :]\n\t\tlabels = labels[index]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeatures = features[new_ids, :]\n\t\tlabels = labels[new_ids]\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\treturn features, labels, genes\n\ndef select_high_var_genes(train_X, test_X, ngene = 200):\n\tmat = np.vstack((train_X, test_X))\n\t#mat = mat.todense()\n\tgstd = np.std(mat, axis=0)\n\tbest_genes = np.argsort(gstd*-1)\n\tbest_genes = best_genes[:ngene]\n\treturn train_X[:, best_genes], test_X[:, best_genes]\n\ndef emb_cells(train_X, test_X, dim=20):\n\tif dim==-1:\n\t\treturn np.log1p(train_X.todense()), np.log1p(test_X.todense())\n\ttrain_X = np.log1p(train_X)\n\ttest_X = np.log1p(test_X)\n\ttrain_X = preprocessing.normalize(train_X, axis=1)\n\ttest_X = preprocessing.normalize(test_X, axis=1)\n\tntrain = np.shape(train_X)[0]\n\tmat = sparse.vstack((train_X, test_X))\n\tU, s, Vt = pca(mat, k=dim) # Automatically centers.\n\tX = U[:, range(dim)] * s[range(dim)]\n\treturn X[:ntrain,:], X[ntrain:,:]\n\ndef write_markers(fname, markers):\n\t## Write marker genes to file\n\tfmarker_genes = open(fname,'w')\n\tfor t in markers:\n\t\tfmarker_genes.write(t+'\\t')\n\t\tg2pv = sorted(markers[t].items(), key=lambda item: item[1])\n\t\tfor g,pv in g2pv:\n\t\t\tfmarker_genes.write(g+'(pv:'+'{:.2e}'.format(pv)+')\\t')\n\t\tfmarker_genes.write('\\n')\n\tfmarker_genes.close()\n\n\ndef calculate_markers(cell2term, cell2gene, genes, terms, topk_cells=500, only_over_expressed = True, return_k_genes = 100):\n\tncell, nterm = np.shape(cell2term)\n\tngene = np.shape(cell2gene)[1]\n\tassert(ncell == np.shape(cell2gene)[0])\n\tmarkers = collections.defaultdict(dict)\n\tfor t in range(nterm):\n\t\tscs = np.argsort(cell2term[:,t])\n\t\tk_bot_cells = scs[:topk_cells]\n\t\tk_top_cells = scs[ncell-topk_cells:]\n\t\tpv = scipy.stats.ttest_ind(cell2gene[k_top_cells,:], cell2gene[k_bot_cells,:], axis=0)[1] #* ngene\n\t\ttop_mean = np.mean(cell2gene[k_top_cells,:],axis=0)\n\t\tbot_mean = np.mean(cell2gene[k_bot_cells,:],axis=0)\n\t\tif only_over_expressed:\n\t\t\tfor g in range(ngene):\n\t\t\t\tif top_mean[g] < bot_mean[g]:\n\t\t\t\t\tpv[g] = 1.\n\t\tpv_sort = list(np.argsort(pv))\n\t\t#for i in range(return_k_genes):\n\t\t#markers[terms[t]][genes[pv_sort[i]]] = pv[pv_sort[i]]\n\t\tmarkers[terms[t]] = pv\n\t\tfor i,p in enumerate(pv):\n\t\t\tif np.isnan(p):\n\t\t\t\tpv[i] = 1.\n\t\t\t#markers[terms[t]][str(pv_sort[i])] = pv[pv_sort[i]]\n\treturn markers\n\ndef peak_h5ad(file):\n\t'''\n\tpeak the number of cells, classes, genes in h5ad file\n\t'''\n\tx = read_h5ad(file)\n\t#print (np.shape(x.X))\n\t#print (x.X[:10][:10])\n\t#print (x.obs.keys())\n\tncell, ngene = np.shape(x.X)\n\tnclass = len(np.unique(x.obs['free_annotation']))\n\t#print (np.unique(x.obs['free_annotation']))\n\tf2name = {}\n\tsel_cell = 0.\n\tfor i in range(ncell):\n\t\tif x.obs['method'][i]!='10x':\n\t\t\tcontinue\n\n\t\tfree = x.obs['free_annotation'][i]\n\t\tname = x.obs['cell_ontology_class'][i]\n\t\tf2name[free] = name\n\t\tsel_cell += 1\n\t#return f2name\n\t#for key in x.obs.keys():\n\t#\tprint (key, np.unique(x.obs[key]))\n\treturn 
sel_cell, ngene, nclass\n\t#for i in range(10):\n\t#\tprint (x.obs['method'][i], x.obs['channel_no_10x'][i])\n\t#for key in x.obs.keys():\n\t#\tprint (key, np.unique(x.obs[key]))\n\t#return index\n\n\ndef get_onotlogy_parents(GO_net, g):\n\tterm_valid = set()\n\tngh_GO = set()\n\tngh_GO.add(g)\n\twhile len(ngh_GO) > 0:\n\t\tfor GO in list(ngh_GO):\n\t\t\tfor GO1 in GO_net[GO]:\n\t\t\t\tngh_GO.add(GO1)\n\t\t\tngh_GO.remove(GO)\n\t\t\tterm_valid.add(GO)\n\treturn term_valid\n\ndef exclude_non_ontology_term(cl_obo_file, labels, label_key):\n\tco2name, name2co = get_ontology_name(cl_obo_file)\n\tnew_labs = []\n\tnew_ids = []\n\tif label_key!='cell_ontology_class' and label_key!='cell_ontology_id':\n\t\tuse_co = False\n\t\tfor kk in np.unique(labels):\n\t\t\tif kk.lower().startswith('cl:'):\n\t\t\t\tuse_co = True\n\t\t\t\tbreak\n\telse:\n\t\tif label_key == 'cell_ontology_class':\n\t\t\tuse_co = False\n\t\telse:\n\t\t\tuse_co = True\n\tfor i in range(len(labels)):\n\t\tl = labels[i]\n\t\tif not use_co:\n\t\t\tif l.lower() in name2co.keys():\n\t\t\t\tnew_labs.append(name2co[l.lower()])\n\t\t\t\tnew_ids.append(i)\n\t\telse:\n\t\t\tif l.lower() in co2name.keys():\n\t\t\t\tnew_labs.append(l.lower())\n\t\t\t\tnew_ids.append(i)\n\tnew_labs = np.array(new_labs)\n\tnew_ids = np.array(new_ids)\n\treturn new_ids, new_labs\n\n\ndef parse_raw_h5ad(file,seed=1,nsample=1e10,tissue_key='tissue',label_key='cell_ontology_class', read_tissue = True, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None):\n\tnp.random.seed(seed)\n\tx = read_h5ad(file)\n\n\tncell = np.shape(x.raw.X)[0]\n\tselect_cells = set(range(ncell))\n\tfor key in filter_key:\n\t\tvalue = filter_key[key]\n\t\tselect_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])\n\tselect_cells = sorted(select_cells)\n\tfeature = x.raw.X[select_cells, :]\n\tlabels = np.array(x.obs[label_key].tolist())[select_cells]\n\tif read_tissue:\n\t\ttissues = np.array(x.obs[tissue_key].tolist())[select_cells]\n\tif batch_key=='' or batch_key not in x.obs.keys():\n\t\tbatch_labels = np.ones(len(labels))\n\telse:\n\t\tbatch_labels = np.array(x.obs[batch_key].tolist())[select_cells]\n\tgenes = x.var.index\n\tncell = len(select_cells)\n\tif exclude_non_ontology:\n\t\tnew_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key)\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\t\tlabels = labels[new_ids]\n\t\tif read_tissue:\n\t\t\ttissues = tissues[new_ids]\n\tncell = len(labels)\n\tindex = np.random.choice(ncell,min(nsample,ncell),replace=False)\n\tbatch_labels = batch_labels[index]\n\tfeature = feature[index, :] # cell by gene matrix\n\tlabels = labels[index]\n\tif read_tissue:\n\t\ttissues = tissues[index]\n\tgenes = x.var.index\n\tcorrected_feature = run_scanorama_same_genes(feature, batch_labels)\n\tcorrected_feature = corrected_feature.toarray()\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\tif read_tissue:\n\t\tassert(len(tissues) == len(labels))\n\t\treturn corrected_feature, labels, genes, tissues\n\telse:\n\t\treturn corrected_feature, labels, genes\n\ndef select_cells_based_on_keys(x, features, tissues = None, labels = None, filter_key = 
None):\n\tncell = np.shape(x.X)[0]\n\tselect_cells = set(range(ncell))\n\tfor key in filter_key:\n\t\tvalue = filter_key[key]\n\t\tselect_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])\n\tselect_cells = sorted(select_cells)\n\tfeatures = features[select_cells,: ]\n\tif labels is not None:\n\t\tlabels = labels[select_cells]\n\tif tissues is not None:\n\t\ttissues = tissues[select_cells]\n\tx = x[select_cells,:]\n\treturn features, labels, tissues, x\n\ndef find_marker_genes(train_X, pred_Y_all, genes, i2l, topk = 50):\n\tcor = corr2_coeff(pred_Y_all[:,:].T, train_X[:,:].T)\n\tcor = np.nan_to_num(cor) # cell type to gene\n\tnl = len(i2l)\n\tc2g = {}\n\tfor i in range(nl):\n\t\tgl = np.argsort(cor[i,:]*-1)\n\t\tc2g[i2l[i]] = {}\n\t\tfor j in range(topk):\n\t\t\tc2g[i2l[i]][genes[gl[j]]] = cor[i, gl[j]]\n\treturn c2g, cor\n\n\ndef use_pretrained_model(OnClass, genes, test_X, models = []):\n\tlast_l2i = {}\n\tlast_i2l = {}\n\n\tpred_Y_all_models = 0.\n\tngene = len(genes)\n\tfor model in models:\n\t\tOnClass.BuildModel(OnClass.co2emb, ngene = ngene, use_pretrain = model)\n\t\tprint ('Build model finished for ',model)\n\t\tpred_Y_seen, pred_Y_all, pred_label = OnClass.Predict(test_X, test_genes = genes)\n\t\tprint ('Predict for ',model)\n\t\tpred_Y_all = pred_Y_all.T / (pred_Y_all.T.sum(axis=1)[:, np.newaxis] + 1)\n\t\tpred_Y_all = pred_Y_all.T\n\t\tif len(last_l2i)>0:\n\t\t\tnew_ct_ind = []\n\t\t\tfor i in range(len(last_i2l)):\n\t\t\t\tl = last_i2l[i]\n\t\t\t\tnew_ct_ind.append(OnClass.co2i[l])\n\t\t\tpred_Y_all = pred_Y_all[:, np.array(new_ct_ind)]\n\t\t\tpred_Y_all_models += pred_Y_all\n\t\telse:\n\t\t\tlast_l2i = OnClass.co2i\n\t\t\tlast_i2l = OnClass.i2co\n\t\t\tpred_Y_all_models = pred_Y_all\n\treturn pred_Y_all_models\n\n\ndef read_data(feature_file, cell_ontology_ids, exclude_non_leaf_ontology = False, ct_mapping_key = {}, tissue_key = None, seed = 1, filter_key = None, AnnData_label_key=None, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, label_file=None, cl_obo_file = None, cell_ontology_file = None):\n\tnp.random.seed(seed)\n\tx = read_h5ad(feature_file)\n\tncell = np.shape(x.X)[0]\n\tdataset = x.X.toarray()\n\tgenes = np.array([x.upper() for x in x.var.index])\n\n\tif tissue_key is not None:\n\t\ttissues = np.array(x.obs[tissue_key].tolist())\n\telse:\n\t\ttissues = None\n\tif AnnData_label_key is None and label_file is None:\n\t\tprint ('no label file is provided')\n\t\tlabels = None\n\t\tdataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key)\n\t\treturn dataset, genes, labels, tissues, x\n\tif AnnData_label_key is not None:\n\t\tlabels = x.obs[AnnData_label_key].tolist()\n\telse:\n\t\tfin = open(label_file)\n\t\tlabels = []\n\t\tfor line in fin:\n\t\t\tlabels.append(line.strip())\n\t\tfin.close()\n\tlabels = np.array(labels)\n\tdataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key)\n\tind, labels, unfound_labs = map_and_select_labels(labels, cell_ontology_ids, cl_obo_file, ct_mapping_key = ct_mapping_key, nlp_mapping = nlp_mapping, co2emb = co2emb, nlp_mapping_cutoff = nlp_mapping_cutoff, cl_obo_file = cl_obo_file)\n\tif tissue_key is not None:\n\t\ttissues = tissues[ind]\n\tdataset = dataset[ind, :]\n\tx = x[ind, :]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\ttissues = tissues[new_ids]\n\t\tdataset = dataset[new_ids, 
:]\n\t\tlabels = labels[new_ids]\n\t\tx = x[new_ids, :]\n\n\tncell = np.shape(dataset)[0]\n\tindex = np.random.choice(ncell,ncell,replace=False)\n\tdataset = dataset[index, :] # cell by gene matrix\n\tlabels = labels[index]\n\tif tissue_key is not None:\n\t\ttissues = tissues[index]\n\treturn dataset, genes, labels, tissues, x\n\n\n\n\ndef exact_match_co_name_2_co_id(labels, lab2co, cl_obo_file = None):\n\tif cl_obo_file is None:\n\t\treturn lab2co\n\tco2name, name2co = get_ontology_name(obo_file = cl_obo_file)\n\tfor label in labels:\n\t\tif label.lower() in name2co:\n\t\t\tlab2co[label.lower()] = name2co[label.lower()]\n\tfor name in name2co:\n\t\tlab2co[name.lower()] = name2co[name]\n\treturn lab2co\n\n\ndef map_and_select_labels(labels, cell_ontology_ids, obo_file, ct_mapping_key = {}, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, cl_obo_file = None):\n\tlab2co = {}\n\tif nlp_mapping:\n\t\tif co2emb is None:\n\t\t\tsys.exit('Please provide cell type embedding to do NLP-based mapping.')\n\t\tlab2co = fine_nearest_co_using_nlp(np.unique(labels), co2emb, obo_file,nlp_mapping_cutoff = nlp_mapping_cutoff)\n\tlab2co = exact_match_co_name_2_co_id(np.unique(labels), lab2co, cl_obo_file = cl_obo_file)\n\tfor ct in ct_mapping_key:\n\t\tlab2co[ct_mapping_key[ct]] = lab2co[ct]\n\tind = []\n\tlab_id = []\n\tunfound_labs = set()\n\tfor i,l in enumerate(labels):\n\t\tif l in cell_ontology_ids:\n\t\t\tind.append(i)\n\t\t\tlab_id.append(l)\n\t\telif l.lower() in lab2co:\n\t\t\tind.append(i)\n\t\t\tlab_id.append(lab2co[l.lower()])\n\t\telse:\n\t\t\tunfound_labs.add(l)\n\tfrac = len(ind) * 1. / len(labels)\n\tind = np.array(ind)\n\tlabels = np.array(lab_id)\n\tunfound_labs = set(unfound_labs)\n\twarn_message = 'Warning: Only: %f precentage of labels are in the Cell Ontology. The remaining cells are excluded! 
Consider using NLP mapping and choose a small mapping cutoff (nlp_mapping_cutoff)' % (frac * 100)\n\tif frac < 0.5:\n\t\tprint (warn_message)\n\t\tprint ('Here are unfound labels:',unfound_labs)\n\treturn ind, labels, unfound_labs\n\ndef parse_h5ad(file,seed=1,nsample=1e10,label_key='cell_ontology_class', read_tissue = False, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None):\n\t'''\n\tread h5ad file\n\tfeature: cell by gene expression\n\tlabel: cell ontology class\n\tgenes: gene names HGNC\n\t'''\n\tnp.random.seed(seed)\n\tx = read_h5ad(file)\n\tncell = np.shape(x.X)[0]\n\tselect_cells = set(range(ncell))\n\tfor key in filter_key:\n\t\tvalue = filter_key[key]\n\t\tselect_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])\n\tselect_cells = sorted(select_cells)\n\tfeature = x.X[select_cells, :]\n\tlabels = np.array(x.obs[label_key].tolist())[select_cells]\n\tif read_tissue:\n\t\ttissues = np.array(x.obs['tissue'].tolist())[select_cells]\n\tif batch_key=='' or batch_key not in x.obs.keys():\n\t\tbatch_labels = np.ones(len(labels))\n\telse:\n\t\tbatch_labels = np.array(x.obs[batch_key].tolist())[select_cells]\n\tgenes = x.var.index\n\tncell = len(select_cells)\n\n\tif exclude_non_ontology:\n\t\tnew_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key)\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\t\tlabels = labels[new_ids]\n\t\tif read_tissue:\n\t\t\ttissues = tissues[new_ids]\n\tncell = len(labels)\n\tindex = np.random.choice(ncell,min(nsample,ncell),replace=False)\n\tbatch_labels = batch_labels[index]\n\tfeature = feature[index, :] # cell by gene matrix\n\tlabels = labels[index]\n\tif read_tissue:\n\t\ttissues = tissues[index]\n\tgenes = x.var.index\n\t#corrected_feature = run_scanorama_same_genes(feature, batch_labels)\n\tcorrected_feature = feature.toarray()\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\tif read_tissue:\n\t\tassert(len(tissues) == len(labels))\n\t\treturn corrected_feature, labels, genes, tissues\n\telse:\n\t\treturn corrected_feature, labels, genes\n\n\ndef exclude_parent_child_nodes(cell_ontology_file,labels):\n\tuniq_labels = np.unique(labels)\n\texcludes = set()\n\tnet = collections.defaultdict(dict)\n\tfin = open(cell_ontology_file)\n\tfor line in fin:\n\t\ts,p = line.strip().split('\\t')\n\t\tnet[s][p] = 1 #p is parent\n\tfin.close()\n\tfor n in list(net.keys()):\n\t\tngh = get_ontology_parents(net, n)\n\t\tfor n1 in ngh:\n\t\t\tnet[n][n1] = 1\n\tfor l1 in uniq_labels:\n\t\tfor l2 in uniq_labels:\n\t\t\tif l1 in net[l2] and l1!=l2: #l1 is l2 parent\n\t\t\t\texcludes.add(l1)\n\t#print (excludes)\n\tnew_ids = []\n\tfor i in range(len(labels)):\n\t\tif labels[i] not in excludes:\n\t\t\tnew_ids.append(i)\n\tnew_ids = np.array(new_ids)\n\treturn new_ids, excludes\n\ndef corr2_coeff(A, B):\n # Rowwise mean of input arrays & subtract from input arrays themeselves\n A_mA = A - A.mean(1)[:, None]\n B_mB = B - B.mean(1)[:, None]\n\n # Sum of squares across rows\n ssA = (A_mA**2).sum(1)\n ssB = (B_mB**2).sum(1)\n\n # Finally get corr coeff\n return np.dot(A_mA, B_mB.T) / np.sqrt(np.dot(ssA[:, None],ssB[None]))\n\ndef 
extract_data_based_on_class(feats, labels, sel_labels):\n\tind = []\n\tfor l in sel_labels:\n\t\tid = np.where(labels == l)[0]\n\t\tind.extend(id)\n\tnp.random.shuffle(ind)\n\tX = feats[ind,:]\n\tY = labels[ind]\n\treturn X, Y, ind\n\ndef SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10):\n\tnp.random.seed(random_state)\n\n\tcls = np.unique(all_Y)\n\tcls2ct = Counter(all_Y)\n\tncls = len(cls)\n\ttest_cls = list(np.random.choice(cls, int(ncls * nfold_cls), replace=False))\n\tfor c in cls2ct:\n\t\tif cls2ct[c] < nmin_size:\n\t\t\ttest_cls.append(c)\n\ttest_cls = np.unique(test_cls)\n\t#add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True)\n\ttrain_cls = [x for x in cls if x not in test_cls]\n\ttrain_cls = np.array(train_cls)\n\ttrain_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)\n\ttest_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)\n\tif all_tissues is not None:\n\t\ttrain_tissues = all_tissues[train_ind]\n\t\ttest_tissues = all_tissues[test_ind]\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(\n\t \ttrain_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\t\ttest_tissues = np.concatenate((test_tissues, train_tissues_test))\n\t\ttrain_tissues = train_tissues_train\n\telse:\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(\n\t \ttrain_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\ttest_X = np.vstack((test_X, train_X_test))\n\ttest_Y = np.concatenate((test_Y, train_Y_test))\n\ttrain_X = train_X_train\n\ttrain_Y = train_Y_train\n\tif all_tissues is not None:\n\t\treturn train_X, train_Y, train_tissues, test_X, test_Y, test_tissues\n\telse:\n\t\treturn train_X, train_Y, test_X, test_Y\n\n'''\ndef SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10):\n\tnp.random.seed(random_state)\n\n\tcls = np.unique(all_Y)\n\tcls2ct = Counter(all_Y)\n\tncls = len(cls)\n\trare_cls = []\n\tnot_rare_cls = []\n\tfor c in cls2ct:\n\t\tif cls2ct[c] < 2:\n\t\t\tcontinue\n\t\telif cls2ct[c] < nmin_size:\n\t\t\trare_cls.append(c)\n\t\telse:\n\t\t\tnot_rare_cls.append(c)\n\tcls = np.concatenate((rare_cls, not_rare_cls))\n\tncls = len(cls)\n\trare_cls = np.array(rare_cls)\n\tnot_rare_cls = np.array(not_rare_cls)\n\ttrain_non_rare_cls = list(np.random.choice(not_rare_cls, int(len(not_rare_cls) * (1 - nfold_cls)), replace=False))\n\ttrain_cls = np.concatenate((train_non_rare_cls, rare_cls))\n\ttest_cls = [x for x in cls if x not in train_cls]\n\ttest_cls = np.array(test_cls)\n\tassert(len(test_cls) + len(train_cls) == ncls)\n\tassert(len(set(test_cls) & set(train_cls)) == 0)\n\t#add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True)\n\ttrain_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)\n\ttest_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)\n\n\tif all_tissues is not None:\n\t\ttrain_tissues = all_tissues[train_ind]\n\t\ttest_tissues = all_tissues[test_ind]\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(\n\t \ttrain_X, train_Y, train_tissues, test_size=nfold_sample, stratify = 
train_Y,random_state=random_state)\n\t\ttest_tissues = np.concatenate((test_tissues, train_tissues_test))\n\t\ttrain_tissues = train_tissues_train\n\telse:\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(\n\t \ttrain_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\ttest_X = np.vstack((test_X, train_X_test))\n\ttest_Y = np.concatenate((test_Y, train_Y_test))\n\ttrain_X = train_X_train\n\ttrain_Y = train_Y_train\n\tif all_tissues is not None:\n\t\treturn train_X, train_Y, train_tissues, test_X, test_Y, test_tissues\n\telse:\n\t\treturn train_X, train_Y, test_X, test_Y\n'''\n\ndef LeaveOneOutTrainTest(all_X, all_Y, test_Y, all_tissues = None, random_state=10, nfold_sample = 0.2, nmin_size=10):\n\tnp.random.seed(random_state)\n\n\tcls = np.unique(all_Y)\n\tcls2ct = Counter(all_Y)\n\tncls = len(cls)\n\ttest_cls = [test_Y]\n\ttest_cls = np.unique(test_cls)\n\t#add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True)\n\ttrain_cls = [x for x in cls if x not in test_cls]\n\ttrain_cls = np.array(train_cls)\n\ttrain_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)\n\ttest_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)\n\tif all_tissues is not None:\n\t\ttrain_tissues = all_tissues[train_ind]\n\t\ttest_tissues = all_tissues[test_ind]\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(\n\t \ttrain_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\t\ttest_tissues = np.concatenate((test_tissues, train_tissues_test))\n\t\ttrain_tissues = train_tissues_train\n\telse:\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(\n\t \ttrain_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\ttest_X = np.vstack((test_X, train_X_test))\n\ttest_Y = np.concatenate((test_Y, train_Y_test))\n\ttrain_X = train_X_train\n\ttrain_Y = train_Y_train\n\tif all_tissues is not None:\n\t\treturn train_X, train_Y, train_tissues, test_X, test_Y, test_tissues\n\telse:\n\t\treturn train_X, train_Y, test_X, test_Y\n\ndef renorm(X):\n\tY = X.copy()\n\tY = Y.astype(float)\n\tngene,nsample = Y.shape\n\ts = np.sum(Y, axis=0)\n\t#print s.shape()\n\tfor i in range(nsample):\n\t\tif s[i]==0:\n\t\t\ts[i] = 1\n\t\t\tif i < ngene:\n\t\t\t\tY[i,i] = 1\n\t\t\telse:\n\t\t\t\tfor j in range(ngene):\n\t\t\t\t\tY[j,i] = 1. 
/ ngene\n\t\tY[:,i] = Y[:,i]/s[i]\n\treturn Y\n\ndef RandomWalkRestart(A, rst_prob, delta = 1e-4, reset=None, max_iter=50,use_torch=False,return_torch=False):\n\tif use_torch:\n\t\tdevice = torch.device(\"cuda:0\")\n\tnnode = A.shape[0]\n\t#print nnode\n\tif reset is None:\n\t\treset = np.eye(nnode)\n\tnsample,nnode = reset.shape\n\t#print nsample,nnode\n\tP = renorm(A)\n\tP = P.T\n\tnorm_reset = renorm(reset.T)\n\tnorm_reset = norm_reset.T\n\tif use_torch:\n\t\tnorm_reset = torch.from_numpy(norm_reset).float().to(device)\n\t\tP = torch.from_numpy(P).float().to(device)\n\tQ = norm_reset\n\n\tfor i in range(1,max_iter):\n\t\t#Q = gnp.garray(Q)\n\t\t#P = gnp.garray(P)\n\t\tif use_torch:\n\t\t\tQ_new = rst_prob*norm_reset + (1-rst_prob) * torch.mm(Q, P)#.as_numpy_array()\n\t\t\tdelta = torch.norm(Q-Q_new, 2)\n\t\telse:\n\t\t\tQ_new = rst_prob*norm_reset + (1-rst_prob) * np.dot(Q, P)#.as_numpy_array()\n\t\t\tdelta = np.linalg.norm(Q-Q_new, 'fro')\n\t\tQ = Q_new\n\t\t#print (i,Q)\n\t\tsys.stdout.flush()\n\t\tif delta < 1e-4:\n\t\t\tbreak\n\tif use_torch and not return_torch:\n\t\tQ = Q.cpu().numpy()\n\treturn Q\n\ndef DCA_vector(Q, dim):\n\tnnode = Q.shape[0]\n\talpha = 1. / (nnode **2)\n\tQ = np.log(Q + alpha) - np.log(alpha);\n\n\t#Q = Q * Q';\n\t[U, S, V] = svds(Q, dim);\n\tS = np.diag(S)\n\tX = np.dot(U, np.sqrt(S))\n\tY = np.dot(np.sqrt(S), V)\n\tY = np.transpose(Y)\n\treturn X,U,S,V,Y\n\ndef read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file):\n\tncls = len(l2i)\n\tnet = np.zeros((ncls, ncls))\n\tbin_net = np.zeros((ncls, ncls))\n\tfin = open(ontology_nlp_file)\n\tfor line in fin:\n\t\ts,p,wt = line.upper().strip().split('\\t')\n\t\twt = float(wt)\n\t\tnet[l2i[s], l2i[p]] = np.exp(wt)\n\t\tnet[l2i[p], l2i[s]] = np.exp(wt)\n\t\tbin_net[l2i[s], l2i[p]] = 1\n\t\tbin_net[l2i[p], l2i[s]] = 1\n\tfin.close()\n\n\tl2vec = {}\n\tfin = open(ontology_nlp_emb_file)\n\tfor line in fin:\n\t\tw = line.upper().strip().split('\\t')\n\t\tl2vec[w[0]] = []\n\t\tdim = len(w)-1\n\t\tfor i in range(1,len(w)):\n\t\t\tl2vec[w[0]].append(float(w[i]))\n\tfin.close()\n\n\tl2vec_mat = np.zeros((ncls, dim))\n\tfor l in l2vec:\n\t\tif l.upper() not in l2i:\n\t\t\tcontinue\n\t\tl2vec_mat[l2i[l.upper()],:] = l2vec[l]\n\n\t'''\n\tnet_sum = np.sum(net,axis=0)\n\tfor i in range(ncls):\n\t\tif net_sum[i] == 0:\n\t\t\tnet[i,i] = 1.\n\t\tnet[:,i] /= np.sum(net[:,i])\n\t#net = net / net.sum(axis=1)[:, np.newaxis]\n\t'''\n\treturn net, bin_net, l2vec_mat\n\n\ndef GetReverseNet(onto_net):\n\tonto_net_rev = collections.defaultdict(dict)\n\tfor a in onto_net:\n\t\tfor b in onto_net[a]:\n\t\t\tonto_net_rev[b][a] = 1\n\treturn onto_net_rev\n\n\ndef ParseCLOnto(train_Y, ontology_nlp_file, ontology_file, co_dim=5, co_mi=3, dfs_depth = 1, combine_unseen = False, add_emb_diagonal = True, use_pretrain = None, use_seen_only = True):#\n\tunseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat = create_labels(train_Y, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth, combine_unseen = combine_unseen)\n\tY_emb = emb_ontology(i2l, ontology_nlp_file, ontology_file, dim = co_dim, mi=co_mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l)\n\tif add_emb_diagonal:\n\t\tY_emb = np.column_stack((np.eye(len(i2l)), Y_emb))\n\treturn unseen_l, l2i, i2l, onto_net, Y_emb, onto_net_mat\n\n\n\ndef graph_embedding(A, i2l, mi=0, dim=20,use_seen_only=True,unseen_l=None):\n\tnl = np.shape(A)[0]\n\tif use_seen_only:\n\t\tseen_ind = []\n\t\tunseen_ind = []\n\t\tfor i in range(nl):\n\t\t\tif i2l[i] in 
unseen_l:\n\t\t\t\tunseen_ind.append(i)\n\t\t\telse:\n\t\t\t\tseen_ind.append(i)\n\t\tseen_ind = np.array(seen_ind)\n\t\tunseen_ind = np.array(unseen_ind)\n\n\t#if len(seen_ind) * 0.8 < dim:\n\t#\tdim = int(len(seen_ind) * 0.8)\n\tif mi==0 or mi == 1:\n\t\tsp = graph_shortest_path(A,method='FW',directed =False)\n\telse:\n\t\tsp = RandomWalkRestart(A, 0.8)\n\tif use_seen_only:\n\t\tsp = sp[seen_ind, :]\n\t\tsp = sp[:,seen_ind]\n\tX = np.zeros((np.shape(sp)[0],dim))\n\tsvd_dim = min(dim, np.shape(sp)[0]-1)\n\tif mi==0 or mi == 2:\n\t\tX[:,:svd_dim] = svd_emb(sp, dim=svd_dim)\n\telse:\n\t\tX[:,:svd_dim] = DCA_vector(sp, dim=svd_dim)[0]\n\tif use_seen_only:\n\t\tX_ret = np.zeros((nl, dim))\n\t\tX_ret[seen_ind,:] = X\n\telse:\n\t\tX_ret = X\n\tif mi==2 or mi == 3:\n\t\tsp *= -1\n\treturn sp, X_ret\n\ndef cal_ontology_emb(ontology_nlp_file, ontology_file, dim=20, mi=3, use_pretrain = None, use_seen_only = True, unseen_l = None):\n\tif use_pretrain is None or not os.path.isfile(use_pretrain+'X.npy') or not os.path.isfile(use_pretrain+'sp.npy'):\n\t\tcl_nlp = collections.defaultdict(dict)\n\t\tif ontology_nlp_file is not None:\n\t\t\tfin = open(ontology_nlp_file)\n\t\t\tfor line in fin:\n\t\t\t\ts,p,wt = line.upper().strip().split('\\t')\n\t\t\t\tcl_nlp[s][p] = float(wt)\n\t\t\t\tcl_nlp[p][s] = float(wt)\n\t\t\tfin.close()\n\n\t\tfin = open(ontology_file)\n\t\tlset = set()\n\t\ts2p = {}\n\t\tfor line in fin:\n\t\t\tw = line.strip().split('\\t')\n\t\t\ts = w[0]\n\t\t\tp = w[1]\n\t\t\tif len(w)==2:\n\t\t\t\tif p in cl_nlp and s in cl_nlp[p]:\n\t\t\t\t\twt = cl_nlp[p][s]\n\t\t\t\telse:\n\t\t\t\t\twt = 1.\n\t\t\telse:\n\t\t\t\twt = float(w[2])\n\t\t\tif s not in s2p:\n\t\t\t\ts2p[s] = {}\n\t\t\ts2p[s][p] = wt\n\t\t\tlset.add(s)\n\t\t\tlset.add(p)\n\t\tfin.close()\n\t\tlset = np.sort(list(lset))\n\t\tnl = len(lset)\n\t\tl2i = dict(zip(lset, range(nl)))\n\t\ti2l = dict(zip(range(nl), lset))\n\t\tA = np.zeros((nl, nl))\n\t\tfor s in s2p:\n\t\t\tfor p in s2p[s]:\n\t\t\t\tA[l2i[s], l2i[p]] = s2p[s][p]\n\t\t\t\tA[l2i[p], l2i[s]] = s2p[s][p]\n\t\tsp, X = graph_embedding(A, i2l, mi=mi, dim=dim, use_seen_only=use_seen_only, unseen_l=unseen_l)\n\t\tif use_pretrain is not None:\n\t\t\ti2l_file = use_pretrain+'i2l.npy'\n\t\t\tl2i_file = use_pretrain+'l2i.npy'\n\t\t\tX_file = use_pretrain+'X.npy'\n\t\t\tsp_file = use_pretrain+'sp.npy'\n\t\t\tnp.save(X_file, X)\n\t\t\tnp.save(i2l_file, i2l)\n\t\t\tnp.save(l2i_file, l2i)\n\t\t\tnp.save(sp_file, sp)\n\telse:\n\t\ti2l_file = use_pretrain+'i2l.npy'\n\t\tl2i_file = use_pretrain+'l2i.npy'\n\t\tX_file = use_pretrain+'X.npy'\n\t\tsp_file = use_pretrain+'sp.npy'\n\t\tX = np.load(X_file)\n\t\ti2l = np.load(i2l_file,allow_pickle=True).item()\n\t\tl2i = np.load(l2i_file,allow_pickle=True).item()\n\t\tsp = np.load(sp_file,allow_pickle=True)\n\treturn X, l2i, i2l, sp\n\ndef merge_26_datasets(datanames_26datasets, scan_dim = 50):\n\tdatasets, genes_list, n_cells = load_names(datanames_26datasets,verbose=False,log1p=True)\n\tdatasets, genes = merge_datasets(datasets, genes_list)\n\tdatasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim)\n\tdatasets_dimred, expr_datasets = my_assemble(datasets_dimred, ds_names=datanames_26datasets, expr_datasets = datasets, sigma=150)\n\tdatasets_dimred = sparse.vstack(expr_datasets).toarray()\n\treturn datasets_dimred, genes\n\ndef emb_ontology(i2l, ontology_nlp_file, ontology_file, dim=20, mi=0, use_pretrain = None, use_seen_only = True, unseen_l = None):\n\tX, ont_l2i, ont_i2l, A = cal_ontology_emb( ontology_nlp_file, 
ontology_file, dim=dim, mi=mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l)\n\n\ti2emb = np.zeros((len(i2l),dim))\n\tnl = len(i2l)\n\tfor i in range(nl):\n\t\tant = i2l[i]\n\t\tif ant not in ont_l2i:\n\t\t\tprint (ant, ont_l2i)\n\t\t\tassert('xxx' in ant.lower() or 'nan' in ant.lower())\n\t\t\tcontinue\n\t\ti2emb[i,:] = X[ont_l2i[ant],:]\n\t'''\n\tAA = np.zeros((nl, nl))\n\tfor i in range(nl):\n\t\tfor j in range(nl):\n\t\t\tanti, antj = i2l[i], i2l[j]\n\t\t\tif anti in ont_l2i and antj in ont_l2i:\n\t\t\t\tAA[i,j] = A[ont_l2i[anti],ont_l2i[antj]]\n\t'''\n\treturn i2emb\n'''\ndef get_ontology_parents(GO_net, g):\n\tterm_valid = set()\n\tngh_GO = set()\n\tngh_GO.add(g)\n\twhile len(ngh_GO) > 0:\n\t\tfor GO in list(ngh_GO):\n\t\t\tfor GO1 in GO_net[GO]:\n\t\t\t\tngh_GO.add(GO1)\n\t\t\tngh_GO.remove(GO)\n\t\t\tterm_valid.add(GO)\n\treturn term_valid\n'''\n\ndef get_ontology_parents(GO_net, g, dfs_depth=100):\n\tterm_valid = set()\n\tngh_GO = set()\n\tngh_GO.add(g)\n\tdepth = {}\n\tdepth[g] = 0\n\twhile len(ngh_GO) > 0:\n\t\tfor GO in list(ngh_GO):\n\t\t\tfor GO1 in GO_net[GO]:\n\t\t\t\tngh_GO.add(GO1)\n\t\t\t\tdepth[GO1] = depth[GO] + 1\n\t\t\tngh_GO.remove(GO)\n\t\t\tif depth[GO] < dfs_depth:\n\t\t\t\tterm_valid.add(GO)\n\treturn term_valid\n\ndef create_labels(train_Y, ontology_nlp_file, ontology_file, combine_unseen = False, dfs_depth = 1000):\n\n\tfin = open(ontology_file)\n\tlset = set()\n\tfor line in fin:\n\t\ts,p = line.strip().split('\\t')\n\t\tlset.add(s)\n\t\tlset.add(p)\n\tfin.close()\n\n\tseen_l = sorted(np.unique(train_Y))\n\tunseen_l = sorted(lset - set(train_Y))\n\tys = np.concatenate((seen_l, unseen_l))\n\n\ti2l = {}\n\tl2i = {}\n\tfor l in ys:\n\t\tnl = len(i2l)\n\t\tcol = l\n\t\tif combine_unseen and l in unseen_l:\n\t\t\tnl = len(seen_l)\n\t\t\tl2i[col] = nl\n\t\t\ti2l[nl] = col\n\t\t\tcontinue\n\t\tl2i[col] = nl\n\t\ti2l[nl] = col\n\ttrain_Y = [l2i[y] for y in train_Y]\n\ttrain_X2Y = ConvertLabels(train_Y, ncls = len(i2l))\n\tonto_net, onto_net_mat = read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth)\n\treturn unseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat\n\ndef query_depth_ontology(net, node, root='cl:0000000'):\n\tdepth = 0\n\twhile node != root:\n\t\tif len(net[node]) == 0:\n\t\t\tprint (node)\n\t\tnode = sorted(list(net[node].keys()))[0]\n\t\tdepth += 1\n\t\tif depth>100:\n\t\t\tsys.error('root not found')\n\treturn depth\n\n\ndef read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = 1000):\n\tnl = len(l2i)\n\tnet = collections.defaultdict(dict)\n\tnet_mat = np.zeros((nl,nl))\n\tfin = open(ontology_file)\n\tfor line in fin:\n\t\ts,p = line.strip().split('\\t')\n\t\tsi = l2i[s]\n\t\tpi = l2i[p]\n\t\tnet[si][pi] = 1\n\t\tnet_mat[si][pi] = 1\n\tfin.close()\n\tfor n in range(nl):\n\t\tngh = get_ontology_parents(net, n, dfs_depth = dfs_depth)\n\t\tnet[n][n] = 1\n\t\tfor n1 in ngh:\n\t\t\tnet[n][n1] = 1\n\treturn net, net_mat\n\ndef extract_label_propagate_tree(onto_net, ncls):\n\ttree = np.zeros((ncls,ncls))\n\tfor n1 in onto_net:\n\t\tfor n2 in onto_net[n1]:\n\t\t\ttree[n1,n2] = 1\n\treturn tree\n\ndef ConvertLabels(labels, ncls=-1):\n\tncell = np.shape(labels)[0]\n\tif len(np.shape(labels)) ==1 :\n\t\t#bin to mat\n\t\tif ncls == -1:\n\t\t\tncls = np.max(labels)\n\t\tmat = np.zeros((ncell, ncls))\n\t\tfor i in range(ncell):\n\t\t\tmat[i, labels[i]] = 1\n\t\treturn mat\n\telse:\n\t\tif ncls == -1:\n\t\t\tncls = np.shape(labels)[1]\n\t\tvec = np.zeros(ncell)\n\t\tfor i in range(ncell):\n\t\t\tind = 
np.where(labels[i,:]!=0)[0]\n\t\t\tassert(len(ind)<=1) # not multlabel classification\n\t\t\tif len(ind)==0:\n\t\t\t\tvec[i] = -1\n\t\t\telse:\n\t\t\t\tvec[i] = ind[0]\n\t\treturn vec\n\ndef MapLabel2CL(test_Y, l2i):\n\ttest_Y_new = np.array([l2i[y] for y in test_Y])\n\treturn test_Y_new\n\ndef get_ontology_name(obo_file, lower=True):\n\tfin = open(obo_file)\n\tco2name = {}\n\tname2co = {}\n\ttag_is_syn = {}\n\tfor line in fin:\n\t\tif line.startswith('id: '):\n\t\t\tco = line.strip().split('id: ')[1]\n\t\tif line.startswith('name: '):\n\t\t\tif lower:\n\t\t\t\tname = line.strip().lower().split('name: ')[1]\n\t\t\telse:\n\t\t\t\tname = line.strip().split('name: ')[1]\n\t\t\tco2name[co] = name\n\t\t\tname2co[name] = co\n\t\tif line.startswith('synonym: '):\n\t\t\tif lower:\n\t\t\t\tsyn = line.strip().lower().split('synonym: \"')[1].split('\" ')[0]\n\t\t\telse:\n\t\t\t\tsyn = line.strip().split('synonym: \"')[1].split('\" ')[0]\n\t\t\tif syn in name2co:\n\t\t\t\tcontinue\n\t\t\tname2co[syn] = co\n\tfin.close()\n\treturn co2name, name2co\n\ndef knn_ngh(Y2Y):\n\tind = np.argsort(Y2Y*-1, axis=1)\n\treturn ind\n\ndef extend_prediction_2unseen_normalize(pred_Y_seen, onto_net_rwr, nseen, ratio=200):\n\tsys.exit(-1)#NOT USED\n\tncls = np.shape(onto_net_rwr)[0]\n\tonto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (ncls, 1))\n\tpred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis]\n\tpred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:])\n\tpred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1)\n\tpred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio\n\treturn pred_Y_all\n\ndef create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file, ontology_nlp_emb_file):\n\tncls = np.shape(cls2cls)[0]\n\t_, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file = ontology_nlp_file, ontology_nlp_emb_file = ontology_nlp_emb_file)\n\tonto_net_nlp_all_pairs = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb)\n\tonto_net_nlp = np.zeros((ncls, ncls))\n\tonto_net_bin = np.zeros((ncls, ncls))\n\tstack_net_bin = np.zeros((ncls, ncls))\n\tstack_net_nlp = np.zeros((ncls, ncls))\n\n\tfor n1 in onto_net:\n\t\tfor n2 in onto_net[n1]:\n\t\t\tif n1==n2:\n\t\t\t\tcontinue\n\t\t\tstack_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]\n\t\t\tstack_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]\n\t\t\tstack_net_bin[n1,n2] = 1\n\t\t\tstack_net_bin[n2,n1] = 1\n\tfor n1 in range(ncls):\n\t\tfor n2 in range(ncls):\n\t\t\tif cls2cls[n1,n2] == 1 or cls2cls[n2,n1] == 1:\n\t\t\t\tonto_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]\n\t\t\t\tonto_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]\n\t\t\t\tonto_net_bin[n1,n2] = 1\n\t\t\t\tonto_net_bin[n2,n1] = 1\n\treturn onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs\n\n\ndef create_consensus_networks(rsts, onto_net_mat, onto_net_nlp_all_pairs, cls2cls, diss=[2,3], thress=[1,0.8]):\n\tcls2cls_sp = graph_shortest_path(cls2cls,method='FW',directed =False)\n\tncls = np.shape(onto_net_mat)[0]\n\tnetworks = []\n\tfor rst in rsts:\n\t\tfor dis in diss:\n\t\t\tfor thres in thress:\n\t\t\t\tuse_net = np.copy(onto_net_mat)\n\t\t\t\tuse_net[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] = onto_net_nlp_all_pairs[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)]\n\t\t\t\tonto_net_rwr = RandomWalkRestart(use_net, rst)\n\t\t\t\tnetworks.append(onto_net_rwr)\n\treturn networks\n\ndef 
extend_prediction_2unseen(pred_Y_seen, networks, nseen, ratio=200, use_normalize=False):\n\tif not isinstance(networks, list):\n\t\tnetworks = [networks]\n\tpred_Y_all_totoal = 0.\n\tfor onto_net_rwr in networks:\n\t\tif use_normalize:\n\t\t\tonto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (np.shape(onto_net_rwr)[0], 1))\n\t\tpred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis]\n\t\tpred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:])\n\t\tpred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1)\n\t\tpred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio\n\t\tpred_Y_all_totoal += pred_Y_all\n\treturn pred_Y_all_totoal\n\ndef my_auprc(y_true, y_pred):\n\tprecision, recall, thresholds = precision_recall_curve(y_true, y_pred)\n\tarea = auc(recall, precision)\n\treturn area\n\ndef sampled_auprc(truths,preds):\n\tpos = np.where(truths == 1)[0]\n\tneg = np.where(truths == 0)[0]\n\tassert(len(pos) + len(neg) == len(truths))\n\tnneg = len(neg)\n\tnpos = len(pos)\n\tselect_neg = np.random.choice(nneg, npos*3, replace = True)\n\tselect_ind = np.concatenate((pos, select_neg))\n\treturn average_precision_score(truths[select_ind], preds[select_ind])\n\ndef evaluate(Y_pred_mat, Y_truth_vec, unseen_l, nseen, Y_truth_bin_mat = None, Y_pred_vec = None, Y_ind=None, Y_net = None, Y_net_mat = None, write_screen = True, write_to_file = None, combine_unseen = False, prefix='', metrics = ['AUROC(seen)','AUPRC(seen)','AUROC','AUPRC','AUROC(unseen)', 'AUPRC(unseen)','Accuracy@3','Accuracy@5']):\n\t#preprocess scores\n\tunseen_l = np.array(list(unseen_l))\n\tncell,nclass = np.shape(Y_pred_mat)\n\tnseen = nclass - len(unseen_l)\n\tif Y_ind is not None:\n\t\tnon_Y_ind = np.array(list(set(range(nclass)) - set(Y_ind)))\n\t\tif len(non_Y_ind)>0:\n\t\t\tY_pred_mat[:,non_Y_ind] = -1 * np.inf\n\tif Y_pred_vec is None:\n\t\tY_pred_vec = np.argmax(Y_pred_mat, axis=1)\n\tif Y_truth_bin_mat is None:\n\t\tY_truth_bin_mat = ConvertLabels(Y_truth_vec, nclass)\n\n\tY_pred_bin_mat = ConvertLabels(Y_pred_vec, nclass)\n\t#class-based metrics\n\tclass_auc_macro = np.full(nclass, np.nan)\n\tclass_auprc_macro = np.full(nclass, np.nan)\n\tclass_f1 = np.full(nclass, np.nan)\n\tfor i in range(nclass):\n\t\tif len(np.unique(Y_truth_bin_mat[:,i]))==2 and np.sum(Y_truth_bin_mat[:,i])>=10:\n\t\t\tclass_auc_macro[i] = roc_auc_score(Y_truth_bin_mat[:,i], Y_pred_mat[:,i])\n\t\t\tclass_auprc_macro[i] = sampled_auprc(Y_truth_bin_mat[:,i], Y_pred_mat[:,i])\n\t\t\tclass_f1[i] = f1_score(Y_truth_bin_mat[:,i], Y_pred_bin_mat[:,i])\n\n\n\t#sample-based metrics\n\textend_acc, extend_Y = extend_accuracy(Y_truth_vec, Y_pred_vec, Y_net, unseen_l)\n\tkappa = cohen_kappa_score(Y_pred_vec, Y_truth_vec)\n\textend_kappa = cohen_kappa_score(extend_Y, Y_truth_vec)\n\taccuracy = accuracy_score(Y_truth_vec, Y_pred_vec)\n\tprec_at_k_3 = precision_at_k(Y_pred_mat, Y_truth_vec, 3)\n\tprec_at_k_5 = precision_at_k(Y_pred_mat, Y_truth_vec, 5)\n\n\t#print ([(x,np.sum(Y_truth_bin_mat[:,unseen_l[i]])) for i,x in enumerate(class_auprc_macro[unseen_l]) if not np.isnan(x)])\n\tseen_auc_macro = np.nanmean(class_auc_macro[:nseen])\n\tseen_auprc_macro = np.nanmean(class_auprc_macro[:nseen])\n\tseen_f1 = np.nanmean(class_f1[:nseen])\n\tif len(unseen_l) == 0:\n\t\tunseen_auc_macro = 0\n\t\tunseen_auprc_macro = 0\n\t\tunseen_f1 = 0\n\telse:\n\t\tunseen_auc_macro = np.nanmean(class_auc_macro[unseen_l])\n\t\t#unseen_auprc_macro = np.nanmean([x for i,x in enumerate(class_auprc_macro[unseen_l]) if 
np.sum(Y_truth_bin_mat[:,unseen_l[i]])>100])#\n\t\tunseen_auprc_macro = np.nanmean(class_auprc_macro[unseen_l])\n\t\tunseen_f1 = np.nanmean(class_f1[unseen_l])\n\n\t#metrics = ['AUROC','AUPRC','unseen_AUROC', 'unseen_AUPRC','Cohens Kappa','Accuracy@3','Accuracy@5']\n\t#res_v = [seen_auc_macro, seen_auprc_macro, np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), extend_kappa, prec_at_k_3, prec_at_k_5, unseen_auc_macro, unseen_auprc_macro]\n\tall_v = {'AUROC':np.nanmean(class_auc_macro), 'AUPRC': np.nanmean(class_auprc_macro), 'AUROC(seen)':seen_auc_macro, 'AUPRC(seen)': seen_auprc_macro, 'AUROC(unseen)':unseen_auc_macro, 'AUPRC(unseen)': unseen_auprc_macro, 'Cohens Kappa':extend_kappa, 'Accuracy@3':prec_at_k_3, 'Accuracy@5':prec_at_k_5}\n\tres_v = {}\n\tfor metric in metrics:\n\t\tres_v[metric] = all_v[metric]\n\t#res_v = [seen_auc_macro, seen_auprc_macro, seen_f1, np.nanmean(class_auc_macro), np.nanmean(class_auprc_macro), np.nanmean(class_f1), unseen_auc_macro, unseen_auprc_macro, unseen_f1]\n\tif write_screen:\n\t\tprint (prefix, end='\\t')\n\t\tfor v in metrics:\n\t\t\tprint ('%.4f'%res_v[v], end='\\t')\n\t\tprint ('')\n\t\tsys.stdout.flush()\n\tif write_to_file is not None:\n\t\twrite_to_file.write(prefix+'\\t')\n\t\tfor v in metrics:\n\t\t\twrite_to_file.write('%.2f\\t'%res_v[v])\n\t\twrite_to_file.write('\\n')\n\t\twrite_to_file.flush()\n\treturn res_v\n\ndef precision_at_k(pred,truth,k):\n\tncell, nclass = np.shape(pred)\n\thit = 0.\n\tfor i in range(ncell):\n\t\tx = np.argsort(pred[i,:]*-1)\n\t\trank = np.where(x==truth[i])[0][0]\n\t\tif rank < k:\n\t\t\thit += 1.\n\tprec = hit / ncell\n\treturn prec\n\ndef write_anndata_data(test_label, test_AnnData, cl_obo_file, label_name):\n\tif len(np.shape(test_label))==2:\n\t\ttest_label = np.argmax(test_label, axis = 1)\n\tco2name, name2co = get_ontology_name(cl_obo_file)\n\tx = test_AnnData\n\tncell = np.shape(x.X)[0]\n\tprint (ncell, len(test_label))\n\tassert(ncell == len(test_label))\n\ttest_name = []\n\ttest_label_id = []\n\tfor i in range(ncell):\n\t\txx = i2tp[test_label[i]]\n\t\ttest_label_id.append(xx)\n\t\ttest_name.append(co2name[xx])\n\ttest_name = np.array(test_name)\n\ttest_label_id = np.array(test_label_id)\n\tx.obs['OnClass_annotation_ontology_ID'] = test_label\n\tx.obs['OnClass_annotation_ontology_name'] = test_name\n\treturn x\n\n\ndef read_type2genes(g2i, marker_gene,cl_obo_file):\n\tco2name, name2co = get_ontology_name(cl_obo_file)\n\n\tc2cnew = {}\n\tc2cnew['cd4+ t cell'] = 'CD4-positive, CXCR3-negative, CCR6-negative, alpha-beta T cell'.lower()\n\tc2cnew['chromaffin cells (enterendocrine)'] = 'chromaffin cell'.lower()\n\n\n\tc2cnew['mature NK T cell'] = 'mature NK T cell'.lower()\n\tc2cnew['cd8+ t cell'] = 'CD8-positive, alpha-beta cytotoxic T cell'.lower()\n\tfin = open(marker_gene)\n\tfin.readline()\n\ttp2genes = {}\n\tunfound = set()\n\tfor line in fin:\n\t\tw = line.strip().split('\\t')\n\t\tc1 = w[1].lower()\n\t\tc2 = w[2].lower()\n\t\tgenes = []\n\t\tfor ww in w[8:]:\n\t\t\tif ww.upper() in g2i:\n\t\t\t\tgenes.append(ww.upper())\n\t\tif len(genes)==0:\n\t\t\tcontinue\n\t\tif c1.endswith('s') and c1[:-1] in name2co:\n\t\t\tc1 = c1[:-1]\n\t\tif c2.endswith('s') and c2[:-1] in name2co:\n\t\t\tc2 = c2[:-1]\n\t\tif c1 + ' cell' in name2co:\n\t\t\tc1 +=' cell'\n\t\tif c2 + ' cell' in name2co:\n\t\t\tc2 +=' cell'\n\t\tif c1 in c2cnew:\n\t\t\tc1 = c2cnew[c1]\n\t\tif c2 in c2cnew:\n\t\t\tc2 = c2cnew[c2]\n\t\tif c1 in name2co:\n\t\t\ttp2genes[name2co[c1]] = genes\n\t\telse:\n\t\t\tunfound.add(c1)\n\t\tif c2 in 
name2co:\n\t\t\ttp2genes[name2co[c2]] = genes\n\t\telse:\n\t\t\tunfound.add(c2)\n\tfin.close()\n\n\treturn tp2genes\n\n\n\n\ndef extend_accuracy(test_Y, test_Y_pred_vec, Y_net, unseen_l):\n\tunseen_l = set(unseen_l)\n\tn = len(test_Y)\n\tacc = 0.\n\tntmp = 0.\n\tnew_pred = []\n\tfor i in range(n):\n\t\tif test_Y[i] in unseen_l and test_Y_pred_vec[i] in unseen_l:\n\t\t\tif test_Y_pred_vec[i] in Y_net[test_Y[i]] and Y_net[test_Y[i]][test_Y_pred_vec[i]] == 1:\n\t\t\t\tacc += 1\n\t\t\t\tntmp += 1\n\t\t\t\tnew_pred.append(test_Y[i])\n\t\t\telse:\n\t\t\t\tnew_pred.append(test_Y_pred_vec[i])\n\t\telse:\n\t\t\tif test_Y[i] == test_Y_pred_vec[i]:\n\t\t\t\tacc += 1\n\t\t\tnew_pred.append(test_Y_pred_vec[i])\n\tnew_pred = np.array(new_pred)\n\treturn acc/n, new_pred\n\n\ndef run_scanorama_multiply_datasets(datasets, genes, scan_dim = 100):\n\tsparse_datasets = []\n\tfor dataset in datasets:\n\t\tsparse_datasets.append(sparse.csr_matrix(dataset))\n\tdatasets, genes = merge_datasets(sparse_datasets, genes)\n\tdatasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim)\n\tdatasets_dimred, sparse_dataset_correct = my_assemble(datasets_dimred, expr_datasets = datasets, sigma=150)\n\tdataset_correct = []\n\tfor sp in sparse_dataset_correct:\n\t\tdataset_correct.append(np.power(sp.todense(), 2))\n\treturn datasets_dimred, dataset_correct\n\n\ndef run_scanorama_same_genes(features, batch_labels, scan_dim = 100):\n\tbatchs = np.unique(batch_labels)\n\tnbatch = len(batchs)\n\tif nbatch == 1:\n\t\treturn features\n\tncell, ngene = np.shape(features)\n\tassert(ncell == len(batch_labels))\n\tgenes = []\n\tdatasets = []\n\tindexs = []\n\tfor i in range(nbatch):\n\t\tgenes.append(np.array(range(ngene)))\n\t\tindex = np.where(batch_labels == batchs[i])[0]\n\t\tdataset = features[index,:]\n\t\tprint (batchs[i], np.shape(dataset))\n\t\tdatasets.append(dataset)\n\t\tindexs.append(index)\n\t_, dataset_correct = run_scanorama_multiply_datasets(datasets, genes, scan_dim = scan_dim)\n\tassert(len(dataset_correct)) == nbatch\n\tfor i in range(nbatch):\n\t\tfeatures[indexs[i],:] = dataset_correct[i]\n\treturn features\n\n\ndef my_assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,\n\t\t\t sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,\n\t\t\t ds_names=None, batch_size=None,\n\t\t\t geosketch=False, geosketch_max=20000, alignments=None, matches=None): # reimplement part of scanorama to return the corrected expression (instead of low-d vectors)\n\t#this code is copy and paste from scanorama in order to output the expression. 
Please check their tool and cite their paper if you used this function.\n\tif len(datasets) == 1:\n\t\treturn datasets\n\n\tif alignments is None and matches is None:\n\t\talignments, matches = find_alignments(\n\t\t\tdatasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,\n\t\t)\n\n\tds_assembled = {}\n\tpanoramas = []\n\tct = 0\n\tfor i, j in alignments:\n\t\tct += 1\n\t\tprint (ct)\n\t\tsys.stdout.flush()\n\t\tif verbose:\n\t\t\tif ds_names is None:\n\t\t\t\tprint('Processing datasets {}'.format((i, j)))\n\t\t\telse:\n\t\t\t\tprint('Processing datasets {} <=> {}'.\n\t\t\t\t\t format(ds_names[i], ds_names[j]))\n\n\t\t# Only consider a dataset a fixed amount of times.\n\t\tif not i in ds_assembled:\n\t\t\tds_assembled[i] = 0\n\t\tds_assembled[i] += 1\n\t\tif not j in ds_assembled:\n\t\t\tds_assembled[j] = 0\n\t\tds_assembled[j] += 1\n\t\tif ds_assembled[i] > 3 and ds_assembled[j] > 3:\n\t\t\tcontinue\n\n\t\t# See if datasets are involved in any current panoramas.\n\t\tpanoramas_i = [ panoramas[p] for p in range(len(panoramas))\n\t\t\t\t\t\tif i in panoramas[p] ]\n\t\tassert(len(panoramas_i) <= 1)\n\t\tpanoramas_j = [ panoramas[p] for p in range(len(panoramas))\n\t\t\t\t\t\tif j in panoramas[p] ]\n\t\tassert(len(panoramas_j) <= 1)\n\n\t\tif len(panoramas_i) == 0 and len(panoramas_j) == 0:\n\t\t\tif datasets[i].shape[0] < datasets[j].shape[0]:\n\t\t\t\ti, j = j, i\n\t\t\tpanoramas.append([ i ])\n\t\t\tpanoramas_i = [ panoramas[-1] ]\n\n\t\t# Map dataset i to panorama j.\n\t\tif len(panoramas_i) == 0:\n\t\t\tcurr_ds = datasets[i]\n\t\t\tcurr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n\t\t\tmatch = []\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_j[0]:\n\t\t\t\tif i < p and (i, p) in matches:\n\t\t\t\t\tmatch.extend([ (a, b + base) for a, b in matches[(i, p)] ])\n\t\t\t\telif i > p and (p, i) in matches:\n\t\t\t\t\tmatch.extend([ (b, a + base) for a, b in matches[(p, i)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\n\t\t\tds_ind = [ a for a, _ in match ]\n\t\t\tref_ind = [ b for _, b in match ]\n\n\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t batch_size=batch_size)\n\t\t\tdatasets[i] = curr_ds + bias\n\n\t\t\tif expr_datasets:\n\t\t\t\tcurr_ds = expr_datasets[i]\n\t\t\t\tcurr_ref = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t\tfor p in panoramas_j[0] ])\n\t\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n\t\t\t\t\t\t\t\t sigma=sigma, cn=True, batch_size=batch_size)\n\t\t\t\texpr_datasets[i] = curr_ds + bias\n\n\t\t\tpanoramas_j[0].append(i)\n\n\t\t# Map dataset j to panorama i.\n\t\telif len(panoramas_j) == 0:\n\t\t\tcurr_ds = datasets[j]\n\t\t\tcurr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n\n\t\t\tmatch = []\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tif j < p and (j, p) in matches:\n\t\t\t\t\tmatch.extend([ (a, b + base) for a, b in matches[(j, p)] ])\n\t\t\t\telif j > p and (p, j) in matches:\n\t\t\t\t\tmatch.extend([ (b, a + base) for a, b in matches[(p, j)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\n\t\t\tds_ind = [ a for a, _ in match ]\n\t\t\tref_ind = [ b for _, b in match ]\n\n\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t batch_size=batch_size)\n\t\t\tdatasets[j] = curr_ds + bias\n\n\t\t\tif expr_datasets:\n\t\t\t\tcurr_ds = expr_datasets[j]\n\t\t\t\tcurr_ref = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t\tfor p in panoramas_i[0] ])\n\t\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t\t cn=True, 
batch_size=batch_size)\n\t\t\t\texpr_datasets[j] = curr_ds + bias\n\n\t\t\tpanoramas_i[0].append(j)\n\n\t\t# Merge two panoramas together.\n\t\telse:\n\t\t\tcurr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n\t\t\tcurr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n\t\t\t# Find base indices into each panorama.\n\t\t\tbase_i = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tif p == i: break\n\t\t\t\tbase_i += datasets[p].shape[0]\n\t\t\tbase_j = 0\n\t\t\tfor p in panoramas_j[0]:\n\t\t\t\tif p == j: break\n\t\t\t\tbase_j += datasets[p].shape[0]\n\n\t\t\t# Find matching indices.\n\t\t\tmatch = []\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tif p == i and j < p and (j, p) in matches:\n\t\t\t\t\tmatch.extend([ (b + base, a + base_j)\n\t\t\t\t\t\t\t\t for a, b in matches[(j, p)] ])\n\t\t\t\telif p == i and j > p and (p, j) in matches:\n\t\t\t\t\tmatch.extend([ (a + base, b + base_j)\n\t\t\t\t\t\t\t\t for a, b in matches[(p, j)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_j[0]:\n\t\t\t\tif p == j and i < p and (i, p) in matches:\n\t\t\t\t\tmatch.extend([ (a + base_i, b + base)\n\t\t\t\t\t\t\t\t for a, b in matches[(i, p)] ])\n\t\t\t\telif p == j and i > p and (p, i) in matches:\n\t\t\t\t\tmatch.extend([ (b + base_i, a + base)\n\t\t\t\t\t\t\t\t for a, b in matches[(p, i)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\n\t\t\tds_ind = [ a for a, _ in match ]\n\t\t\tref_ind = [ b for _, b in match ]\n\n\t\t\t# Apply transformation to entire panorama.\n\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t batch_size=batch_size)\n\t\t\tcurr_ds += bias\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tn_cells = datasets[p].shape[0]\n\t\t\t\tdatasets[p] = curr_ds[base:(base + n_cells), :]\n\t\t\t\tbase += n_cells\n\n\t\t\tif not expr_datasets is None:\n\t\t\t\tcurr_ds = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t for p in panoramas_i[0] ])\n\t\t\t\tcurr_ref = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t\tfor p in panoramas_j[0] ])\n\t\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n\t\t\t\t\t\t\t\t sigma=sigma, cn=True, batch_size=batch_size)\n\t\t\t\tcurr_ds += bias\n\t\t\t\tbase = 0\n\t\t\t\tfor p in panoramas_i[0]:\n\t\t\t\t\tn_cells = expr_datasets[p].shape[0]\n\t\t\t\t\texpr_datasets[p] = curr_ds[base:(base + n_cells), :]\n\t\t\t\t\tbase += n_cells\n\n\t\t\t# Merge panoramas i and j and delete one.\n\t\t\tif panoramas_i[0] != panoramas_j[0]:\n\t\t\t\tpanoramas_i[0] += panoramas_j[0]\n\t\t\t\tpanoramas.remove(panoramas_j[0])\n\n\t\t# Visualize.\n\t\tif view_match:\n\t\t\tplot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)\n\n\treturn datasets, expr_datasets\n"
] | [
[
"numpy.sum",
"numpy.save",
"scipy.sparse.linalg.svds",
"numpy.diag",
"numpy.random.seed",
"numpy.argsort",
"numpy.copy",
"numpy.log",
"sklearn.metrics.precision_recall_curve",
"numpy.nan_to_num",
"numpy.vstack",
"numpy.log1p",
"numpy.transpose",
"numpy.nanmean",
"sklearn.metrics.auc",
"numpy.random.choice",
"sklearn.metrics.f1_score",
"sklearn.preprocessing.normalize",
"numpy.isnan",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.where",
"numpy.unique",
"numpy.mean",
"numpy.sqrt",
"numpy.eye",
"numpy.load",
"numpy.zeros",
"numpy.dot",
"numpy.argmax",
"sklearn.utils.graph_shortest_path.graph_shortest_path",
"sklearn.metrics.accuracy_score",
"numpy.max",
"numpy.std",
"sklearn.metrics.cohen_kappa_score",
"sklearn.metrics.average_precision_score",
"numpy.linalg.norm",
"sklearn.model_selection.train_test_split",
"scipy.sparse.vstack",
"numpy.percentile",
"numpy.log2",
"numpy.random.shuffle",
"scipy.sparse.csr_matrix",
"numpy.exp",
"sklearn.metrics.roc_auc_score",
"numpy.shape",
"numpy.array",
"numpy.concatenate",
"numpy.full"
]
] |
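The evaluation utilities in the record above report Accuracy@3 and Accuracy@5 through precision_at_k, which sorts each cell's class scores in descending order and counts a hit when the true class lands inside the top k. A minimal self-contained sketch of that logic on a toy score matrix (toy_scores and toy_labels are invented for illustration, not taken from the dataset):

import numpy as np

def precision_at_k(pred, truth, k):
    # pred: (n_cells, n_classes) score matrix; truth: (n_cells,) integer class labels
    ncell, nclass = np.shape(pred)
    hit = 0.
    for i in range(ncell):
        order = np.argsort(pred[i, :] * -1)       # class indices sorted by descending score
        rank = np.where(order == truth[i])[0][0]  # position of the true class in that ranking
        if rank < k:
            hit += 1.
    return hit / ncell

toy_scores = np.array([[0.1, 0.7, 0.2],
                       [0.5, 0.3, 0.2],
                       [0.2, 0.2, 0.6]])
toy_labels = np.array([1, 2, 2])
print(precision_at_k(toy_scores, toy_labels, k=1))  # 2/3: rows 0 and 2 rank their true class first
print(precision_at_k(toy_scores, toy_labels, k=3))  # 1.0: every true class is within the top 3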
HongLabTHU/dual-mVEPs | [
"f387584865a45a7257d8203fcb9522820e1311de"
] | [
"Offline/dataset.py"
] | [
"import glob\nimport os\nimport warnings\nfrom datetime import datetime\nfrom copy import deepcopy\n\nimport numpy as np\nimport pyedflib\nimport scipy.io as sio\n\nfrom config import cfg\nfrom thirdparty.cerebus import NsxFile, NevFile\nfrom thirdparty.nex import Reader as NexReader\nfrom .utils import find_nearest_time\n\n\ndef _load_neuracle(data_dir):\n \"\"\"\n neuracle file loader\n :param data_dir: root data dir for the experiment\n :return:\n data: ndarray, (channels, timesteps)\n ch_name: list, name of channels\n timestamp: list, index of trigger\n \"\"\"\n f = {\n 'data': os.path.join(data_dir, 'data.bdf'),\n 'evt': os.path.join(data_dir, 'evt.bdf')\n }\n # read data\n f_data = pyedflib.EdfReader(f['data'])\n ch_names = f_data.getSignalLabels()\n data = np.array([f_data.readSignal(i) for i in range(f_data.signals_in_file)])\n\n # sample frequiencies\n sfreq = f_data.getSampleFrequencies()\n assert np.unique(sfreq).size == 1\n if cfg.amp_info.samplerate != sfreq[0]:\n warnings.warn('Samplerate in config file does not equal to data file record')\n cfg.amp_info.samplerate = int(sfreq[0])\n\n # read event\n f_evt = pyedflib.EdfReader(f['evt'])\n event, _, _ = f_evt.readAnnotations()\n event = list(map(lambda x: int(x * cfg.amp_info.samplerate), event))\n\n return data, ch_names, event\n\n\ndef _load_usbamp(data_dir):\n \"\"\"\n USBAmp file loader\n :param data_dir: root dir\n :return:\n data: ndarray, (channels, timesteps)\n ch_name: list, name of channels\n timestamp: list, index of trigger\n \"\"\"\n # edf USBAmp\n files = glob.glob(os.path.join(data_dir, '*.edf'))\n assert len(files) == 1\n f = pyedflib.EdfReader(files[0])\n ch_names = f.getSignalLabels()\n # filter channel\n # find trigger channel\n triggers = []\n sig = []\n for i, chan in enumerate(ch_names):\n if 'trigger' in chan:\n triggers.append(i)\n else:\n sig.append(i)\n sigbuf = np.array([f.readSignal(i) for i in range(len(ch_names))])\n ch_names = [ch_names[i] for i in sig]\n trigger = -1\n for ch_ind in triggers:\n if not np.allclose(np.diff(sigbuf[ch_ind]), 0):\n trigger = ch_ind\n break\n diff = np.diff(sigbuf[trigger])\n timestamp = np.nonzero(np.logical_and(diff <= 1, diff >= 0.2))[0].tolist()\n data = sigbuf[sig]\n return data, ch_names, timestamp\n\n\ndef _load_nex(data_dir):\n \"\"\"\n nex file loader\n :param data_dir:\n :return:\n data: ndarray, shape (ch, timesteps)\n ch_names: list, name of each channel\n timestamps: list, stimulation onset\n \"\"\"\n files = glob.glob(os.path.join(data_dir, '*.nex'))\n assert len(files) == 1\n\n reader = NexReader(useNumpy=True)\n data = reader.ReadNexFile(files[0])\n\n var = data['Variables']\n ch_names = []\n trigger_ch = None\n con_data = []\n samplerate = cfg.amp_info.samplerate\n for i, ch in enumerate(var):\n if 'CH' in ch['Header']['Name']:\n ch_names.append(ch['Header']['Name'])\n con_data.append(ch['ContinuousValues'])\n samplerate = ch['Header']['SamplingRate']\n if 'digin' == ch['Header']['Name']:\n trigger_ch = i\n if samplerate != cfg.amp_info.samplerate:\n warnings.warn('Samplerate in config file does not equal to data file record, recorded value is %d' % samplerate)\n assert trigger_ch is not None\n timestamp = np.round(data['Variables'][trigger_ch]['Timestamps'] * samplerate).astype(np.int32).tolist()\n con_data = np.array(con_data)\n return con_data, ch_names, timestamp\n\n\ndef _load_cerebus(data_dir):\n # search data_dir\n nsx_files = glob.glob(os.path.join(data_dir, '*.ns*'))\n nev_files = glob.glob(os.path.join(data_dir, '*.nev'))\n assert 
len(nsx_files) == len(nev_files) == 1\n # loading\n f_data = NsxFile(nsx_files[0])\n f_evt = NevFile(nev_files[0])\n data = f_data.getdata()\n evt = f_evt.getdata()\n\n f_data.close()\n f_evt.close()\n\n # some basic information\n samplerate = data['samp_per_s']\n if cfg.amp_info.samplerate != samplerate:\n warnings.warn('Samplerate in config file does not equal to data file record')\n cfg.amp_info.samplerate = samplerate\n\n timestampresolution = f_evt.basic_header['TimeStampResolution']\n ch_names = []\n for info in f_data.extended_headers:\n ch_names.append(info['ElectrodeLabel'])\n\n event = evt['dig_events']['TimeStamps'][0]\n event = list(map(lambda x: int(x / timestampresolution * cfg.amp_info.samplerate), event))\n return data['data'], ch_names, event\n\n\nclass Dataset:\n \"\"\"\n for loading data and event order.\n \"\"\"\n data_format = {\n 'nex': _load_nex,\n 'ns3': _load_cerebus,\n 'nev': _load_cerebus,\n 'edf': _load_usbamp,\n 'bdf': _load_neuracle\n }\n\n def __init__(self, subject, date=None, loaddata=True):\n self.subject = subject\n self._subj_path = os.path.dirname(__file__) + '/../data/' + subject\n if date is None:\n self._date = find_nearest_time(self._subj_path)\n else:\n if isinstance(date, datetime):\n # convert datetime to str\n self._date = date.strftime(\"%Y-%m-%d-%H-%M-%S\")\n else:\n self._date = date\n print(self._date)\n self.root_dir = os.path.join(self._subj_path, self._date)\n\n # self.montage = OrderedSet(cfg.subj_info.montage)\n self.montage = deepcopy(cfg.subj_info.montage)\n\n # load stim order\n self.events = self.load_event()\n\n if loaddata:\n self.load_all()\n else:\n self.data, self.ch_names, self.timestamp, self.montage_indices, self.events_backup = [None] * 5\n\n def load_all(self):\n # load data and timestamps\n dataarray, ch_names, timestamp = self._load_data()\n timestamp = Dataset.ts_check(timestamp)\n self.data = dataarray\n # list to set\n self.ch_names = ch_names\n self.timestamp = timestamp\n self.montage_indices = self.get_channel_indices(self.montage, self.ch_names)\n\n self.events_backup = self.events.copy()\n if cfg.exp_config.bidir:\n assert 2 * len(timestamp) == self.events.size, print('Dual-directional: ', len(timestamp), self.events.size)\n self.events = self.events[:, ::2]\n else:\n assert len(timestamp) == self.events.size, print('Unidirectional: ', len(timestamp), self.events.size)\n\n def _load_data(self):\n \"\"\"\n Read data according to file format\n :return:\n dataext: str, data file name\n\n \"\"\"\n walk_path = self.root_dir\n loader = None\n for f in os.listdir(walk_path):\n _ext = f.split('.')[-1]\n try:\n loader = Dataset.data_format[_ext]\n break\n except KeyError:\n pass\n if loader is None:\n raise FileNotFoundError('No matching data format found')\n return loader(walk_path)\n\n def load_event(self):\n walk_path = self.root_dir\n file = glob.glob(os.path.join(walk_path, self.subject) + '*')\n assert len(file) == 1\n file = file[0]\n\n if file.endswith('.mat'):\n raw = sio.loadmat(file)\n order = raw['stim_order']\n order -= 1\n return order.reshape((-1, 12))\n else:\n with open(file) as f:\n stim_order = [[int(x) for x in line.split()] for line in f if len(line) > 1]\n return np.array(stim_order)\n\n @staticmethod\n def get_channel_indices(target_channels, channels_in_data):\n \"\"\"\n Get corresponding index number for channels in target channels\n :param target_channels: list, target channel names\n :param channels_in_data: list, all channel names in data source.\n :return:\n \"\"\"\n indices = []\n # build a 
dictionary for indexing\n channel_book = {name: i for i, name in enumerate(channels_in_data)}\n for ch in target_channels:\n try:\n indices.append(channel_book[ch])\n except ValueError as err:\n print(err)\n\n return indices\n\n @staticmethod\n def ts_check(ts):\n # check time stamp intervals.\n # In our experience, sometimes an accidental wrong trigger may appear at the beginning during recording.\n fs = cfg.amp_info.samplerate\n while len(ts) % 12 and (not (fs * 0.1 <= ts[1] - ts[0] <= fs * 0.3)):\n del ts[0]\n return ts\n"
] | [
[
"scipy.io.loadmat",
"numpy.diff",
"numpy.logical_and",
"numpy.array",
"numpy.round",
"numpy.unique"
]
] |
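The _load_usbamp loader in the record above recovers stimulation onsets by thresholding the first-order difference of the trigger channel (np.diff combined with np.logical_and and np.nonzero). A standalone sketch of that detection step on a synthetic trigger trace (the pulse amplitudes and indices are made up for illustration):

import numpy as np

# synthetic trigger channel: flat baseline with two ~0.5 rising edges
trigger = np.zeros(20)
trigger[5:8] = 0.5    # first pulse
trigger[14:17] = 0.5  # second pulse

diff = np.diff(trigger)
# keep only moderate positive steps, mirroring the 0.2 <= step <= 1 window used in the loader
timestamps = np.nonzero(np.logical_and(diff <= 1, diff >= 0.2))[0].tolist()
print(timestamps)  # [4, 13]: sample indices immediately before each rising edge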
pabloserna/SentimentAnalysisinAWS | [
"d94572665442ef6f49deb07ed78f8104654fefc3"
] | [
"train/model_stack.py"
] | [
"import torch.nn as nn\n\nclass LSTMClassifier(nn.Module):\n \"\"\"\n This is the simple RNN model we will be using to perform Sentiment Analysis.\n \"\"\"\n\n def __init__(self, embedding_dim, hidden_dim, vocab_size):\n \"\"\"\n Initialize the model by settingg up the various layers.\n \"\"\"\n super(LSTMClassifier, self).__init__()\n\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\n self.lstmA = nn.LSTM(embedding_dim, hidden_dim)\n self.lstmB = nn.LSTM(hidden_dim, hidden_dim)\n self.dense = nn.Linear(in_features=hidden_dim, out_features=1)\n self.sig = nn.Sigmoid()\n \n self.word_dict = None\n\n def forward(self, x):\n \"\"\"\n Perform a forward pass of our model on some input.\n \"\"\"\n x = x.t()\n lengths = x[0,:]\n reviews = x[1:,:]\n embeds = self.embedding(reviews)\n lstm_out1, _ = self.lstmA(embeds)\n lstm_out, _ = self.lstmB(lstm_out1)\n out = self.dense(lstm_out)\n out = out[lengths - 1, range(len(lengths))]\n return self.sig(out.squeeze())"
] | [
[
"torch.nn.Sigmoid",
"torch.nn.LSTM",
"torch.nn.Linear",
"torch.nn.Embedding"
]
] |
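The LSTMClassifier.forward in the record above transposes its input and reads the first row as review lengths, so each batch row must be laid out as [length, token_1, ..., token_T] with right padding, and the sigmoid output is taken at time step length - 1 of each sequence. A small usage sketch under assumed sizes (the importable module name, vocab_size=50, and the token ids are illustrative only):

import torch
from model_stack import LSTMClassifier  # assumes the record's train/model_stack.py is importable as model_stack

model = LSTMClassifier(embedding_dim=32, hidden_dim=100, vocab_size=50)

# each row: [review_length, token_1, ..., token_T], zero-padded on the right
batch = torch.tensor([
    [3, 7, 12, 4, 0,  0],   # review with 3 tokens
    [5, 9,  2, 8, 3, 41],   # review with 5 tokens
])
with torch.no_grad():
    probs = model(batch)
print(probs.shape)  # torch.Size([2]): one sentiment probability per review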
KlausBSautter/Kratos | [
"1ceb900dbacfab344e27e32285250eafc52093ec"
] | [
"applications/RomApplication/python_scripts/structural_mechanics_analysis_rom.py"
] | [
"import KratosMultiphysics\nimport KratosMultiphysics.RomApplication as romapp\nimport KratosMultiphysics.StructuralMechanicsApplication\nfrom KratosMultiphysics.RomApplication.empirical_cubature_method import EmpiricalCubatureMethod\nfrom KratosMultiphysics.RomApplication import python_solvers_wrapper_rom as solver_wrapper\nfrom KratosMultiphysics.StructuralMechanicsApplication.structural_mechanics_analysis import StructuralMechanicsAnalysis\n\nimport json\nimport numpy as np\n\nclass StructuralMechanicsAnalysisROM(StructuralMechanicsAnalysis):\n\n def __init__(self,model,project_parameters, hyper_reduction_element_selector = None):\n super().__init__(model,project_parameters)\n if hyper_reduction_element_selector != None :\n if hyper_reduction_element_selector == \"EmpiricalCubature\":\n self.hyper_reduction_element_selector = EmpiricalCubatureMethod()\n self.time_step_residual_matrix_container = []\n else:\n err_msg = \"The requested element selection method \\\"\" + hyper_reduction_element_selector + \"\\\" is not in the rom application\\n\"\n err_msg += \"Available options are: \\\"EmpiricalCubature\\\"\"\n raise Exception(err_msg)\n else:\n self.hyper_reduction_element_selector = None\n\n #### Internal functions ####\n def _CreateSolver(self):\n \"\"\" Create the Solver (and create and import the ModelPart if it is not alread in the model) \"\"\"\n ## Solver construction\n with open('RomParameters.json') as rom_parameters:\n rom_settings = KratosMultiphysics.Parameters(rom_parameters.read())\n self.project_parameters[\"solver_settings\"].AddValue(\"rom_settings\", rom_settings[\"rom_settings\"])\n return solver_wrapper.CreateSolverByParameters(self.model, self.project_parameters[\"solver_settings\"],self.project_parameters[\"problem_data\"][\"parallel_type\"].GetString())\n\n def _GetSimulationName(self):\n return \"::[ROM Simulation]:: \"\n\n def ModifyAfterSolverInitialize(self):\n \"\"\"Here is where the ROM_BASIS is imposed to each node\"\"\"\n super().ModifyAfterSolverInitialize()\n computing_model_part = self._solver.GetComputingModelPart()\n with open('RomParameters.json') as f:\n data = json.load(f)\n nodal_dofs = len(data[\"rom_settings\"][\"nodal_unknowns\"])\n nodal_modes = data[\"nodal_modes\"]\n counter = 0\n rom_dofs= self.project_parameters[\"solver_settings\"][\"rom_settings\"][\"number_of_rom_dofs\"].GetInt()\n for node in computing_model_part.Nodes:\n aux = KratosMultiphysics.Matrix(nodal_dofs, rom_dofs)\n for j in range(nodal_dofs):\n Counter=str(node.Id)\n for i in range(rom_dofs):\n aux[j,i] = nodal_modes[Counter][j][i]\n node.SetValue(romapp.ROM_BASIS, aux ) # ROM basis\n counter+=1\n if self.hyper_reduction_element_selector != None:\n if self.hyper_reduction_element_selector.Name == \"EmpiricalCubature\":\n self.ResidualUtilityObject = romapp.RomResidualsUtility(self._GetSolver().GetComputingModelPart(), self.project_parameters[\"solver_settings\"][\"rom_settings\"], self._GetSolver().get_solution_scheme())\n\n def FinalizeSolutionStep(self):\n if self.hyper_reduction_element_selector != None:\n if self.hyper_reduction_element_selector.Name == \"EmpiricalCubature\":\n print('\\n\\n\\n\\nGenerating matrix of residuals')\n ResMat = self.ResidualUtilityObject.GetResiduals()\n NP_ResMat = np.array(ResMat, copy=False)\n self.time_step_residual_matrix_container.append(NP_ResMat)\n super().FinalizeSolutionStep()\n\n def Finalize(self):\n super().Finalize()\n if self.hyper_reduction_element_selector != None:\n if self.hyper_reduction_element_selector.Name == 
\"EmpiricalCubature\":\n OriginalNumberOfElements = self._GetSolver().GetComputingModelPart().NumberOfElements()\n ModelPartName = self._GetSolver().settings[\"model_import_settings\"][\"input_filename\"].GetString()\n self. hyper_reduction_element_selector.SetUp(self.time_step_residual_matrix_container, OriginalNumberOfElements, ModelPartName)\n self.hyper_reduction_element_selector.Run()\n\n\n\n\n\n"
] | [
[
"numpy.array"
]
] |
sentinel-hub/multi-temporal-super-resolution | [
"5ef642304a980db87bdb935a7a7450bd649f8912"
] | [
"sr/data_loader.py"
] | [
"import os\nfrom collections import OrderedDict\nfrom typing import Tuple, List, Callable\n\nfrom fs_s3fs import S3FS\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom skimage.exposure import match_histograms\nfrom datetime import datetime\nfrom eolearn.core import EOPatch\n\n\ndef augment(\n lr: np.ndarray,\n hr: np.ndarray,\n flip: bool = True,\n rotate: bool = True,\n distribution_shift: bool = False,\n distribution_scale: bool = False,\n permute_timestamps: bool = True,\n max_distribution_shift: float = 0.25,\n max_distribution_scale_diff: float = 0.25,\n proba_of_original: float = 0.67\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Performs a series of image augmentations with specified probability.\n\n :param lr: array of low-resolution images, shape is `CxTxHxW`\n :param hr: array of high-resolution images, shape is `CxHxW`\n :param flip: whether to randomly flip height or width of arrays\n :param rotate: whether to randomly rotate the arrays\n :param distribution_shift: add an offset to the distribution\n :param distribution_scale: scale the channels distribution\n :param permute_timestamps: permute timestamps (not desired for HRN)\n :param max_distribution_shift: set max distribution shift used in distribution shift augmentation\n :param max_distribution_scale_diff: set max distribution scale used in distribution scale augmentation\n :param proba_of_original: set probability of not modifying original patch, e.g. 1 means no augmetnations\n :returns: augmented lr and hr arrays\n \"\"\"\n\n # Base probability which, after `n_aug_conditions`, reduces to `proba_of_original`\n n_aug_conditions = sum(1. for aug_op in (flip, rotate, distribution_shift, distribution_scale, permute_timestamps)\n if aug_op)\n rng_threshold = proba_of_original ** (1. / n_aug_conditions)\n\n if flip and np.random.random() > rng_threshold:\n flip_axis = np.random.choice([-2, -1])\n lr = np.flip(lr, axis=flip_axis)\n hr = np.flip(hr, axis=flip_axis)\n\n if rotate and np.random.random() > rng_threshold:\n k = np.random.choice(np.arange(-2, 3))\n\n lr = np.rot90(lr, k=k, axes=(-2, -1))\n hr = np.rot90(hr, k=k, axes=(-2, -1))\n\n if distribution_shift and np.random.random() > rng_threshold:\n d_shift = (np.random.random() - 0.5) * max_distribution_shift\n\n lr = lr + d_shift\n hr = hr + d_shift\n\n if distribution_scale and np.random.random() > rng_threshold:\n d_scale = 1. 
+ (np.random.random() - 0.5) * max_distribution_scale_diff\n\n lr_mean = np.mean(lr, axis=(-2, -1))[..., None, None]\n hr_mean = np.mean(hr, axis=(-2, -1))[..., None, None]\n\n lr = (lr - lr_mean) * d_scale + lr_mean\n hr = (hr - hr_mean) * d_scale + hr_mean\n\n if permute_timestamps and np.random.random() > rng_threshold:\n # expects lr in `CxTxHxW` shape\n indices = np.random.permutation(lr.shape[1])\n lr = lr[:, indices]\n\n return lr, hr\n\n\ndef pad_to_k(feat: np.ndarray, k: int = 16, pad_to_front: bool = True) -> np.ndarray:\n \"\"\" Create an array with first dimension equal to k, filling with 0s in front or at back \"\"\"\n n_pad = k - len(feat)\n\n if n_pad < 0:\n raise ValueError(f'Can not pad when length of features: {len(feat)} is longer than k: {k}')\n\n (_, h, w, c) = feat.shape\n if pad_to_front:\n feat = np.concatenate((np.zeros(shape=(n_pad, h, w, c)), feat))\n else:\n feat = np.concatenate((feat, np.zeros(shape=(n_pad, h, w, c))))\n\n return feat\n\n\nclass ImageSet(OrderedDict):\n \"\"\"\n An OrderedDict derived class to group the assets of an imageset, with a pretty-print functionality.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ImageSet, self).__init__(*args, **kwargs)\n\n def __repr__(self):\n dict_info = f\"{'name':>10} : {self['name']}\"\n\n for name, v in self.items():\n if hasattr(v, 'shape'):\n dict_info += f\"\\n{name:>10} : {v.shape} {v.__class__.__name__} ({v.dtype})\"\n else:\n dict_info += f\"\\n{name:>10} : {v.__class__.__name__} ({v})\"\n return dict_info\n\n\ndef read_imageset(imset_file: str,\n filesystem: S3FS = None,\n normalize: bool = True,\n country_norm_df: pd.DataFrame = None,\n norm_deimos_npz: np.lib.npyio.NpzFile = None,\n norm_s2_npz: np.lib.npyio.NpzFile = None,\n n_views: int = 16,\n padding: str = 'zeros',\n histogram_matching: bool = False) -> ImageSet:\n \"\"\"\n Retrieves all assets from the given directory.\n\n :param imset_file: name of npz file with sample imageset\n :param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk\n :param normalize: whether to normalize data or not\n :param country_norm_df: S2 median/std normalization factors stored per country\n :param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS\n :param norm_s2_npz: 1st and 99th percentile normalization factors for S2\n :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time\n frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first\n :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. 
Supported\n options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of\n timeframes are taken\n :param histogram_matching: whether to match the histogram between the HR and the corresponding LR image\n \"\"\"\n assert padding in ['zeros', 'repeat']\n\n # Read asset names\n npz = np.load(filesystem.openbin(imset_file), allow_pickle=True) if filesystem else np.load(imset_file,\n allow_pickle=True)\n \n features = npz['features']\n hr = npz['labels']\n\n if normalize:\n country = npz['countries']\n country_stats = country_norm_df[country_norm_df.country == str(country)]\n norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values\n\n norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values\n features = (features - norm_median) / norm_std\n\n deimos_p1 = norm_deimos_npz['p1']\n deimos_p99 = norm_deimos_npz['p99']\n\n s2_p1 = norm_s2_npz['p1']\n s2_p99 = norm_s2_npz['p99']\n\n hr = (hr - deimos_p1) / (deimos_p99 - deimos_p1)\n features = (features - s2_p1) / (s2_p99 - s2_p1)\n\n alphas = np.ones(n_views)\n\n if histogram_matching:\n hr = match_histograms(hr, features[-1], multichannel=True)\n\n n_feature_timestamps = len(features)\n if n_feature_timestamps < n_views:\n if padding == 'zeros':\n features = pad_to_k(features, n_views, pad_to_front=False)\n alphas[n_feature_timestamps:] = 0\n elif padding == 'repeat':\n n_pad = n_views - n_feature_timestamps\n padded = features[-1:].repeat(n_pad, axis=0)\n features = np.concatenate((features, padded))\n else:\n features = features[-n_views:, ...]\n\n # Tensor is `CxTxHxW`\n features = np.moveaxis(features, -1, 0)\n hr = np.moveaxis(hr, 2, 0)\n \n imageset = ImageSet(name=os.path.basename(imset_file),\n timestamp_deimos=str(npz['timetamps_deimos'].item()),\n lr=features,\n hr=hr,\n alphas=alphas)\n return imageset\n\n\nclass ImagesetDataset(Dataset):\n \"\"\" Derived Dataset class for loading many imagesets from a list of directories.\n\n :param imset_dir: name of directory containing files\n :param imset_npz_files: list of filenames that constitute the dataset\n :param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`\n if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS\n (`BxTxCxHxW`)\n :param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk\n :param normalize: whether to normalize data or not\n :param country_norm_df: S2 median/std normalization factors stored per country\n :param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS\n :param norm_s2_npz: 1st and 99th percentile normalization factors for S2\n :param channels_feats: which channels (i.e. indices) are extracted from lrs sequence\n :param channels_labels: which channels (i.e. indices) are extracted from hr image\n :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time\n frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first\n :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. 
Supported\n options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of\n timeframes are taken\n :param transform: function executed on lr and hr arrays as augmentation\n :param histogram_matching: whether to match the histogram between the HR and the corresponding LR image\n \"\"\"\n\n def __init__(\n self,\n imset_dir: str,\n imset_npz_files: list,\n time_first: bool,\n filesystem: object = None,\n normalize: bool = True,\n country_norm_df: object = None,\n norm_deimos_npz: np.ndarray = None,\n norm_s2_npz: np.ndarray = None,\n channels_feats: List[int] = [0, 1, 2, 3],\n channels_labels: List[int] = [0, 1, 2, 3],\n n_views: int = 16,\n padding: str = 'zeros',\n transform: Callable = None,\n histogram_matching: bool = False\n ):\n\n super().__init__()\n self.imset_dir = imset_dir\n self.filesystem = filesystem\n self.imset_npz_files = imset_npz_files\n self.time_first = time_first\n self.normalize = normalize\n self.country_norm_df = country_norm_df\n self.norm_deimos_npz = norm_deimos_npz\n self.norm_s2_npz = norm_s2_npz\n self.channels_feats = channels_feats\n self.channels_labels = channels_labels\n self.n_views = n_views\n self.padding = padding\n self.transform = transform\n self.histogram_matching = histogram_matching\n\n def __len__(self):\n return len(self.imset_npz_files)\n\n def __getitem__(self, index: int) -> ImageSet:\n \"\"\" Returns an ImageSet dict of all assets in the directory of the given index.\"\"\"\n\n if isinstance(index, int):\n imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])\n else:\n raise KeyError('Index must be of type `int`.')\n\n imset = read_imageset(\n imset_file=imset_file,\n filesystem=self.filesystem,\n normalize=self.normalize,\n country_norm_df=self.country_norm_df,\n norm_deimos_npz=self.norm_deimos_npz,\n norm_s2_npz=self.norm_s2_npz,\n n_views=self.n_views,\n padding=self.padding,\n histogram_matching=self.histogram_matching\n )\n\n lr = imset['lr'][self.channels_feats]\n hr = imset['hr'][self.channels_labels]\n\n if self.transform is not None:\n lr, hr = self.transform(lr, hr)\n\n if self.time_first:\n lr = np.swapaxes(lr, 0, 1)\n\n imset['lr'] = torch.from_numpy(lr.copy())\n imset['hr'] = torch.from_numpy(hr.copy())\n imset['alphas'] = torch.from_numpy(imset['alphas'])\n\n return imset\n\n\ndef filter_cloudy_s2(eop, max_cc):\n idxs = [] \n for i, _ in enumerate(eop.timestamp): \n if (eop.mask['CLM'][i, ...].mean() <= max_cc) and (eop.mask['IS_DATA'].mean() == 1): \n idxs.append(i)\n eop.data['BANDS'] = eop.data['BANDS'][idxs, ...]\n eop.data['CLP'] = eop.data['CLP'][idxs, ...]\n eop.mask['CLM'] = eop.mask['CLM'][idxs, ...]\n eop.mask['IS_DATA'] = eop.mask['IS_DATA'][idxs, ...]\n eop.timestamp = list(np.array(eop.timestamp)[idxs])\n return eop \n\n\ndef timestamps_within_date(timestamps, start_date, end_date): \n timestamps = [ts.replace(tzinfo=None) for ts in timestamps] # Remove TZINfo that is present in batch\n return [i for i, ts in enumerate(timestamps) if ts >= start_date and ts < end_date]\n\n\ndef read_imageset_eopatch(imset_file: str,\n start_date: datetime, \n end_date: datetime,\n country: str,\n filesystem: S3FS = None,\n normalize: bool = True,\n country_norm_df: pd.DataFrame = None,\n norm_s2_npz: np.lib.npyio.NpzFile = None,\n n_views: int = 16,\n padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet:\n \"\"\"\n Retrieves all assets from the given directory.\n\n :param imset_file: name of npz file with sample imageset\n :param filesystem: S3 filesystem to 
read files directly from bucket. Default reads from local disk\n :param start_date: specifies the start of the temporal range of the stack of images used for prediction\n :param end_date: specifies the end of the temporal range of the stack of images used for prediction\n :param country: specifies the name of the country so it can be matched with the country_norm_df \n :param normalize: whether to normalize data or not\n :param country_norm_df: S2 median/std normalization factors stored per country\n :param norm_s2_npz: 1st and 99th percentile normalization factors for S2\n :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time\n frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first\n :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported\n options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of\n timeframes are taken\n \"\"\"\n assert padding in ['zeros', 'repeat']\n\n eopatch = EOPatch.load(imset_file, filesystem=filesystem, lazy_loading=True)\n noncloudy = filter_cloudy_s2(eopatch, max_cc=0.1)\n ts_idxs = timestamps_within_date(noncloudy.timestamp, start_date, end_date)\n features = noncloudy.data['BANDS'][ts_idxs, ...] / 10000\n filtered_ts = [eopatch.timestamp[tsi] for tsi in ts_idxs]\n\n\n if normalize:\n country_stats = country_norm_df[country_norm_df.country == str(country)]\n norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values\n norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values\n features = (features - norm_median) / norm_std\n\n s2_p1 = norm_s2_npz['p1']\n s2_p99 = norm_s2_npz['p99']\n features = (features - s2_p1) / (s2_p99 - s2_p1)\n\n alphas = np.ones(n_views)\n if histogram_matching:\n hr = match_histograms(hr, features[-1], multichannel=True)\n\n\n n_feature_timestamps = len(features)\n if n_feature_timestamps < n_views:\n if padding == 'zeros':\n features = pad_to_k(features, n_views, pad_to_front=False)\n alphas[n_feature_timestamps:] = 0\n elif padding == 'repeat':\n n_pad = n_views - n_feature_timestamps\n padded = features[-1:].repeat(n_pad, axis=0)\n features = np.concatenate((features, padded))\n else:\n features = features[-n_views:, ...]\n\n # Tensor is `CxTxHxW`\n features = np.moveaxis(features, -1, 0)\n\n imageset = ImageSet(name=os.path.basename(imset_file),\n lr=features,\n alphas=alphas,\n\t\t\tts=filtered_ts[::-1])\n return imageset\n\n\nclass EopatchPredictionDataset(Dataset):\n \"\"\" Derived Dataset class for loading many imagesets from a list of directories.\n\n :param imset_dir: name of directory containing files\n :param imset_npz_files: list of filenames that constitute the dataset\n :param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`\n if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS\n (`BxTxCxHxW`)\n :param filesystem: S3 filesystem to read files directly from bucket. 
Default reads from local disk\n :param start_date: specifies the start of the temporal range of the stack of images used for prediction\n :param end_date: specifies the end of the temporal range of the stack of images used for prediction\n :param country: specifies the name of the country so it can be matched with the country_norm_df \n :param normalize: whether to normalize data or not\n :param country_norm_df: S2 median/std normalization factors stored per country\n :param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS\n :param norm_s2_npz: 1st and 99th percentile normalization factors for S2\n :param channels_feats: which channels (i.e. indices) are extracted from lrs sequence\n :param channels_labels: which channels (i.e. indices) are extracted from hr image\n :param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time\n frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first\n :param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported\n options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of\n timeframes are taken\n :param transform: function executed on lr and hr arrays as augmentation\n \"\"\"\n\n def __init__(\n self,\n imset_dir: str,\n imset_npz_files: list,\n time_first: bool,\n start_date: datetime,\n end_date: datetime,\n country: str,\n filesystem: object = None,\n normalize: bool = True,\n country_norm_df: object = None,\n norm_deimos_npz: np.ndarray = None,\n norm_s2_npz: np.ndarray = None,\n channels_feats: List[int] = [0, 1, 2, 3],\n n_views: int = 16,\n padding: str = 'zeros',\n histogram_matching: bool = False\n ):\n\n super().__init__()\n self.imset_dir = imset_dir\n self.filesystem = filesystem\n self.imset_npz_files = imset_npz_files\n self.time_first = time_first\n self.normalize = normalize\n self.country_norm_df = country_norm_df\n self.norm_deimos_npz = norm_deimos_npz\n self.norm_s2_npz = norm_s2_npz\n self.channels_feats = channels_feats\n self.n_views = n_views\n self.padding = padding\n self.start_date = start_date\n self.end_date = end_date\n self.histogram_matching = histogram_matching\n self.country = country\n\n def __len__(self):\n return len(self.imset_npz_files)\n\n def __getitem__(self, index: int) -> ImageSet:\n \"\"\" Returns an ImageSet dict of all assets in the directory of the given index.\"\"\"\n\n if isinstance(index, int):\n imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])\n else:\n raise KeyError('Index must be of type `int`.') \n \n imset = read_imageset_eopatch(\n imset_file=imset_file,\n filesystem=self.filesystem,\n normalize=self.normalize,\n country_norm_df=self.country_norm_df,\n norm_deimos_npz=self.norm_deimos_npz,\n norm_s2_npz=self.norm_s2_npz,\n n_views=self.n_views,\n padding=self.padding,\n start_date=self.start_date,\n end_date=self.end_date,\n country=self.country,\n histogram_matching=self.histogram_matching, \n )\n\n lr = imset['lr'][self.channels_feats]\n \n if self.time_first:\n lr = np.swapaxes(lr, 0, 1)\n\n imset['lr'] = torch.from_numpy(lr.copy())\n imset['alphas'] = torch.from_numpy(imset['alphas'])\n\n return imset\n\n"
] | [
[
"numpy.ones",
"numpy.load",
"numpy.zeros",
"numpy.random.permutation",
"numpy.swapaxes",
"numpy.moveaxis",
"numpy.random.choice",
"numpy.random.random",
"numpy.arange",
"numpy.rot90",
"torch.from_numpy",
"numpy.flip",
"numpy.array",
"numpy.concatenate",
"numpy.mean"
]
] |
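In the data_loader record above, pad_to_k fills a TxHxWxC feature stack up to a fixed number of time frames, while augment expects the low-resolution stack already rearranged to CxTxHxW (and the high-resolution target as CxHxW). A short sketch wiring the two together on random dummy arrays (the import path sr.data_loader and all shapes are assumptions for illustration):

import numpy as np
from sr.data_loader import pad_to_k, augment  # helpers defined in the record above

features = np.random.random((9, 32, 32, 4))   # 9 timestamps, 32x32 patch, 4 bands
padded = pad_to_k(features, k=16, pad_to_front=True)
print(padded.shape)                           # (16, 32, 32, 4): 7 all-zero frames prepended

lr = np.moveaxis(padded, -1, 0)               # CxTxHxW, as read_imageset does before augmenting
hr = np.random.random((4, 128, 128))          # CxHxW high-resolution target
lr_aug, hr_aug = augment(lr, hr)
print(lr_aug.shape, hr_aug.shape)             # (4, 16, 32, 32) (4, 128, 128): shapes are preserved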
aaparikh/Intermediate-Python-Practice | [
"6f49bea8f677e7ed500cd1ec91df4c8531832abb"
] | [
"ranndom_m.py"
] | [
"#there are many ways we can do random numbers\n\n#1. import random\n#used to produce pseudo-random numbers. \n# They are called pseudo-random because they are not truly random and can be reproduced.\nimport random\n\na = random.random() #random float between 0 and 1\nb = random.uniform(1,10) #random float between 1 and 10\nc = random.randrange(1,10) #random integer between 1 and 10 (not including 10)\nd = random.randint(1,10) #random integer between 1 and 10 (including 10)\ne = random.choice(['a','b','c']) #random element from a list\n#sample picks one element one time and choices may pick one element multiple times\nf = random.sample(range(1,10),3) #3 random elements from a list\ng = random.choices(range(1,10),k=3) #3 random elements from a list\nh = random.normalvariate(0,1) #random float from normal distribution with mean 0 and standard deviation 1\nrandom.shuffle(['a','b','c']) #shuffle a list in place\nrandom.seed(10) #set the seed for the random number generator to 10 (so that the same sequence of numbers will be generated)\n\n\nimport secrets #secrets — Generate secure random numbers for managing secrets (True randomness)\n# https://docs.python.org/3/library/secrets.html\n#But this is slower than random module as more complex algorithms are used.\n\na = secrets.randbelow(10) #random integer between 0 and 9\nb = secrets.randbits(10) #random integer between 0 and 2**10-1\nc = secrets.choice(['a','b','c']) #random element from a list\nd = secrets.sample(range(1,10),3) #3 random elements from a list\n\n\n#2. import numpy\nimport numpy as np\n#numpy random generator uses a different generator than random module and also has a different seed\nnp.random.seed(10) #set the seed for the random number generator to 10 (so that the same sequence of numbers will be generated)\na = np.random.random() #random float between 0 and 1\nb = np.random.uniform(1,10) #random float between 1 and 10\nc = np.random.randrange(1,10) #random integer between 1 and 10 (not including 10)\nd = np.random.randint(1,10) #random integer between 1 and 10 (including 10)\ne = np.random.choice(['a','b','c']) #random element from a list\nf = np.random.randn(3) #list of 3 random elements "
] | [
[
"numpy.random.randrange",
"numpy.random.uniform",
"numpy.random.seed",
"numpy.random.choice",
"numpy.random.randn",
"numpy.random.random",
"numpy.random.randint"
]
] |
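One caveat on the NumPy half of the record above: its comments mirror the random module API, but numpy.random has no randrange, numpy.random.randint(low, high) excludes high, and numpy.random.choice draws from an array-like rather than separate candidates. A corrected sketch of the equivalent NumPy calls (the literal values are arbitrary):

import numpy as np

np.random.seed(10)                     # NumPy keeps its own generator state, separate from random.seed
a = np.random.random()                 # float in [0, 1)
b = np.random.uniform(1, 10)           # float in [1, 10)
c = np.random.randint(1, 10)           # integer in [1, 10); 10 is excluded, and there is no np.random.randrange
d = np.random.choice(['a', 'b', 'c'])  # one random element drawn from the sequence
e = np.random.randn(3)                 # 3 samples from the standard normal distribution
print(a, b, c, d, e)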
Subarna578/pythainlp | [
"9650a40396719284add17bb09f50e948dea41053"
] | [
"pythainlp/transliterate/thai2rom.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nRomanization of Thai words based on machine-learnt engine (\"thai2rom\")\n\"\"\"\n\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pythainlp.corpus import download, get_corpus_path\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass ThaiTransliterator:\n def __init__(self):\n \"\"\"\n Transliteration of Thai words\n Now supports Thai to Latin (romanization)\n \"\"\"\n # Download the model, if it's not on your machine.\n self.__filemodel = get_corpus_path(\"thai2rom-pytorch-attn\")\n if not self.__filemodel:\n download(\"thai2rom-pytorch-attn\")\n self.__filemodel = get_corpus_path(\"thai2rom-pytorch-attn\")\n\n loader = torch.load(self.__filemodel, map_location=device)\n\n INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader[\"encoder_params\"]\n OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader[\"decoder_params\"]\n\n self._maxlength = 100\n\n self._char_to_ix = loader[\"char_to_ix\"]\n self._ix_to_char = loader[\"ix_to_char\"]\n self._target_char_to_ix = loader[\"target_char_to_ix\"]\n self._ix_to_target_char = loader[\"ix_to_target_char\"]\n\n # encoder/ decoder\n # Restore the model and construct the encoder and decoder.\n self._encoder = Encoder(\n INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT)\n\n self._decoder = AttentionDecoder(\n OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT\n )\n\n self._network = Seq2Seq(\n self._encoder,\n self._decoder,\n self._target_char_to_ix[\"<start>\"],\n self._target_char_to_ix[\"<end>\"],\n self._maxlength,\n ).to(device)\n\n self._network.load_state_dict(loader[\"model_state_dict\"])\n self._network.eval()\n\n def _prepare_sequence_in(self, text: str):\n \"\"\"\n Prepare input sequence for PyTorch\n \"\"\"\n idxs = []\n for ch in text:\n if ch in self._char_to_ix:\n idxs.append(self._char_to_ix[ch])\n else:\n idxs.append(self._char_to_ix[\"<UNK>\"])\n idxs.append(self._char_to_ix[\"<end>\"])\n tensor = torch.tensor(idxs, dtype=torch.long)\n return tensor.to(device)\n\n def romanize(self, text: str) -> str:\n \"\"\"\n :param str text: Thai text to be romanized\n :return: English (more or less) text that spells out how the Thai text\n should be pronounced.\n \"\"\"\n input_tensor = self._prepare_sequence_in(text).view(1, -1)\n input_length = [len(text) + 1]\n\n target_tensor_logits = self._network(input_tensor,\n input_length,\n None, 0)\n\n # Seq2seq model returns <END> as the first token,\n # As a result, target_tensor_logits.size() is torch.Size([0])\n if target_tensor_logits.size(0) == 0:\n target = [\"<PAD>\"]\n else:\n target_tensor = (\n torch.argmax(\n target_tensor_logits.squeeze(1),\n 1).cpu().numpy()\n )\n target = [self._ix_to_target_char[t] for t in target_tensor]\n\n return \"\".join(target)\n\n\nclass Encoder(nn.Module):\n def __init__(self, vocabulary_size, embedding_size,\n hidden_size, dropout=0.5):\n \"\"\"Constructor\"\"\"\n super(Encoder, self).__init__()\n self.hidden_size = hidden_size\n self.character_embedding = nn.Embedding(vocabulary_size,\n embedding_size)\n self.rnn = nn.LSTM(\n input_size=embedding_size,\n hidden_size=hidden_size // 2,\n bidirectional=True,\n batch_first=True,\n )\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, sequences, sequences_lengths):\n\n # sequences: (batch_size, sequence_length=MAX_LENGTH)\n # sequences_lengths: (batch_size)\n\n batch_size = sequences.size(0)\n self.hidden = self.init_hidden(batch_size)\n\n sequences_lengths = 
np.sort(sequences_lengths)[::-1]\n index_sorted = np.argsort(\n -sequences_lengths\n ) # use negation in sort in descending order\n index_unsort = np.argsort(index_sorted) # to unsorted sequence\n\n index_sorted = torch.from_numpy(index_sorted)\n sequences = sequences.index_select(0, index_sorted.to(device))\n\n sequences = self.character_embedding(sequences)\n sequences = self.dropout(sequences)\n\n sequences_packed = nn.utils.rnn.pack_padded_sequence(\n sequences, sequences_lengths.copy(), batch_first=True\n )\n\n sequences_output, self.hidden = self.rnn(sequences_packed,\n self.hidden)\n\n sequences_output, _ = nn.utils.rnn.pad_packed_sequence(\n sequences_output, batch_first=True\n )\n\n index_unsort = torch.from_numpy(index_unsort).to(device)\n sequences_output = sequences_output.index_select(\n 0, index_unsort.clone().detach()\n )\n\n return sequences_output, self.hidden\n\n def init_hidden(self, batch_size):\n h_0 = torch.zeros(\n [2, batch_size, self.hidden_size // 2], requires_grad=True\n ).to(device)\n c_0 = torch.zeros(\n [2, batch_size, self.hidden_size // 2], requires_grad=True\n ).to(device)\n\n return (h_0, c_0)\n\n\nclass Attn(nn.Module):\n def __init__(self, method, hidden_size):\n super(Attn, self).__init__()\n\n self.method = method\n self.hidden_size = hidden_size\n\n if self.method == \"general\":\n self.attn = nn.Linear(self.hidden_size, hidden_size)\n\n elif self.method == \"concat\":\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\n self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))\n\n def forward(self, hidden, encoder_outputs, mask):\n # Calculate energies for each encoder output\n if self.method == \"dot\":\n attn_energies = torch.bmm(encoder_outputs,\n hidden.transpose(1, 2)).squeeze(2)\n elif self.method == \"general\":\n attn_energies = self.attn(\n encoder_outputs.view(-1, encoder_outputs.size(-1))\n ) # (batch_size * sequence_len, hidden_size)\n attn_energies = torch.bmm(\n attn_energies.view(\n *encoder_outputs.size()), hidden.transpose(1, 2)\n ).squeeze(2) # (batch_size, sequence_len)\n elif self.method == \"concat\":\n attn_energies = self.attn(\n torch.cat((\n hidden.expand(*encoder_outputs.size()),\n encoder_outputs\n ), 2)\n ) # (batch_size, sequence_len, hidden_size)\n attn_energies = torch.bmm(\n attn_energies,\n self.other.unsqueeze(0).expand(*hidden.size()).transpose(1, 2),\n ).squeeze(2)\n\n attn_energies = attn_energies.masked_fill(mask == 0, -1e10)\n\n # Normalize energies to weights in range 0 to 1\n return F.softmax(attn_energies, 1)\n\n\nclass AttentionDecoder(nn.Module):\n def __init__(self, vocabulary_size, embedding_size,\n hidden_size, dropout=0.5):\n \"\"\"Constructor\"\"\"\n super(AttentionDecoder, self).__init__()\n self.vocabulary_size = vocabulary_size\n self.hidden_size = hidden_size\n self.character_embedding = nn.Embedding(vocabulary_size,\n embedding_size)\n self.rnn = nn.LSTM(\n input_size=embedding_size + self.hidden_size,\n hidden_size=hidden_size,\n bidirectional=False,\n batch_first=True,\n )\n\n self.attn = Attn(method=\"general\", hidden_size=self.hidden_size)\n self.linear = nn.Linear(hidden_size, vocabulary_size)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, input, last_hidden, encoder_outputs, mask):\n \"\"\"\"Defines the forward computation of the decoder\"\"\"\n\n # input: (batch_size, 1)\n # last_hidden: (batch_size, hidden_dim)\n # encoder_outputs: (batch_size, sequence_len, hidden_dim)\n # mask: (batch_size, sequence_len)\n\n hidden = last_hidden.permute(1, 0, 2)\n attn_weights = 
self.attn(hidden, encoder_outputs, mask)\n\n context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs)\n context_vector = torch.sum(context_vector, dim=1)\n context_vector = context_vector.unsqueeze(1)\n\n embedded = self.character_embedding(input)\n embedded = self.dropout(embedded)\n\n rnn_input = torch.cat((context_vector, embedded), -1)\n\n output, hidden = self.rnn(rnn_input)\n output = output.view(-1, output.size(2))\n\n x = self.linear(output)\n\n return x, hidden[0], attn_weights\n\n\nclass Seq2Seq(nn.Module):\n def __init__(\n self, encoder, decoder, target_start_token,\n target_end_token, max_length\n ):\n super().__init__()\n\n self.encoder = encoder\n self.decoder = decoder\n self.pad_idx = 0\n self.target_start_token = target_start_token\n self.target_end_token = target_end_token\n self.max_length = max_length\n\n assert encoder.hidden_size == decoder.hidden_size\n\n def create_mask(self, source_seq):\n mask = source_seq != self.pad_idx\n return mask\n\n def forward(\n self, source_seq, source_seq_len, target_seq, teacher_forcing_ratio=0.5\n ):\n\n # source_seq: (batch_size, MAX_LENGTH)\n # source_seq_len: (batch_size, 1)\n # target_seq: (batch_size, MAX_LENGTH)\n\n batch_size = source_seq.size(0)\n start_token = self.target_start_token\n end_token = self.target_end_token\n max_len = self.max_length\n target_vocab_size = self.decoder.vocabulary_size\n\n outputs = torch.zeros(max_len,\n batch_size,\n target_vocab_size).to(device)\n\n if target_seq is None:\n assert teacher_forcing_ratio == 0, \"Must be zero during inference\"\n inference = True\n else:\n inference = False\n\n encoder_outputs, encoder_hidden = self.encoder(source_seq,\n source_seq_len)\n\n decoder_input = (\n torch.tensor([[start_token] * batch_size]).view(batch_size,\n 1).to(device)\n )\n\n encoder_hidden_h_t = torch.cat(\n [encoder_hidden[0][0], encoder_hidden[0][1]], dim=1\n ).unsqueeze(dim=0)\n decoder_hidden = encoder_hidden_h_t\n\n max_source_len = encoder_outputs.size(1)\n mask = self.create_mask(source_seq[:, 0:max_source_len])\n\n for di in range(max_len):\n decoder_output, decoder_hidden, _ = self.decoder(\n decoder_input, decoder_hidden, encoder_outputs, mask\n )\n\n topv, topi = decoder_output.topk(1)\n outputs[di] = decoder_output.to(device)\n\n teacher_force = random.random() < teacher_forcing_ratio\n\n decoder_input = (\n target_seq[:, di].reshape(batch_size, 1)\n if teacher_force\n else topi.detach()\n )\n\n if inference and decoder_input == end_token:\n return outputs[:di]\n\n return outputs\n\n_THAI_TO_ROM = ThaiTransliterator()\n\n\ndef romanize(text: str) -> str:\n return _THAI_TO_ROM.romanize(text)\n"
] | [
[
"torch.sum",
"torch.FloatTensor",
"torch.nn.LSTM",
"torch.nn.Linear",
"torch.load",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.functional.softmax",
"torch.tensor",
"torch.nn.Embedding",
"numpy.argsort",
"torch.zeros",
"torch.cuda.is_available",
"torch.from_numpy",
"numpy.sort",
"torch.cat",
"torch.nn.Dropout"
]
] |
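In the thai2rom record above, Attn.forward hides padded encoder positions by filling their attention energies with -1e10 before the softmax, so padding ends up with effectively zero attention weight. A tiny standalone sketch of just that masking step (the tensor values are made up):

import torch
import torch.nn.functional as F

# energies of one decoder step over a length-4 source sequence
attn_energies = torch.tensor([[2.0, 0.5, 1.0, 3.0]])
mask = torch.tensor([[1, 1, 1, 0]])   # the last source position is padding

attn_energies = attn_energies.masked_fill(mask == 0, -1e10)
weights = F.softmax(attn_energies, dim=1)
print(weights)             # the padded position receives ~0 weight
print(weights.sum(dim=1))  # the remaining weights still sum to 1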
RaphaelOlivier/speechbrain | [
"142dc6caa4b46ca4c9341b0cd39627f489808749"
] | [
"speechbrain/lobes/models/huggingface_wav2vec.py"
] | [
"\"\"\"This lobe enables the integration of huggingface pretrained wav2vec2/hubert/wavlm models.\n\nReference: https://arxiv.org/abs/2006.11477\nReference: https://arxiv.org/abs/1904.05862\nReference: https://arxiv.org/abs/2110.13900\nTransformer from HuggingFace needs to be installed:\nhttps://huggingface.co/transformers/installation.html\n\nAuthors\n * Titouan Parcollet 2021\n * Boumadane Abdelmoumene 2021\n\"\"\"\n\nimport os\nimport torch\nimport logging\nimport pathlib\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch import nn\nfrom huggingface_hub import model_info\nfrom speechbrain.pretrained.fetching import fetch\n\n# We check if transformers is installed.\ntry:\n import transformers\n from transformers import Wav2Vec2Model, HubertModel, WavLMModel, Data2VecAudioModel\n from transformers import Wav2Vec2Config, HubertConfig, WavLMConfig, Data2VecAudioConfig\n from transformers import Wav2Vec2FeatureExtractor\n from transformers import Wav2Vec2ForPreTraining\n from transformers.models.wav2vec2.modeling_wav2vec2 import (\n _compute_mask_indices,\n )\n\nexcept ImportError:\n MSG = \"Please install transformers from HuggingFace to use wav2vec2 / Hubert\\n\"\n MSG += \"E.G. run: pip install transformers\"\n raise ImportError(MSG)\n\nlogger = logging.getLogger(__name__)\n\nHF_models = {\n \"wav2vec2\": Wav2Vec2Model,\n \"hubert\": HubertModel,\n \"wavlm\": WavLMModel,\n \"data2vec\": Data2VecAudioModel\n}\n\nHF_config = {\n \"wav2vec2\": Wav2Vec2Config,\n \"hubert\": HubertConfig,\n \"wavlm\": WavLMConfig,\n \"data2vec\": Data2VecAudioConfig\n}\n\n\nclass HuggingFaceWav2Vec2(nn.Module):\n \"\"\"This lobe enables the integration of HuggingFace and SpeechBrain\n pretrained wav2vec2.0/Hubert models.\n\n Source paper wav2vec2.0: https://arxiv.org/abs/2006.11477\n Source paper Hubert: https://arxiv.org/abs/2106.07447\n Transformer from HuggingFace needs to be installed:\n https://huggingface.co/transformers/installation.html\n\n The model can be used as a fixed feature extractor or can be finetuned. It\n will download automatically the model from HuggingFace or use a local path.\n\n Arguments\n ---------\n source : str\n HuggingFace hub name: e.g \"facebook/wav2vec2-large-lv60\"\n save_path : str\n Path (dir) of the downloaded model.\n output_norm : bool (default: True)\n If True, a layer_norm (affine) will be applied to the output obtained\n from the wav2vec model.\n freeze : bool (default: True)\n If True, the model is frozen. If False, the model will be trained\n alongside with the rest of the pipeline.\n freeze_feature_extractor : bool (default: False)\n When freeze = False and freeze_feature_extractor True, the featue_extractor module of the model is Frozen. If False\n all the wav2vec model will be trained including featue_extractor module.\n apply_spec_augment : bool (default: False)\n If True, the model will apply spec augment on the output of feature extractor\n (inside huggingface Wav2VecModel() class).\n If False, the model will not apply spec augment. 
We set this to false to prevent from doing it twice.\n Example\n -------\n >>> inputs = torch.rand([10, 600])\n >>> model_hub = \"facebook/wav2vec2-base-960h\"\n >>> save_path = \"savedir\"\n >>> model = HuggingFaceWav2Vec2(model_hub, save_path)\n >>> outputs = model(inputs)\n \"\"\"\n\n def __init__(\n self,\n source,\n save_path,\n output_norm=True,\n freeze=True,\n freeze_feature_extractor=False,\n apply_spec_augment=False,\n load_pretrained_weights=True,\n ):\n super().__init__()\n\n # Download the extractor from HuggingFace.\n # The extractor is only used to retrieve the normalisation information\n self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n source, cache_dir=save_path\n )\n\n # Select specific self-supervised loader (eg. Wav2Vec2, Hubert)\n if \"hubert\" in source:\n config = HF_config.get(\"hubert\")\n model = HF_models.get(\"hubert\")\n elif \"wavlm\" in source:\n config = HF_config.get(\"wavlm\")\n model = HF_models.get(\"wavlm\")\n elif \"data2vec\" in source:\n config = HF_config.get(\"data2vec\")\n model = HF_models.get(\"data2vec\")\n else:\n config = HF_config.get(\"wav2vec2\")\n model = HF_models.get(\"wav2vec2\")\n\n # Download and load the model\n self._from_pretrained(\n source, config=config, model=model, save_path=save_path, load_weights=load_pretrained_weights\n )\n\n # set apply_spec_augment\n self.model.config.apply_spec_augment = apply_spec_augment\n\n # We check if inputs need to be normalized w.r.t pretrained wav2vec2\n self.normalize_wav = self.feature_extractor.do_normalize\n\n self.freeze = freeze\n self.freeze_feature_extractor = freeze_feature_extractor\n self.output_norm = output_norm\n if self.freeze:\n logger.warning(\n \"speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is frozen.\"\n )\n self.model.eval()\n for param in self.model.parameters():\n param.requires_grad = False\n else:\n self.model.train()\n if self.freeze_feature_extractor:\n self.model.feature_extractor._freeze_parameters()\n\n def _from_pretrained(self, source, config, model, save_path, load_weights):\n \"\"\"This function manages the source checking and loading of the params.\n # 1. Is the model from HF or a local path\n # 2. Is the model pretrained with HF or SpeechBrain\n # 3. Download (if appropriate) and load with respect to 1. and 2.\n \"\"\"\n\n is_sb, ckpt_file = self._check_model_source(source)\n if not load_weights:\n config = config.from_pretrained(source, cache_dir=save_path)\n self.model = model(config)\n elif is_sb:\n config = config.from_pretrained(source, cache_dir=save_path)\n self.model = model(config)\n self.model.gradient_checkpointing_disable() # Required by DDP\n # fetch the checkpoint file\n ckpt_full_path = fetch(\n filename=ckpt_file, source=source, savedir=save_path\n )\n # We transfer the parameters from the checkpoint.\n self._load_sb_pretrained_w2v2_parameters(ckpt_full_path)\n else:\n if load_weights:\n self.model = model.from_pretrained(source, cache_dir=save_path)\n else:\n self.model=model()\n\n def _load_sb_pretrained_w2v2_parameters(self, path):\n \"\"\"Loads the parameter of a w2v2 model pretrained with SpeechBrain and the\n HuggingFaceWav2Vec2Pretrain Object. 
It is necessary to perform a custom\n loading because HuggingFace adds a level to the checkpoint when storing\n the model breaking the compatibility between HuggingFaceWav2Vec2Pretrain\n and HuggingFaceWav2Vec2.\n\n In practice a typical HuggingFaceWav2Vec2 checkpoint for a given parameter\n would be: model.conv.weight.data while for HuggingFaceWav2Vec2Pretrain it\n is: model.wav2vec2.weight.data (wav2vec2 must be removed before loading).\n \"\"\"\n\n modified_state_dict = {}\n orig_state_dict = torch.load(path, map_location=\"cpu\")\n\n # We remove the .wav2vec2 in the state dict.\n for key, params in orig_state_dict.items():\n if \"wav2vec2.\" in key:\n save_key = key.replace(\"model.wav2vec2.\", \"\")\n modified_state_dict[save_key] = params\n\n incompatible_keys = self.model.load_state_dict(\n modified_state_dict, strict=False\n )\n for missing_key in incompatible_keys.missing_keys:\n logger.warning(\n f\"During parameter transfer to {self.model} loading from \"\n + f\"{path}, the transferred parameters did not have \"\n + f\"parameters for the key: {missing_key}\"\n )\n for unexpected_key in incompatible_keys.unexpected_keys:\n logger.warning(\n f\"The param with the key: {unexpected_key} is discarded as it \"\n + \"is useless for wav2vec 2.0 finetuning.\"\n )\n\n def _check_model_source(self, path):\n \"\"\"Checks if the pretrained model has been trained with SpeechBrain and\n is hosted locally or on a HuggingFace hub.\n \"\"\"\n checkpoint_filename = \"\"\n source = pathlib.Path(path)\n is_local = True\n is_sb = True\n\n # If path is a huggingface hub.\n if not source.exists():\n is_local = False\n\n if is_local:\n # Test for HuggingFace model\n if any(File.endswith(\".bin\") for File in os.listdir(path)):\n is_sb = False\n return is_sb, checkpoint_filename\n\n # Test for SpeechBrain model and get the filename.\n for File in os.listdir(path):\n if File.endswith(\".ckpt\"):\n checkpoint_filename = os.path.join(path, File)\n is_sb = True\n return is_sb, checkpoint_filename\n else:\n files = model_info(\n path\n ).siblings # get the list of files of the Hub\n\n # Test if it's an HuggingFace model or a SB one\n for File in files:\n if File.rfilename.endswith(\".ckpt\"):\n checkpoint_filename = File.rfilename\n is_sb = True\n return is_sb, checkpoint_filename\n\n for File in files:\n if File.rfilename.endswith(\".bin\"):\n checkpoint_filename = File.rfilename\n is_sb = False\n return is_sb, checkpoint_filename\n\n err_msg = f\"{path} does not contain a .bin or .ckpt checkpoint !\"\n raise FileNotFoundError(err_msg)\n\n def forward(self, wav):\n \"\"\"Takes an input waveform and return its corresponding wav2vec encoding.\n\n Arguments\n ---------\n wav : torch.Tensor (signal)\n A batch of audio signals to transform to features.\n \"\"\"\n\n # If we freeze, we simply remove all grads and features from the graph.\n if self.freeze:\n with torch.no_grad():\n return self.extract_features(wav).detach()\n\n return self.extract_features(wav)\n\n def extract_features(self, wav):\n \"\"\"Takes an input waveform and return its corresponding wav2vec encoding.\n\n Arguments\n ---------\n wav : torch.Tensor (signal)\n A batch of audio signals to transform to features.\n \"\"\"\n\n if self.normalize_wav:\n wav = F.layer_norm(wav, wav.shape)\n\n # Extract wav2vec output\n out = self.model(wav)[0]\n\n # We normalize the output if required\n if self.output_norm:\n out = F.layer_norm(out, out.shape)\n\n return out\n\n\nclass HuggingFaceWav2Vec2Pretrain(nn.Module):\n \"\"\"This lobe enables the integration 
of HuggingFace\n wav2vec2.0 models to be pretrained.\n\n Source paper: https://arxiv.org/abs/2006.11477\n Transformer from HuggingFace needs to be installed:\n https://huggingface.co/transformers/installation.html\n\n The return is an HuggingFace format and the mask indices that contains:\n https://huggingface.co/transformers/model_doc/wav2vec2.html#wav2vec2forpretraining\n\n For instance, it returns the loss that can be accessed with .loss\n\n Arguments\n ---------\n source : str\n HuggingFace hub name: e.g \"facebook/wav2vec2-large-lv60\"\n save_path : str\n Path (dir) of the downloaded model.\n mask_prob : float (default: 0.65)\n Probability of masking a given frame. Default is taken from the paper.\n mask_length : float (default: 10)\n Length (i.e. number of consecutive masked frames). Default is taken from\n the paper.\n Example\n -------\n >>> inputs = torch.rand([10, 32000])\n >>> model_hub = \"facebook/wav2vec2-base-960h\"\n >>> save_path = \"savedir\"\n >>> model = HuggingFaceWav2Vec2Pretrain(model_hub, save_path)\n >>> outputs, _ = model(inputs)\n \"\"\"\n\n def __init__(\n self,\n source,\n save_path,\n mask_prob=0.65,\n mask_length=10,\n normalize_wav=True,\n ):\n super().__init__()\n\n self.mask_prob = mask_prob\n self.mask_length = mask_length\n self.normalize_wav = normalize_wav\n\n # Download the config of the model from HuggingFace.\n self.config = Wav2Vec2Config.from_pretrained(\n source, cache_dir=save_path\n )\n self.config.output_hidden_states = (\n True # We want the hidden states as well!\n )\n\n self.model = Wav2Vec2ForPreTraining(self.config)\n self.model.gradient_checkpointing_disable() # Required by DDP\n self.model.train()\n\n # We check if inputs need to be normalized w.r.t pretrained wav2vec2\n\n def forward(self, wav):\n \"\"\"Takes an input waveform and return its corresponding wav2vec encoding.\n\n Arguments\n ---------\n wav : torch.Tensor (signal)\n A batch of audio signals to transform to features.\n \"\"\"\n batch_size, raw_sequence_length = wav.shape\n\n if self.normalize_wav:\n wav = F.layer_norm(wav, wav.shape)\n\n sequence_length = self.model._get_feat_extract_output_lengths(\n raw_sequence_length\n )\n\n # 1. Compute the indices that will be masked\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length),\n mask_prob=self.mask_prob,\n mask_length=self.mask_length,\n )\n torch_mask_time_indices = torch.tensor(\n mask_time_indices, device=wav.device, dtype=torch.long,\n )\n\n # 2. Sample the negative samples from the entire sequence.\n # Fairseq does it only on the masked indices, but this only work if you\n # have long sentences. For more versatily, we sample on the entire sequence.\n # value.\n full_sentence_indices = np.ones((batch_size, sequence_length))\n\n # print(np.sum(mask_time_indices, axis=1))\n negative_sample_indices = torch.tensor(\n transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices(\n (batch_size, sequence_length),\n num_negatives=self.config.num_negatives,\n mask_time_indices=full_sentence_indices,\n ),\n device=wav.device,\n dtype=torch.long,\n )\n\n return (\n self.model(\n wav,\n mask_time_indices=torch_mask_time_indices,\n sampled_negative_indices=negative_sample_indices,\n ),\n torch_mask_time_indices,\n )\n"
] | [
[
"numpy.ones",
"torch.load",
"torch.nn.functional.layer_norm",
"torch.no_grad",
"torch.tensor"
]
] |
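The docstring embedded in this record already carries a usage example; restated below as a runnable sketch. It assumes `speechbrain` and `transformers` are installed and that the checkpoint can be fetched from the HuggingFace hub on first use.

```python
import torch
from speechbrain.lobes.models.huggingface_wav2vec import HuggingFaceWav2Vec2

inputs = torch.rand([10, 600])  # batch of 10 raw waveforms, 600 samples each
model = HuggingFaceWav2Vec2("facebook/wav2vec2-base-960h", save_path="savedir")
outputs = model(inputs)         # frozen by default, so features are detached
```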
sbrml/pilco | [
"77b6d8b9033ffdb23cae4936b028f42144f37846"
] | [
"pilco/environments/custom/continuous_mountaincar.py"
] | [
"\"\"\"\nOur modification of the OpenAI Gym Continuous Mountain Car by Olivier Sigaud:\nhttps://github.com/openai/gym/blob/master/gym/envs/classic_control/continuous_mountain_car.py\n\nwhich was (ultimately) based on Sutton's implementation:\nhttp://incompleteideas.net/sutton/MountainCar/MountainCar1.cp\n\"\"\"\n\nfrom pilco.errors import EnvironmentError\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nimport numpy as np\n\n\nclass MountainCar(gym.Env):\n\n metadata = {'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30}\n\n def __init__(self):\n\n # State and action bounds\n self.min_action = -1.0\n self.max_action = 1.0\n self.min_position = - 3.0\n self.max_position = 3.0\n self.max_speed = 0.07\n self.goal_position = 0.5\n\n # Force per mass the car can output\n self.power = 0.0015\n\n self.low_state = np.array([self.min_position, -self.max_speed],\n dtype=np.float32)\n\n self.high_state = np.array([self.max_position, self.max_speed],\n dtype=np.float32)\n\n self.viewer = None\n\n # Allowed action space\n self.action_space = spaces.Box(low=self.min_action,\n high=self.max_action,\n shape=(1,),\n dtype=np.float32)\n\n self.seed()\n\n # Temporary hack to work with rest of library\n self.env = self\n\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n\n def step(self, action):\n\n # Check if action is in permissible space\n if not self.action_space.contains(action):\n raise EnvironmentError(f'Expected action in the range of [-1., 1.] '\n f'got action {action}.')\n\n # Unpack positiion and valocity\n position, velocity = self.state\n\n # Increment position by velocity\n position_ = position + velocity\n\n # Increment velocity by Euler rule and clip\n velocity_ = velocity + action * self.power - 0.0025 * np.cos(3 * position)\n velocity_ = np.clip(velocity_, - self.max_speed, self.max_speed)\n\n self.state = np.array([position_, velocity_])\n\n return self.state, None, False, {}\n\n\n def reset(self):\n self.state = np.array([-0.5, 0.])\n return np.array(self.state)\n\n\n def _height(self, xs):\n return 0.55 + 0.45 * np.sin(3 * xs)\n\n def render(self, mode='human'):\n\n # Set picture size\n screen_width = 600\n screen_height = 400\n\n world_width = self.max_position - self.min_position\n scale = screen_width/world_width\n\n # Set car size\n carwidth = 40\n carheight = 20\n\n if self.viewer is None:\n\n from gym.envs.classic_control import rendering\n\n # Car constants\n clearance = 10\n\n # Overall viewer\n self.viewer = rendering.Viewer(screen_width, screen_height)\n\n # Track on which the car moves\n xs = np.linspace(self.min_position, self.max_position, 200)\n ys = self._height(xs)\n xys = list(zip((xs - self.min_position) * scale, ys * scale))\n\n # Add car\n self.track = rendering.make_polyline(xys)\n self.track.set_linewidth(4)\n self.viewer.add_geom(self.track)\n self.cartrans = rendering.Transform()\n\n # Car chasis\n l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0\n car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n car.add_attr(rendering.Transform(translation=(0, clearance)))\n car.add_attr(self.cartrans)\n self.viewer.add_geom(car)\n\n # Front wheel\n frontwheel = rendering.make_circle(carheight / 2.5)\n frontwheel.set_color(.5, .5, .5)\n frontwheel.add_attr(rendering.Transform(translation=(carwidth / 4, clearance)))\n frontwheel.add_attr(self.cartrans)\n self.viewer.add_geom(frontwheel)\n\n # Back wheel\n backwheel = rendering.make_circle(carheight / 2.5)\n 
backwheel.add_attr(rendering.Transform(translation=(-carwidth / 4, clearance)))\n backwheel.add_attr(self.cartrans)\n backwheel.set_color(.5, .5, .5)\n self.viewer.add_geom(backwheel)\n\n # Flagpole on mountain peak\n flagx = scale * (0.5 - self.min_position)\n flagy1 = scale * self._height(self.goal_position)\n flagy2 = flagy1 + 50\n flagpole = rendering.Line((flagx, flagy1),\n (flagx, flagy2))\n self.viewer.add_geom(flagpole)\n\n # Flag on flagpole\n flag = rendering.FilledPolygon([(flagx, flagy2),\n (flagx, flagy2 - 10),\n (flagx + 25, flagy2 - 5)])\n flag.set_color(.8, .8, 0)\n self.viewer.add_geom(flag)\n\n\n # Translate and rotate car\n self.cartrans.set_translation(scale * (self.state[0] - self.min_position),\n scale * self._height(self.state[0]))\n self.cartrans.set_rotation(np.cos(3 * self.state[0]))\n\n return self.viewer.render(return_rgb_array=mode=='rgb_array')\n\n\n def close(self):\n\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n"
] | [
[
"numpy.cos",
"numpy.clip",
"numpy.array",
"numpy.sin",
"numpy.linspace"
]
] |
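A standalone sketch of the one-step Euler update implemented in this record's `step()`: the position advances by the current velocity, then the velocity is updated with the action force and the cosine gravity term and clipped to the speed limit. The starting state mirrors the record's `reset()`.

```python
import numpy as np

power, max_speed = 0.0015, 0.07     # constants from the record
position, velocity = -0.5, 0.0      # state set by reset()
action = 1.0                        # full throttle

new_position = position + velocity
new_velocity = np.clip(velocity + action * power - 0.0025 * np.cos(3 * position),
                       -max_speed, max_speed)
print(new_position, new_velocity)
```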
eltociear/NiaPy | [
"7884aefec8f013d9f8db5c1af7080a61dd19a31d"
] | [
"examples/custom_problem.py"
] | [
"# encoding=utf8\n# This is temporary fix to import module from parent folder\n# It will be removed when package is published on PyPI\nimport sys\n\nsys.path.append('../')\n\nimport numpy as np\nfrom niapy.task import StoppingTask\nfrom niapy.problems import Problem\nfrom niapy.algorithms.basic import ParticleSwarmAlgorithm\n\n\nclass MyProblem(Problem):\n def __init__(self, dimension, lower=-10, upper=10, *args, **kwargs):\n super().__init__(dimension, lower, upper, *args, **kwargs)\n\n def _evaluate(self, x):\n return np.sum(x ** 2)\n\n\n# we will run Particle Swarm Algorithm on custom problem\ntask = StoppingTask(problem=MyProblem(dimension=10), max_iters=1000)\nalgo = ParticleSwarmAlgorithm(population_size=40, c1=2.0, c2=2.0, w=0.7, min_velocity=-4, max_velocity=4)\nbest = algo.run(task=task)\nprint('%s -> %s ' % (best[0], best[1]))\n"
] | [
[
"numpy.sum"
]
] |
mrzhuzhe/Kaggle_Lux_AI_2021 | [
"08b795e71e78c768d28c648290a15d58ca718776"
] | [
"lux_ai/lux_gym/multi_subtask.py"
] | [
"from abc import ABC, abstractmethod\nimport numpy as np\nimport random\nfrom typing import Callable, Dict, Optional, Tuple, Sequence\n\nfrom .reward_spaces import Subtask\nfrom ..lux.game import Game\n\n\nclass SubtaskSampler(ABC):\n def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):\n self.subtask_constructors = subtask_constructors\n\n @abstractmethod\n def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:\n pass\n\n # noinspection PyMethodMayBeStatic\n def get_info(self) -> Dict[str, np.ndarray]:\n return {}\n\n\nclass RandomSampler(SubtaskSampler):\n def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:\n return self.subtask_constructors[random.randrange(len(self.subtask_constructors))]()\n\n\nclass DifficultySampler(SubtaskSampler):\n def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):\n super(DifficultySampler, self).__init__(subtask_constructors)\n self.active_subtask_idx = -1\n self.summed_rewards = np.zeros(len(self.subtask_constructors))\n self.n_trials = np.zeros(len(self.subtask_constructors))\n\n def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:\n if final_rewards is not None:\n self.n_trials[self.active_subtask_idx] += 1\n self.summed_rewards[self.active_subtask_idx] += np.mean(final_rewards)\n\n self.active_subtask_idx = np.random.choice(len(self.subtask_constructors), p=self.weights)\n return self.subtask_constructors[self.active_subtask_idx]()\n\n @property\n def weights(self) -> np.ndarray:\n weights = Subtask.get_reward_spec().reward_max - self.summed_rewards / np.maximum(self.n_trials, 1)\n return weights / weights.sum()\n\n def get_info(self) -> Dict[str, np.ndarray]:\n return {\n f\"LOGGING_{subtask.__name__}_subtask_difficulty\": self.weights[i]\n for i, subtask in enumerate(self.subtask_constructors)\n }\n\n\nclass MultiSubtask(Subtask):\n def __init__(\n self,\n subtask_constructors: Sequence[Callable[..., Subtask]] = (),\n subtask_sampler_constructor: Callable[..., SubtaskSampler] = RandomSampler,\n **kwargs\n ):\n super(MultiSubtask, self).__init__(**kwargs)\n self.subtask_constructors = subtask_constructors\n self.subtask_sampler = subtask_sampler_constructor(self.subtask_constructors)\n self.active_subtask = self.subtask_sampler.sample(None)\n self.info = {\n f\"LOGGING_{subtask.__name__}_subtask_reward\": np.array([float(\"nan\"), float(\"nan\")])\n for subtask in self.subtask_constructors\n }\n\n def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:\n reward, done = self.active_subtask.compute_rewards_and_done(game_state, done)\n for subtask in self.subtask_constructors:\n reward_key = f\"LOGGING_{subtask.__name__}_subtask_reward\"\n if isinstance(self.active_subtask, subtask):\n self.info[reward_key] = np.array(reward)\n else:\n self.info[reward_key] = np.array([float(\"nan\"), float(\"nan\")])\n if done:\n self.active_subtask = self.subtask_sampler.sample(reward)\n return reward, done\n\n def completed_task(self, game_state: Game) -> np.ndarray:\n raise NotImplementedError\n\n def get_info(self) -> Dict[str, np.ndarray]:\n return dict(**self.info, **self.subtask_sampler.get_info())\n\n def get_subtask_encoding(self, subtask_encoding_dict: dict) -> int:\n return self.active_subtask.get_subtask_encoding(subtask_encoding_dict)\n"
] | [
[
"numpy.maximum",
"numpy.array",
"numpy.mean"
]
] |
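A plain-numpy sketch of the `DifficultySampler.weights` property in the row above: subtasks that have earned less average reward so far receive a larger sampling probability. `reward_max` is assumed to be 1.0 here; in the record it comes from `Subtask.get_reward_spec().reward_max`.

```python
import numpy as np

reward_max = 1.0                              # assumed value
summed_rewards = np.array([3.0, 1.0, 0.0])    # cumulative reward per subtask
n_trials = np.array([4, 2, 0])                # trials per subtask (0 allowed)

weights = reward_max - summed_rewards / np.maximum(n_trials, 1)
weights = weights / weights.sum()
print(weights)  # ~[0.143, 0.286, 0.571]: the hardest subtask is sampled most
```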
Acetonen/Interkamen_career | [
"75cc0a5832b7c1e303967cc337bb001e3383eb9e"
] | [
"interkamen_career/modules/mechanics_economic.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"Visualise statistic by machine economic.\"\"\"\n\n\nfrom __future__ import annotations\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom typing import Dict\nfrom .mechanic_report import MechReports\nfrom .administration.logger_cfg import Logs\nfrom .support_modules.custom_exceptions import MainMenu\nfrom .support_modules.standart_functions import (\n BasicFunctionsS\n as BasF_S\n)\n\nLOGGER = Logs().give_logger(__name__)\n\n\nclass MechEconomic(MechReports):\n \"\"\"Visualise statistic by machine economic.\"\"\"\n\n __slots__ = (\n 'mech_econ_path',\n 'mech_econ_data',\n 'mech_econ_file',\n )\n\n def __init__(self, user):\n \"\"\"Load mech econom data.\"\"\"\n super().__init__(user)\n self.mech_econ_data = {}\n self.mech_econ_path = (\n super().get_root_path() / 'data' / 'mech_ecomomic'\n )\n if self.mech_econ_path.exists():\n self.mech_econ_file = super().load_data(\n data_path=self.mech_econ_path,\n user=user,\n )\n else:\n self.mech_econ_file = pd.DataFrame(self.mech_econ_data, index=[0])\n\n def _save_mech_econom(self):\n \"\"\"Save mech econom and create log file.\"\"\"\n self.mech_econ_file = self.mech_econ_file.append(\n self.mech_econ_data,\n ignore_index=True\n )\n self._dump_mech_econ_data()\n self._log_mech_econ_creation()\n\n def _dump_mech_econ_data(self):\n \"\"\"Dump salary data to file.\"\"\"\n super().dump_data(\n data_path=self.mech_econ_path,\n base_to_dump=self.mech_econ_file,\n user=self.user,\n )\n\n def _log_mech_econ_creation(self):\n \"\"\"Save log about salary creation.\"\"\"\n report_name = '{}-{}'.format(\n self.mech_econ_data['year'],\n self.mech_econ_data['month'],\n )\n LOGGER.warning(\n f\"User '{self.user.login}' create mechanic econom.: {report_name}\"\n )\n\n def _visualise_one_day_cost(self):\n \"\"\"Visualise cost of one day by each machine.\"\"\"\n year = self._chose_year()\n data_by_year = super().give_dataframe_by_year(year)\n data_for_plot = {\n 'mach': [],\n 'day_cost': [],\n }\n for mach in super().maint_dict['mach_name']:\n totall_cost = sum(self.mech_econ_file[mach])\n total_work = sum(data_by_year.work)\n number_of_wdays = total_work\n day_cost = round(totall_cost/number_of_wdays, 0)\n data_for_plot['mach'].append(mach)\n data_for_plot['day_cost'].append(day_cost)\n data_for_plot = pd.DataFrame(data_for_plot)\n self._create_one_day_cost_plot(data_for_plot)\n\n def _input_machines_econ(self, mech_econ_date):\n \"\"\"Input money, spent for machine in month.\"\"\"\n self.mech_econ_data['year'] = mech_econ_date['year']\n self.mech_econ_data['month'] = mech_econ_date['month']\n super().clear_screen()\n print(\"Введите сумму для каждой техники:\")\n for mach in super().maint_dict['mach_name']:\n self.mech_econ_data[mach] = float(input(f\"{mach}: \"))\n save = input(\n \"\\nДанные введены.\"\n \"\\n[s] - сохранить данные: \"\n )\n if save.lower() == 's':\n self._save_mech_econom()\n print(\"Данные сохранены.\")\n else:\n print(\"Вы отменили сохранение.\")\n input(\"\\n[ENTER] - выйти.\")\n\n def _visualise_statistic(self, year):\n \"\"\"Visualise statistic.\"\"\"\n mech_econ_year = self.mech_econ_file.year == year\n data_by_year = (\n self.mech_econ_file[mech_econ_year]\n .sort_values(by=['month'])\n )\n super().print_all_dataframe(data_by_year)\n input(\"\\n[ENTER] - выйти.\")\n\n def _chose_year(self):\n \"\"\"Show statistic about drill instrument.\"\"\"\n print(\"[ENTER] - выход\"\n \"\\nВыберете год:\")\n year = super().choise_from_list(\n sorted(set(self.mech_econ_file.year)),\n none_option=True\n 
)\n if year:\n return year\n else:\n raise MainMenu\n\n @BasF_S.set_plotter_parametrs\n def _create_one_day_cost_plot(self, dataframe):\n \"\"\"Create one day cost plot.\"\"\"\n figure = plt.figure()\n\n x_cost = list(range(len(super().maint_dict['mach_name'])))\n\n axle = figure.add_subplot(111)\n axle.bar(\n x_cost, dataframe.day_cost, 0.3, alpha=0.4, color='r',\n label='Коэффициент', tick_label=dataframe.mach\n )\n axle.tick_params(labelrotation=90)\n axle.set_title(\n \"Коэффициент целесообразности содержания техники руб/час. \",\n fontsize=\"x-large\")\n axle.set_ylabel('руб.')\n axle.legend()\n axle.grid(\n True, linestyle='--', which='major',\n color='grey', alpha=.25, axis='y'\n )\n figure.tight_layout()\n plt.show()\n\n def create_mech_econom(self):\n \"\"\"Create mechanic econom data report.\"\"\"\n mech_econ_date = self.input_date()\n check = super().check_date_in_dataframe(\n self.mech_econ_file,\n mech_econ_date\n )\n if check:\n print(\"Данные за этот месяц уже внесены.\")\n input(\"\\n[ENTER] - выйти.\")\n else:\n self._input_machines_econ(mech_econ_date)\n\n def show_econ_statistic(self, stat_variants: Dict):\n \"\"\"Show machine economic statistic.\"\"\"\n stat_variants = {\n 'Целесообразность затрат на содержание техники.':\n self._visualise_one_day_cost,\n }\n print(\"[ENTER] - выйти.\"\n \"\\nВыберете вид отчета:\")\n stat = super().choise_from_list(stat_variants, none_option=True)\n if stat:\n stat_variants[stat]()\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"pandas.DataFrame"
]
] |
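A compact sketch of the per-machine "one day cost" figure computed in this record's `_visualise_one_day_cost`: the total spend per machine divided by the number of worked days, rounded. The machine names and numbers below are illustrative, not the record's real schema.

```python
import pandas as pd

spend = pd.DataFrame({"excavator": [100_000, 120_000],
                      "drill": [40_000, 35_000]})   # monthly spend per machine
worked_days = 220                                   # worked days in the year

day_cost = (spend.sum() / worked_days).round(0)
print(day_cost)  # cost per worked day for each machine, as shown in the bar chart
```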
robbycostales/multiagent-particle-envs | [
"22a00b18e13b629a206a8ffc8d8319d06dd5d7b0"
] | [
"multiagent/scenarios/simple_speaker_listener.py"
] | [
"import numpy as np\nfrom multiagent.core import World, Agent, Landmark\nfrom multiagent.scenario import BaseScenario\n\nclass Scenario(BaseScenario):\n def make_world(self, dim_c=3):\n world = World()\n # set any world properties first\n world.dim_c = dim_c\n num_landmarks = 3\n # add agents\n world.agents = [Agent() for i in range(2)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = False\n agent.size = 0.075\n # speaker\n world.agents[0].movable = False\n # listener\n world.agents[1].silent = True\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n landmark.size = 0.04\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # assign goals to agents\n for agent in world.agents:\n agent.goal_a = None\n agent.goal_b = None\n # want listener to go to the goal landmark\n world.agents[0].goal_a = world.agents[1]\n world.agents[0].goal_b = np.random.choice(world.landmarks)\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = np.array([0.25,0.25,0.25])\n # random properties for landmarks\n world.landmarks[0].color = np.array([0.65,0.15,0.15])\n world.landmarks[1].color = np.array([0.15,0.65,0.15])\n world.landmarks[2].color = np.array([0.15,0.15,0.65])\n # special colors for goals\n world.agents[0].goal_a.color = world.agents[0].goal_b.color + np.array([0.45, 0.45, 0.45])\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1,+1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np.random.uniform(-1,+1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def benchmark_data(self, agent, world):\n # returns data for benchmarking purposes\n return reward(agent, reward)\n\n def reward(self, agent, world):\n # squared distance from listener to landmark\n a = world.agents[0]\n dist2 = np.sum(np.square(a.goal_a.state.p_pos - a.goal_b.state.p_pos))\n return -dist2\n\n def observation(self, agent, world):\n # goal color\n goal_color = np.zeros(world.dim_color)\n if agent.goal_b is not None:\n goal_color = agent.goal_b.color\n\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n\n # communication of all other agents\n comm = []\n for other in world.agents:\n if other is agent or (other.state.c is None): continue\n comm.append(other.state.c)\n\n # speaker\n if not agent.movable:\n return np.concatenate([goal_color])\n # listener\n if agent.silent:\n return np.concatenate([agent.state.p_vel] + entity_pos + comm)\n\n"
] | [
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.choice",
"numpy.array",
"numpy.concatenate",
"numpy.square"
]
] |
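The reward in this record is simply the negative squared Euclidean distance between the listener and the goal landmark; a short check with stand-in positions:

```python
import numpy as np

listener_pos = np.array([0.2, -0.1])   # stand-in for a.goal_a.state.p_pos
goal_pos = np.array([0.5, 0.4])        # stand-in for a.goal_b.state.p_pos
reward = -np.sum(np.square(listener_pos - goal_pos))
print(reward)  # -0.34 (up to float rounding)
```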
t27/carla-scenic-data-collector | [
"3f38fa0e23a9f0ed85726292c5703c8505330870"
] | [
"carla_python_api_recorder.py"
] | [
"# Recorder that records agent states as dataframes and also stores a carla recording, in synchronous mode\n\n\n#!/usr/bin/env python\n\n# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de\n# Barcelona (UAB).\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\nimport glob\nimport os\nimport sys\nimport pandas as pd\nfrom tqdm import tqdm\nimport math\n\n\nCARLA_VERSION = \"0.9.11\"\ntry:\n # sys.path.append(\"./libs/carla-0.9.9-py3.7-linux-x86_64.egg\")\n if CARLA_VERSION == \"0.9.9\":\n sys.path.append(\"./libs/carla-0.9.9-py3.7-linux-x86_64.egg\")\n elif CARLA_VERSION == \"0.9.11\":\n sys.path.append(\"./libs/carla-0.9.11-py3.7-linux-x86_64.egg\")\nexcept IndexError:\n pass\n\nimport carla\n\nimport argparse\nimport random\nimport time\nimport logging\nimport click\nimport pathlib\n\nimport spawn\n\ncurrent_dir = pathlib.Path(__file__).parent.absolute()\nSEED = 27\nrandom.seed(SEED)\n\n\ndef get_metadata(actor, frame_id):\n type_id = actor.type_id\n\n def splitCarlaVec(vect):\n return vect.x, vect.y, vect.z\n\n id = actor.id\n # clsname = ClientSideBoundingBoxes.get_class_name(actor)\n tf = actor.get_transform()\n roll, pitch, yaw = tf.rotation.roll, tf.rotation.pitch, tf.rotation.yaw\n loc = actor.get_location()\n pos_x, pos_y, pos_z = splitCarlaVec(loc)\n try:\n bbox3d = actor.bounding_box\n bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = splitCarlaVec(\n bbox3d.location\n )\n bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = splitCarlaVec(bbox3d.extent)\n except:\n bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = None, None, None\n bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = None, None, None\n\n velocity_x, velocity_y, velocity_z = splitCarlaVec(actor.get_velocity())\n acc_x, acc_y, acc_z = splitCarlaVec(actor.get_acceleration())\n angular_vel_x, angular_vel_y, angular_vel_z = splitCarlaVec(\n actor.get_angular_velocity()\n )\n\n try:\n # need to do this because Carla's Actor object doesnt support getattr\n traffic_light_state = actor.state.name\n except:\n traffic_light_state = None\n\n return (\n frame_id,\n id,\n type_id,\n pos_x,\n pos_y,\n pos_z,\n roll,\n pitch,\n yaw,\n velocity_x,\n velocity_y,\n velocity_z,\n acc_x,\n acc_y,\n acc_z,\n angular_vel_x,\n angular_vel_y,\n angular_vel_z,\n bbox3d_offset_x,\n bbox3d_offset_y,\n bbox3d_offset_z,\n bbox3d_extent_x,\n bbox3d_extent_y,\n bbox3d_extent_z,\n traffic_light_state,\n )\n\n\nglobal_collision = False\n\n\ndef collision_detect_callback(event):\n actor_we_collide_against = event.other_actor\n impulse = event.normal_impulse\n intensity = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)\n if \"vehicle.\" in actor_we_collide_against.type_id:\n global global_collision\n global_collision = True\n\n\ndef attach_collision_sensor(actor, world):\n blueprint_library = world.get_blueprint_library()\n\n collision_sensor = world.spawn_actor(\n blueprint_library.find(\"sensor.other.collision\"),\n carla.Transform(),\n attach_to=actor,\n )\n\n collision_sensor.listen(lambda event: collision_detect_callback(event))\n\n return collision_sensor\n\n\ndef run(\n client,\n round_name,\n recording_dir,\n speed_violation_prob=60,\n tl_violation_prob=70,\n perc_speed_diff=-30,\n num_vehicles=25,\n SESSION_DURATION=60,\n):\n safe = True # avoid spawning vehicles whose geometry is not ideal for carla\n\n actor_list = []\n sensors = []\n\n logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO)\n\n 
try:\n FPS = 5\n DELTA_T = 1 / FPS\n\n world = client.get_world()\n blueprints = world.get_blueprint_library().filter(\"vehicle.*\")\n traffic_manager = client.get_trafficmanager()\n traffic_manager.set_global_distance_to_leading_vehicle(2.0)\n if CARLA_VERSION == \"0.9.11\":\n print(\"Using deterministic Traffic Manager\")\n traffic_manager.set_random_device_seed(SEED)\n settings = client.get_world().get_settings()\n if not settings.synchronous_mode:\n traffic_manager.set_synchronous_mode(True)\n synchronous_master = True\n settings.synchronous_mode = True\n settings.fixed_delta_seconds = DELTA_T\n client.get_world().apply_settings(settings)\n else:\n synchronous_master = False\n\n recording_dir_path = pathlib.Path(recording_dir)\n recording_dir_path.mkdir(exist_ok=True)\n session_recording = str(recording_dir_path / f\"{round_name}.csv\")\n carla_session_recording = str(\n recording_dir_path.absolute() / f\"{round_name}_carla_recording\"\n )\n print(\"Recording on file: %s\" % client.start_recorder(carla_session_recording))\n vehicles_list, walkers_list, all_actors = spawn.spawn(\n client, world, num_vehicles, 0, safe\n )\n world.tick()\n print(\"spawned %d vehicles, press Ctrl+C to exit.\" % len(actor_list))\n # fmt: off\n df_columns = [\n \"frame_id\", \"id\", \"type_id\", \"pos_x\", \"pos_y\", \"pos_z\", \"roll\", \"pitch\", \"yaw\", \n \"velocity_x\", \"velocity_y\", \"velocity_z\", \"acc_x\", \"acc_y\", \"acc_z\", \n \"angular_vel_x\", \"angular_vel_y\", \"angular_vel_z\", \n \"bbox3d_offset_x\", \"bbox3d_offset_y\", \"bbox3d_offset_z\", \n \"bbox3d_extent_x\", \"bbox3d_extent_y\", \"bbox3d_extent_z\", \"traffic_light_color\",\n ]\n # fmt: on\n # get all non vehicle agents\n global global_collision\n global_collision = False\n actors = world.get_actors()\n for actor in actors:\n if \"vehicle.\" in actor.type_id:\n sensors.append(attach_collision_sensor(actor, world))\n non_vehicles = [\n x\n for x in actors\n if (\"vehicle\" not in x.type_id and \"traffic_light\" not in x.type_id)\n ] # signs, traffic lights etc\n frame_id = 0\n df_arr = []\n non_vehicle_arr = [get_metadata(actor, frame_id) for actor in non_vehicles]\n df_arr += non_vehicle_arr\n pbar = tqdm(total=FPS * SESSION_DURATION)\n max_frames = FPS * SESSION_DURATION\n collision_detected_once = False\n while frame_id < max_frames:\n if global_collision and not collision_detected_once:\n # Todo, if detected, start a countdown of N frames and break only after N iterations\n print(\"detected collision, exiting!\")\n collision_detected_once = True\n max_frames = frame_id + 5\n # continue\n\n actors = world.get_actors()\n for actor in actors:\n if \"vehicle.\" in actor.type_id:\n # print(actor.type_id)\n tm_port = traffic_manager.get_port()\n actor.set_autopilot(True, tm_port)\n traffic_manager.ignore_lights_percentage(actor, tl_violation_prob)\n traffic_manager.distance_to_leading_vehicle(actor, 3)\n if random.random() * 100 < speed_violation_prob:\n traffic_manager.vehicle_percentage_speed_difference(\n actor, perc_speed_diff\n )\n\n vehicles_and_lights = [\n x\n for x in actors\n if \"vehicle\" in x.type_id or \"traffic_light\" in x.type_id\n ]\n metadata_arr = [\n get_metadata(actor, frame_id) for actor in vehicles_and_lights\n ]\n df_arr += metadata_arr\n frame_id += 1\n pbar.update(1)\n world.tick()\n df = pd.DataFrame(df_arr, columns=df_columns)\n pbar.close()\n print(f\"Saving CSV({len(df.frame_id.unique())} frames)\")\n # df.to_parquet(f\"session_data.parquet\")\n df.to_csv(session_recording, index=False)\n 
world.tick()\n # if args.recorder_time > 0:\n # time.sleep(args.recorder_time)\n # else:\n # while True:\n # world.wait_for_tick()\n # # time.sleep(0.1)\n\n finally:\n if synchronous_master:\n settings = world.get_settings()\n settings.synchronous_mode = False\n settings.fixed_delta_seconds = None\n world.apply_settings(settings)\n print(\"\\ndestroying %d actors\" % (len(sensors) + len(vehicles_list)))\n # all_agents = sensors + vehicles_list\n for s in sensors:\n s.destroy()\n client.apply_batch_sync([carla.command.DestroyActor(x) for x in vehicles_list])\n\n print(\"Stop recording\")\n client.stop_recorder()\n\n\[email protected]()\[email protected](\n \"-s\",\n \"--scenario_type\",\n type=click.Choice([\"tl_sl\", \"nominal\"], case_sensitive=False),\n required=True,\n)\[email protected](\"-n\", \"--num_rounds\", default=100)\[email protected](\"--test\", is_flag=True)\ndef main(scenario_type, num_rounds, test):\n # print(scenario_type, test, num_rounds)\n if test:\n random.seed(72)\n\n if scenario_type.lower() == \"tl_sl\":\n SPEED_VIOLATION_PROB = 60\n TL_VIOLATION_PROB = 70\n PERC_SPEED_DIFF = -30\n SCENARIO_NAME = \"tl_sl\"\n # NUM_ROUNDS = 100\n elif scenario_type.lower() == \"nominal\":\n SPEED_VIOLATION_PROB = 0\n TL_VIOLATION_PROB = 0\n PERC_SPEED_DIFF = 0\n SCENARIO_NAME = \"nominal\"\n # NUM_ROUNDS = 200\n NUM_ROUNDS = num_rounds\n print(f\"Recording {SCENARIO_NAME} data\")\n try:\n host = \"127.0.0.1\" # IP of the host server (default: 127.0.0.1)\n port = 2000 # TCP port to listen to (default: 2000)\",\n client = carla.Client(host, port)\n if test:\n scenario_dir = f\"test_{SCENARIO_NAME}_recordings\"\n else:\n scenario_dir = f\"{SCENARIO_NAME}_recordings\"\n\n round_names = []\n for i in range(NUM_ROUNDS):\n run(\n client,\n f\"{scenario_type}_round_{i}\",\n scenario_dir,\n SPEED_VIOLATION_PROB,\n TL_VIOLATION_PROB,\n PERC_SPEED_DIFF,\n )\n round_names.append(f\"{scenario_type}_round_{i}\")\n # client.reload_world()\n except KeyboardInterrupt:\n pass\n finally:\n print(\"\\ndone.\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.DataFrame"
]
] |
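A tiny sketch of the collision-impulse magnitude that this record's `collision_detect_callback` computes before checking whether the other actor is a vehicle; the impulse components are stand-in values, not recorded data.

```python
import math

impulse_x, impulse_y, impulse_z = 1.5, -2.0, 0.5   # stand-in normal impulse
intensity = math.sqrt(impulse_x ** 2 + impulse_y ** 2 + impulse_z ** 2)
print(intensity)  # ~2.55
```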
dcambie/spectrochempy | [
"e376082d66be7a4c528b7d83be076d77534e39bd"
] | [
"spectrochempy/core/dataset/nddataset.py"
] | [
"# -*- coding: utf-8 -*-\n\n#\n# ======================================================================================================================\n# Copyright (©) 2015-2019 LCS\n# Laboratoire Catalyse et Spectrochimie, Caen, France.\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT\n# See full LICENSE agreement in the root directory\n# ======================================================================================================================\n\"\"\"\nThis module implements the |NDDataset| class.\n\"\"\"\n\n__all__ = ['NDDataset']\n\nimport textwrap\nimport warnings\nimport sys\n\nimport numpy as np\nfrom traitlets import HasTraits, Instance, Bool, Float, validate, default, Dict, Union\nfrom traittypes import Array\n\nfrom spectrochempy.core.project.baseproject import AbstractProject\nfrom spectrochempy.core.dataset.ndarray import NDArray, DEFAULT_DIM_NAME\nfrom spectrochempy.core.dataset.ndcomplex import NDComplexArray\nfrom spectrochempy.core.dataset.coord import Coord, LinearCoord\nfrom spectrochempy.core.dataset.coordset import CoordSet\nfrom spectrochempy.core.dataset.ndmath import NDMath, _set_ufuncs, _set_operators\nfrom spectrochempy.core.dataset.ndio import NDIO\nfrom spectrochempy.core.dataset.ndplot import NDPlot\nfrom spectrochempy.core import error_, warning_\nfrom spectrochempy.utils import (colored_output, SpectroChemPyException, SpectroChemPyWarning, MaskedConstant)\n\nHAS_XARRAY = False\ntry:\n import xarray as xr\n\n HAS_XARRAY = True # pragma: no cover\nexcept ImportError:\n xr = None # pragma: no cover\n\n\n# ======================================================================================================================\n# NDDataset class definition\n# ======================================================================================================================\n\nclass NDDataset(NDIO, NDPlot, NDMath, NDComplexArray):\n # coordinates\n _coordset = Instance(CoordSet, allow_none=True)\n\n # model data (e.g., for fit)\n _modeldata = Array(Float(), allow_none=True)\n\n # some setting for NDDataset\n _copy = Bool(False)\n _labels_allowed = Bool(False) # no labels for NDDataset\n\n # dataset can be members of a project.\n # we use the abstract class to avoid circular imports.\n _parent = Instance(AbstractProject, allow_none=True)\n\n # For the GUI interface\n\n # parameters state\n _state = Dict()\n\n # processed data (for GUI)\n _processeddata = Array(Float(), allow_none=True)\n\n # processed mask (for GUI)\n _processedmask = Union((Bool(), Array(Bool()), Instance(MaskedConstant)))\n\n # baseline data (for GUI)\n _baselinedata = Array(Float(), allow_none=True)\n\n # reference data (for GUI)\n _referencedata = Array(Float(), allow_none=True)\n\n # ------------------------------------------------------------------------------------------------------------------\n # initialisation\n # ------------------------------------------------------------------------------------------------------------------\n # ..................................................................................................................\n def __init__(self, data=None, coordset=None, coordunits=None, coordtitles=None, **kwargs):\n \"\"\"\n The main N-dimensional dataset class used by |scpy|.\n\n The NDDataset is the main object use by SpectroChemPy. Like numpy ndarrays, NDDataset have the capability to be\n sliced, sorted and subject to mathematical operations. 
But, in addition, NDDataset may have units,\n can be masked\n and each dimensions can have coordinates also with units. This make NDDataset aware of unit compatibility,\n e.g.,\n for binary operation such as additions or subtraction or during the application of mathematical operations.\n In addition or in replacement of numerical data for coordinates, NDDataset can also have labeled coordinates\n where labels can be different kind of objects (strings, datetime, numpy nd.ndarray or othe NDDatasets, etc…).\n\n Parameters\n ----------\n data : array of floats\n Data array contained in the object. The data can be a list, a tuple, a |ndarray|, a ndarray-like,\n a |NDArray| or any subclass of |NDArray|. Any size or shape of data is accepted. If not given, an empty\n |NDArray| will be inited.\n At the initialisation the provided data will be eventually casted to a numpy-ndarray.\n If a subclass of |NDArray| is passed which already contains some mask, labels, or units, these elements\n will\n be used to accordingly set those of the created object. If possible, the provided data will not be copied\n for `data` input, but will be passed by reference, so you should make a copy of the `data` before passing\n them if that's the desired behavior or set the `copy` argument to True.\n coordset : An instance of |CoordSet|, optional\n `coords` contains the coordinates for the different dimensions of the `data`. if `coords` is provided,\n it must specified the `coord` and `labels` for all dimensions of the `data`.\n Multiple `coord`'s can be specified in an |CoordSet| instance for each dimension.\n coordunits : list, optional\n A list of units corresponding to the dimensions in the order of the coordset.\n coordtitles : list, optional\n A list of titles corresponding of the dimensions in the order of the coordset.\n **kwargs : dict\n See other parameters.\n\n Other Parameters\n ----------------\n dtype : str or dtype, optional, default=np.float64\n If specified, the data will be casted to this dtype, else the data will be casted to float64 or complex128.\n dims : list of chars, optional\n If specified the list must have a length equal to the number od data dimensions (ndim) and the chars\n must be\n taken among among x,y,z,u,v,w or t. If not specified, the dimension names are automatically attributed in\n this order.\n name : str, optional\n A user friendly name for this object. If not given, the automatic `id` given at the object creation will be\n used as a name.\n labels : array of objects, optional\n Labels for the `data`. labels can be used only for 1D-datasets.\n The labels array may have an additional dimension, meaning several series of labels for the same data.\n The given array can be a list, a tuple, a |ndarray|, a ndarray-like, a |NDArray| or any subclass of\n |NDArray|.\n mask : array of bool or `NOMASK`, optional\n Mask for the data. The mask array must have the same shape as the data. The given array can be a list,\n a tuple, or a |ndarray|. Each values in the array must be `False` where the data are *valid* and True when\n they are not (like in numpy masked arrays). If `data` is already a :class:`~numpy.ma.MaskedArray`, or any\n array object (such as a |NDArray| or subclass of it), providing a `mask` here will causes the mask from the\n masked array to be ignored.\n units : |Unit| instance or str, optional\n Units of the data. If data is a |Quantity| then `units` is set to the unit of the `data`; if a unit is also\n explicitly provided an error is raised. 
Handling of units use the `pint <https://pint.readthedocs.org/>`_\n package.\n title : str, optional\n The title of the dimension. It will later be used for instance for labelling plots of the data.\n It is optional but recommended to give a title to each ndarray.\n dlabel : str, optional\n Alias of `title`.\n meta : dict-like object, optional\n Additional metadata for this object. Must be dict-like but no\n further restriction is placed on meta.\n author : str, optional\n Name(s) of the author(s) of this dataset. BNy default, name of the computer note where this dataset is\n created.\n description : str, optional\n A optional description of the nd-dataset. A shorter alias is `desc`.\n history : str, optional\n A string to add to the object history.\n copy : bool, optional\n Perform a copy of the passed object. Default is False.\n\n See Also\n --------\n Coord : Explicit coordinates object.\n LinearCoord : Implicit coordinates objet.\n CoordSet : Set of coordinates.\n\n Notes\n -----\n The underlying array in a |NDDataset| object can be accessed through the `data` attribute, which will return\n a conventional |ndarray|.\n\n Examples\n --------\n Usage by an end-user\n\n >>> from spectrochempy import *\n >>> x = NDDataset([1, 2, 3])\n >>> print(x.data) # doctest: +NORMALIZE_WHITESPACE\n [ 1 2 3]\n \"\"\"\n super().__init__(data, **kwargs)\n\n self._parent = None\n\n # eventually set the coordinates with optional units and title\n\n if isinstance(coordset, CoordSet):\n self.set_coordset(**coordset)\n\n else:\n if coordset is None:\n coordset = [None] * self.ndim\n\n if coordunits is None:\n coordunits = [None] * self.ndim\n\n if coordtitles is None:\n coordtitles = [None] * self.ndim\n\n _coordset = []\n for c, u, t in zip(coordset, coordunits, coordtitles):\n if not isinstance(c, CoordSet):\n if isinstance(c, LinearCoord):\n coord = LinearCoord(c)\n else:\n coord = Coord(c)\n if u is not None:\n coord.units = u\n if t is not None:\n coord.title = t\n else:\n if u: # pragma: no cover\n warning_('units have been set for a CoordSet, but this will be ignored '\n '(units are only defined at the coordinate level')\n if t: # pragma: no cover\n warning_('title will be ignored as they are only defined at the coordinates level')\n coord = c\n\n _coordset.append(coord)\n\n if _coordset and set(_coordset) != {Coord()}: # if they are no coordinates do nothing\n self.set_coordset(*_coordset)\n\n # ------------------------------------------------------------------------------------------------------------------\n # special methods\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n def __dir__(self):\n # WARNING: be carefull to keep the present order of the three first elements! 
Needed for save/load operations\n return ['dims', 'coordset', 'data', 'name', 'title', 'mask', 'units', 'meta', 'preferences',\n 'author', 'description', 'history', 'date', 'modified', 'origin', 'roi', 'offset', 'transposed',\n 'modeldata', 'processeddata', 'baselinedata', 'referencedata', 'state'] + NDIO().__dir__()\n\n # ..................................................................................................................\n def __getitem__(self, items):\n\n saveditems = items\n\n # coordinate selection to test first\n if isinstance(items, str):\n try:\n return self._coordset[items]\n except Exception:\n pass\n\n # slicing\n new, items = super().__getitem__(items, return_index=True)\n\n if new is None:\n return None\n\n if self._coordset is not None:\n names = self._coordset.names # all names of the current coordinates\n new_coords = [None] * len(names)\n for i, item in enumerate(items):\n # get the corresponding dimension name in the dims list\n name = self.dims[i]\n # get the corresponding index in the coordinate's names list\n idx = names.index(name)\n if self._coordset[idx].is_empty:\n new_coords[idx] = Coord(None, name=name)\n elif isinstance(item, slice):\n # add the slice on the corresponding coordinates on the dim to the new list of coordinates\n if not isinstance(self._coordset[idx], CoordSet):\n new_coords[idx] = self._coordset[idx][item]\n else:\n # we must slice all internal coordinates\n newc = []\n for c in self._coordset[idx]:\n newc.append(c[item])\n new_coords[idx] = CoordSet(*newc[::-1], name=name) # we reverse to be sure\n # the order will be # kept for internal coordinates\n new_coords[idx]._default = self._coordset[idx]._default # set the same default coord\n new_coords[idx]._is_same_dim = self._coordset[idx]._is_same_dim\n\n elif isinstance(item, (np.ndarray, list)):\n new_coords[idx] = self._coordset[idx][item]\n\n new.set_coordset(*new_coords, keepnames=True)\n\n new.history = f'Slice extracted: ({saveditems})'\n return new\n\n # ..................................................................................................................\n def __getattr__(self, item):\n # when the attribute was not found\n if item in [\"__numpy_ufunc__\", \"interface\", '_pytestfixturefunction', '__dataclass_fields__',\n '_ipython_canary_method_should_not_exist_', '_baseclass', '_fill_value', '_ax_lines', '_axcb',\n 'clevels', '__wrapped__', 'coords', '__await__',\n '__aiter__'] or '_validate' in item or '_changed' in item:\n # raise an error so that traits, ipython operation and more ... 
will be handled correctly\n raise AttributeError\n\n # syntax such as ds.x, ds.y, etc...\n\n if item[0] in self.dims or self._coordset:\n\n # look also properties\n attribute = None\n index = 0\n # print(item)\n if len(item) > 2 and item[1] == '_':\n attribute = item[1:]\n item = item[0]\n index = self.dims.index(item)\n\n if self._coordset:\n try:\n c = self._coordset[item]\n if isinstance(c, str) and c in self.dims:\n # probaly a reference to another coordinate name\n c = self._coordset[c]\n\n if c.name in self.dims or c._parent_dim in self.dims:\n if attribute is not None:\n # get the attribute\n return getattr(c, attribute)\n else:\n return c\n else:\n raise AttributeError\n\n except Exception as err:\n if item in self.dims:\n return None\n else:\n raise err\n elif attribute is not None:\n if attribute == 'size':\n # we want the size but there is no coords, get it from the data shape\n return self.shape[index]\n else:\n raise AttributeError(f'Can not find `{attribute}` when no coordinate is defined')\n\n return None\n\n raise AttributeError\n\n def __setattr__(self, key, value):\n\n if key in DEFAULT_DIM_NAME: # syntax such as ds.x, ds.y, etc...\n # Note the above test is important to avoid errors with traitlets\n # even if it looks redundant with the folllowing\n if key in self.dims:\n if self._coordset is None:\n # we need to create a coordset first\n self.set_coordset(dict((self.dims[i], None) for i in range(self.ndim)))\n idx = self._coordset.names.index(key)\n _coordset = self._coordset\n listcoord = False\n if isinstance(value, list):\n listcoord = all([isinstance(item, Coord) for item in value])\n if listcoord:\n _coordset[idx] = list(CoordSet(value).to_dict().values())[0]\n _coordset[idx].name = key\n _coordset[idx]._is_same_dim = True\n elif isinstance(value, CoordSet):\n if len(value) > 1:\n value = CoordSet(value)\n _coordset[idx] = list(value.to_dict().values())[0]\n _coordset[idx].name = key\n _coordset[idx]._is_same_dim = True\n elif isinstance(value, (Coord, LinearCoord)):\n value.name = key\n _coordset[idx] = value\n else:\n _coordset[idx] = Coord(value, name=key)\n _coordset = self._valid_coordset(_coordset)\n self._coordset.set(_coordset)\n else:\n raise AttributeError(f'Coordinate `{key}` is not used.')\n else:\n super().__setattr__(key, value)\n\n # ..................................................................................................................\n def __eq__(self, other, attrs=None):\n attrs = self.__dir__()\n for attr in (\n 'filename', 'preferences', 'name', 'description', 'history', 'date', 'modified', 'origin',\n 'show_datapoints', 'roi', 'offset', 'modeldata', 'processeddata', 'baselinedata', 'referencedata',\n 'state'):\n # these attibutes are not used for comparison (comparison based on data and units!)\n try:\n attrs.remove(attr)\n except ValueError:\n pass\n\n return super().__eq__(other, attrs)\n\n # ..................................................................................................................\n def __hash__(self):\n # all instance of this class has same hash, so they can be compared\n return super().__hash__ + hash(self._coordset)\n\n # ------------------------------------------------------------------------------------------------------------------\n # Default values\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n 
@default('_coordset')\n def _coordset_default(self):\n return None\n\n # ..................................................................................................................\n @default('_modeldata')\n def _modeldata_default(self):\n return None\n\n # ..................................................................................................................\n @default('_processeddata')\n def _processeddata_default(self):\n return None\n\n # ..................................................................................................................\n @default('_baselinedata')\n def _baselinedata_default(self):\n return None\n\n # ..................................................................................................................\n @default('_referencedata')\n def _referencedata_default(self):\n return None\n\n # ------------------------------------------------------------------------------------------------------------------\n # GUI options\n # ------------------------------------------------------------------------------------------------------------------\n # TODO: refactor the spectrochempy preference system to have a common basis\n\n @property\n def state(self):\n # state of the controller window for this dataset\n return self._state\n\n @state.setter\n def state(self, val):\n self._state = val\n\n @property\n def processeddata(self):\n return self._processeddata\n\n @processeddata.setter\n def processeddata(self, val):\n self._processeddata = val\n\n @property\n def processedmask(self):\n return self._processedmask\n\n @processedmask.setter\n def processedmask(self, val):\n self._processedmask = val\n\n @property\n def baselinedata(self):\n return self._baselinedata\n\n @baselinedata.setter\n def baselinedata(self, val):\n self._baselinedata = val\n\n @property\n def referencedata(self):\n return self._referencedata\n\n @referencedata.setter\n def referencedata(self, val):\n self._referencedata = val\n\n # ------------------------------------------------------------------------------------------------------------------\n # Validators\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n @validate('_coordset')\n def _coordset_validate(self, proposal):\n coords = proposal['value']\n return self._valid_coordset(coords)\n\n def _valid_coordset(self, coords):\n # uses in coords_validate and setattr\n if coords is None:\n return\n\n for k, coord in enumerate(coords):\n\n if coord is not None and not isinstance(coord, CoordSet) and coord.data is None:\n continue\n\n # For coord to be acceptable, we require at least a NDArray, a NDArray subclass or a CoordSet\n if not isinstance(coord, (LinearCoord, Coord, CoordSet)):\n if isinstance(coord, NDArray):\n coord = coords[k] = Coord(coord)\n else:\n raise TypeError('Coordinates must be an instance or a subclass of Coord class or NDArray, or of '\n f' CoordSet class, but an instance of {type(coord)} has been passed')\n\n if self.dims and coord.name in self.dims:\n # check the validity of the given coordinates in terms of size (if it correspond to one of the dims)\n size = coord.size\n\n if self.implements('NDDataset'):\n idx = self._get_dims_index(coord.name)[0] # idx in self.dims\n if size != self._data.shape[idx]:\n raise ValueError(f'the size of a coordinates array must be None or be equal'\n f' to that of the respective 
`{coord.name}`'\n f' data dimension but coordinate size={size} != data shape[{idx}]='\n f'{self._data.shape[idx]}')\n else:\n pass # bypass this checking for any other derived type (should be done in the subclass)\n\n coords._parent = self\n return coords\n\n # ..................................................................................................................\n @property\n def _dict_dims(self):\n _dict = {}\n for index, dim in enumerate(self.dims):\n if dim not in _dict:\n _dict[dim] = {'size': self.shape[index], 'coord': getattr(self, dim)}\n return _dict\n\n # ------------------------------------------------------------------------------------------------------------------\n # public methods\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n def add_coordset(self, *coords, dims=None, **kwargs):\n \"\"\"\n Add one or a set of coordinates from a dataset.\n\n Parameters\n ----------\n *coords : iterable\n Coordinates object(s).\n dims : list\n Name of the coordinates.\n **kwargs : dict\n Keywords passed to the coordset.\n \"\"\"\n if not coords and not kwargs:\n # reset coordinates\n self._coordset = None\n return\n\n if self._coordset is None:\n # make the whole coordset at once\n self._coordset = CoordSet(*coords, dims=dims, **kwargs)\n else:\n # add one coordinate\n self._coordset._append(*coords, **kwargs)\n\n if self._coordset:\n # set a notifier to the updated traits of the CoordSet instance\n HasTraits.observe(self._coordset, self._dims_update, '_updated')\n # force it one time after this initialization\n self._coordset._updated = True\n\n # ..................................................................................................................\n def coord(self, dim='x'):\n \"\"\"\n Return the coordinates along the given dimension.\n\n Parameters\n ----------\n dim : int or str\n A dimension index or name, default index = `x`.\n If an integer is provided, it is equivalent to the `axis` parameter for numpy array.\n\n Returns\n -------\n |Coord|\n Coordinates along the given axis.\n \"\"\"\n idx = self._get_dims_index(dim)[0] # should generate an error if the\n # dimension name is not recognized\n if idx is None:\n return None\n\n if self._coordset is None:\n return None\n\n # idx is not necessarily the position of the coordinates in the CoordSet\n # indeed, transposition may have taken place. So we need to retrieve the coordinates by its name\n name = self.dims[idx]\n if name in self._coordset.names:\n idx = self._coordset.names.index(name)\n return self._coordset[idx]\n else:\n error_(f'could not find this dimenson name: `{name}`')\n return None\n\n # ..................................................................................................................\n @property\n def coordset(self):\n \"\"\"\n |CoordSet| instance.\n\n Contains the coordinates of the various dimensions of the dataset.\n It's a readonly property. 
Use set_coords to change one or more coordinates at once.\n \"\"\"\n if self._coordset and all(c.is_empty for c in self._coordset):\n # all coordinates are empty, this is equivalent to None for the coordset\n return None\n return self._coordset\n\n # ..................................................................................................................\n @coordset.setter\n def coordset(self, coords):\n if isinstance(coords, CoordSet):\n self.set_coordset(**coords)\n else:\n self.set_coordset(coords)\n\n # ..................................................................................................................\n @property\n def coordnames(self):\n \"\"\"\n List of the |Coord| names.\n\n Read only property.\n \"\"\"\n if self._coordset is not None:\n return self._coordset.names\n\n # ..................................................................................................................\n @property\n def coordtitles(self):\n \"\"\"\n List of the |Coord| titles.\n\n Read only property. Use set_coordtitle to eventually set titles.\n \"\"\"\n if self._coordset is not None:\n return self._coordset.titles\n\n # ..................................................................................................................\n @property\n def coordunits(self):\n \"\"\"\n List of the |Coord| units.\n\n Read only property. Use set_coordunits to eventually set units.\n \"\"\"\n if self._coordset is not None:\n return self._coordset.units\n\n # ..................................................................................................................\n @property\n def data(self):\n \"\"\"\n The ``data`` array.\n\n If there is no data but labels, then the labels are returned instead of data.\n \"\"\"\n return super().data\n\n # ..................................................................................................................\n @data.setter\n def data(self, data):\n # as we can't write super().data = data, we call _set_data\n # see comment in the data.setter of NDArray\n super()._set_data(data)\n\n # ..................................................................................................................\n def delete_coordset(self):\n \"\"\"\n Delete all coordinate settings.\n \"\"\"\n self._coordset = None\n\n # ..................................................................................................................\n def implements(self, name=None):\n \"\"\"\n Check if the current object implements `NDDataset`.\n\n Rather than isinstance(obj, NDDataset) use object.implements('NDDataset').\n This is useful to check type without importing the module\n\n Parameters\n ----------\n name : str\n Name of the object class. 
If None, the function returns the class name.\n If name is given, it checks if it correspond to the current class name.\n\n Returns\n -------\n str or bool\n If name is given, a bool is returned\n If name is None, the classname is returned\n\n Examples\n --------\n >>> from spectrochempy import NDDataset, Coord\n >>> co = Coord([1., 2., 3.])\n >>> co.implements('NDDataset')\n False\n >>> co.implements('Coord')\n True\n >>> ds = NDDataset([1., 2., 3.])\n >>> ds.implements()\n 'NDDataset'\n \"\"\"\n\n if name is None:\n return 'NDDataset'\n else:\n return name == 'NDDataset'\n\n # ..................................................................................................................\n @property\n def labels(self):\n # not valid for NDDataset\n # There is no label for nd-dataset\n raise NotImplementedError # pragma: no cover\n\n # ..................................................................................................................\n @property\n def modeldata(self):\n \"\"\"\n |ndarray| - models data.\n\n Data eventually generated by modelling of the data.\n \"\"\"\n return self._modeldata\n\n # ..................................................................................................................\n @modeldata.setter\n def modeldata(self, data):\n self._modeldata = data\n\n # ..................................................................................................................\n @property\n def parent(self):\n \"\"\"\n |Project| instance\n\n The parent project of the dataset.\n \"\"\"\n return self._parent\n\n # ..................................................................................................................\n @parent.setter\n def parent(self, value):\n if self._parent is not None:\n # A parent project already exists for this dataset but the\n # entered values gives a different parent. This is not allowed,\n # as it can produce impredictable results. 
We will first remove it\n # from the current project.\n self._parent.remove_dataset(self.name)\n self._parent = value\n\n # ..................................................................................................................\n def set_coordset(self, *args, **kwargs):\n \"\"\"\n Set one or more coordinates at once.\n\n Warnings\n --------\n This method replace all existing coordinates.\n\n See Also\n --------\n add_coords, set_coordtitles, set_coordunits\n \"\"\"\n self._coordset = None\n self.add_coordset(*args, dims=self.dims, **kwargs)\n\n # ..................................................................................................................\n def set_coordtitles(self, *args, **kwargs):\n \"\"\"\n Set titles of the one or more coordinates.\n \"\"\"\n self._coordset.set_titles(*args, **kwargs)\n\n # ..................................................................................................................\n def set_coordunits(self, *args, **kwargs):\n \"\"\"\n Set units of the one or more coordinates.\n \"\"\"\n self._coordset.set_units(*args, **kwargs)\n\n # ..................................................................................................................\n def sort(self, **kwargs):\n \"\"\"\n Returns the dataset sorted along a given dimension.\n\n (by default, the last dimension [axis=-1]) using the numeric or label values.\n\n Parameters\n ----------\n dim : str or int, optional, default=-1\n dimension index or name along which to sort.\n pos : int , optional\n If labels are multidimensional - allow to sort on a define\n row of labels : labels[pos]. Experimental : Not yet checked.\n by : str among ['value', 'label'], optional, default=``value``\n Indicate if the sorting is following the order of labels or\n numeric coord values.\n descend : `bool`, optional, default=`False`\n If true the dataset is sorted in a descending direction. Default is False except if coordinates\n are reversed.\n inplace : bool, optional, default=`False`\n Flag to say that the method return a new object (default)\n or not (inplace=True).\n\n Returns\n -------\n sorted_dataset\n \"\"\"\n\n inplace = kwargs.get('inplace', False)\n if not inplace:\n new = self.copy()\n else:\n new = self\n\n # parameter for selecting the level of labels (default None or 0)\n pos = kwargs.pop('pos', None)\n\n # parameter to say if selection is done by values or by labels\n by = kwargs.pop('by', 'value')\n\n # determine which axis is sorted (dims or axis can be passed in kwargs)\n # it will return a tuple with axis and dim\n axis, dim = self.get_axis(**kwargs)\n if axis is None:\n axis, dim = self.get_axis(axis=0)\n\n # get the corresponding coordinates (remember the their order can be different form the order\n # of dimension in dims. 
S we cannot jsut take the coord from the indice.\n coord = getattr(self, dim) # get the coordinate using the syntax such as self.x\n\n descend = kwargs.pop('descend', None)\n if descend is None:\n # when non specified, default is False (except for reversed coordinates\n descend = coord.reversed\n\n # import warnings\n # warnings.simplefilter(\"error\")\n\n indexes = []\n for i in range(self.ndim):\n if i == axis:\n if not coord.has_data:\n # sometimes we have only label for Coord objects.\n # in this case, we sort labels if they exist!\n if coord.is_labeled:\n by = 'label'\n else:\n # nothing to do for sorting\n # return self itself\n return self\n\n args = coord._argsort(by=by, pos=pos, descend=descend)\n setattr(new, dim, coord[args])\n indexes.append(args)\n else:\n indexes.append(slice(None))\n\n new._data = new._data[tuple(indexes)]\n if new.is_masked:\n new._mask = new._mask[tuple(indexes)]\n\n return new\n\n # ..................................................................................................................\n def squeeze(self, *dims, inplace=False):\n \"\"\"\n Remove single-dimensional entries from the shape of a NDDataset.\n\n Parameters\n ----------\n dim : None or int or tuple of ints, optional\n Selects a subset of the single-dimensional entries in the\n shape. If a dimension (dim) is selected with shape entry greater than\n one, an error is raised.\n inplace : bool, optional, default=`False`\n Flag to say that the method return a new object (default)\n or not (inplace=True).\n\n Returns\n -------\n squeezed\n The input array, but with all or a subset of the\n dimensions of length 1 removed.\n\n Raises\n ------\n ValueError\n If `dim` is not `None`, and the dimension being squeezed is not\n of length 1.\n \"\"\"\n # make a copy of the original dims\n old = self.dims[:]\n\n # squeeze the data and determine which axis must be squeezed\n new, axis = super().squeeze(*dims, inplace=inplace, return_axis=True)\n\n if axis is not None and new._coordset is not None:\n # if there are coordinates they have to be squeezed as well (remove\n # coordinate for the squeezed axis)\n\n for i in axis:\n dim = old[i]\n del new._coordset[dim]\n\n return new\n\n def expand_dims(self, dim=None):\n \"\"\"\n Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded array shape.\n\n Parameters\n ----------\n dim : int or str\n Position in the expanded axes where the new axis (or axes) is placed.\n\n Returns\n -------\n result : ndarray\n View of `a` with the number of dimensions increased.\n\n See Also\n --------\n squeeze : The inverse operation, removing singleton dimensions\n \"\"\" # TODO\n\n # ..................................................................................................................\n def swapdims(self, dim1, dim2, inplace=False):\n \"\"\"\n Interchange two dimensions of a NDDataset.\n\n Parameters\n ----------\n dim1 : int\n First axis.\n dim2 : int\n Second axis.\n inplace : bool, optional, default=`False`\n Flag to say that the method return a new object (default)\n or not (inplace=True).\n\n Returns\n -------\n swaped_dataset\n\n See Also\n --------\n transpose\n \"\"\"\n\n new = super().swapdims(dim1, dim2, inplace=inplace)\n new.history = f'Data swapped between dims {dim1} and {dim2}'\n return new\n\n # ..................................................................................................................\n @property\n def T(self):\n \"\"\"\n Transposed |NDDataset|.\n\n The same object is 
returned if `ndim` is less than 2.\n \"\"\"\n return self.transpose()\n\n # ..................................................................................................................\n def take(self, indices, **kwargs):\n \"\"\"\n Take elements from an array\n\n Parameters\n ----------\n indices\n kwargs\n\n Returns\n -------\n \"\"\"\n\n # handle the various syntax to pass the axis\n dims = self._get_dims_from_args(**kwargs)\n axis = self._get_dims_index(dims)\n axis = axis[0] if axis else None\n\n # indices = indices.tolist()\n if axis is None:\n # just do a fancy indexing\n return self[indices]\n\n if axis < 0:\n axis = self.ndim + axis\n\n index = tuple([...] + [indices] + [slice(None) for i in range(self.ndim - 1 - axis)])\n new = self[index]\n return new\n\n def to_array(self):\n \"\"\"\n Return a numpy masked array (i.e., other NDDataset attributes are lost.\n\n Examples\n ========\n >>> import spectrochempy as scp\n >>> dataset = scp.read('wodger.spg')\n >>> a = scp.to_array(dataset)\n\n equivalent to:\n\n >>> a = np.ma.array(dataset)\n\n or\n\n >>> a= dataset.masked_data\n \"\"\"\n return np.ma.array(self)\n\n # ..................................................................................................................\n def to_xarray(self, **kwargs):\n \"\"\"\n Convert a NDDataset instance to an `~xarray.DataArray` object\n ( the xarray library must be available )\n\n Parameters\n\n Returns\n -------\n object : a xarray.DataArray object\n \"\"\"\n # Information about DataArray from the DataArray docstring\n #\n # Attributes\n # ----------\n # dims: tuple\n # Dimension names associated with this array.\n # values: np.ndarray\n # Access or modify DataArray values as a numpy array.\n # coords: dict-like\n # Dictionary of DataArray objects that label values along each dimension.\n # name: str or None\n # Name of this array.\n # attrs: OrderedDict\n # Dictionary for holding arbitrary metadata.\n # Init docstring\n #\n # Parameters\n # ----------\n # data: array_like\n # Values for this array. Must be an ``numpy.ndarray``, ndarray like,\n # or castable to an ``ndarray``.\n # coords: sequence or dict of array_like objects, optional\n # Coordinates (tick labels) to use for indexing along each dimension.\n # If dict-like, should be a mapping from dimension names to the\n # corresponding coordinates. If sequence-like, should be a sequence\n # of tuples where the first element is the dimension name and the\n # second element is the corresponding coordinate array_like object.\n # dims: str or sequence of str, optional\n # Name(s) of the data dimension(s). Must be either a string (only\n # for 1D data) or a sequence of strings with length equal to the\n # number of dimensions. If this argument is omitted, dimension names\n # are taken from ``coords`` (if possible) and otherwise default to\n # ``['dim_0', ... 'dim_n']``.\n # name: str or None, optional\n # Name of this array.\n # attrs: dict_like or None, optional\n # Attributes to assign to the new instance. By default, an empty\n # attribute dictionary is initialized.\n # encoding: dict_like or None, optional\n # Dictionary specifying how to encode this array's data into a\n # serialized format like netCDF4. Currently used keys (for netCDF)\n # include '_FillValue', 'scale_factor', 'add_offset', 'dtype',\n # 'units' and 'calendar' (the later two only for datetime arrays).\n # Unrecognized keys are ignored.\n\n if not HAS_XARRAY:\n warnings.warn('Xarray is not available! 
This function can not be used', SpectroChemPyWarning)\n return None\n\n x, y = self.x, self.y\n tx = x.title\n if y:\n ty = y.title\n da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(ty, y.data), (tx, x.data)], )\n\n da.attrs['units'] = self.units\n else:\n da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(tx, x.data)], )\n\n da.attrs['units'] = self.units\n\n da.attrs['title'] = self.title\n\n return da\n\n # ..................................................................................................................\n def transpose(self, *dims, inplace=False):\n \"\"\"\n Permute the dimensions of a NDDataset.\n\n Parameters\n ----------\n dims : sequence of dimension indexes or names, optional\n By default, reverse the dimensions, otherwise permute the dimensions\n according to the values given.\n inplace : bool, optional, default=`False`\n Flag to say that the method return a new object (default)\n or not (inplace=True).\n\n Returns\n -------\n transposed_array\n\n See Also\n --------\n swapdims : Interchange two dimensions of a NDDataset.\n \"\"\"\n new = super().transpose(*dims, inplace=inplace)\n new.history = f'Data transposed between dims: {dims}' if dims else ''\n\n return new\n\n # ------------------------------------------------------------------------------------------------------------------\n # private methods\n # ------------------------------------------------------------------------------------------------------------------\n\n # ..................................................................................................................\n def _cstr(self):\n # Display the metadata of the object and partially the data\n out = ''\n out += ' name: {}\\n'.format(self.name)\n out += ' author: {}\\n'.format(self.author)\n out += ' created: {}\\n'.format(self._date)\n # out += ' modified: {}\\n'.format(self._modified) if (self.modified - self.date).seconds > 1 else ''\n\n wrapper1 = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 15, replace_whitespace=True,\n width=self._text_width)\n\n pars = self.description.strip().splitlines()\n if pars:\n out += ' description: '\n desc = ''\n if pars:\n desc += '{}\\n'.format(wrapper1.fill(pars[0]))\n for par in pars[1:]:\n desc += '{}\\n'.format(textwrap.indent(par, ' ' * 15))\n # the three escaped null characters are here to facilitate\n # the generation of html outputs\n desc = '\\0\\0\\0{}\\0\\0\\0\\n'.format(desc.rstrip())\n out += desc\n\n if self._history:\n pars = self.history\n out += ' history: '\n hist = ''\n if pars:\n hist += '{}\\n'.format(wrapper1.fill(pars[0]))\n for par in pars[1:]:\n hist += '{}\\n'.format(textwrap.indent(par, ' ' * 15))\n # the three escaped null characters are here to facilitate\n # the generation of html outputs\n hist = '\\0\\0\\0{}\\0\\0\\0\\n'.format(hist.rstrip())\n out += hist\n\n out += '{}\\n'.format(self._str_value().rstrip())\n out += '{}\\n'.format(self._str_shape().rstrip()) if self._str_shape() else ''\n out += '{}\\n'.format(self._str_dims().rstrip())\n\n if not out.endswith('\\n'):\n out += '\\n'\n out += '\\n'\n\n if not self._html_output:\n return colored_output(out.rstrip())\n else:\n return out.rstrip()\n\n # ..................................................................................................................\n def _loc2index(self, loc, dim=-1):\n # Return the index of a location (label or coordinates) along the dim\n # This can work only if `coords` exists.\n\n if self._coordset is None:\n raise 
SpectroChemPyException('No coords have been defined. Slicing or selection'\n ' by location ({}) needs coords definition.'.format(loc))\n\n coord = self.coord(dim)\n\n return coord._loc2index(loc)\n\n # ..................................................................................................................\n def _str_dims(self):\n if self.is_empty:\n return ''\n if len(self.dims) < 1 or not hasattr(self, \"_coordset\"):\n return ''\n if not self._coordset or len(self._coordset) < 1:\n return ''\n\n self._coordset._html_output = self._html_output # transfert the html flag if necessary: false by default\n\n txt = self._coordset._cstr()\n txt = txt.rstrip() # remove the trailing '\\n'\n return txt\n\n _repr_dims = _str_dims\n\n # ------------------------------------------------------------------------------------------------------------------\n # events\n # ------------------------------------------------------------------------------------------------------------------\n\n def _dims_update(self, change=None):\n # when notified that a coords names have been updated\n _ = self.dims # fire an update\n\n # ..................................................................................................................\n\n\n# ======================================================================================================================\n# module function\n# ======================================================================================================================\n\n# make some NDDataset operation accessible from the spectrochempy API\nthismodule = sys.modules[__name__]\n\napi_funcs = ['sort', 'copy', 'squeeze', 'swapdims', 'transpose', 'to_array', 'to_xarray', 'take', 'set_complex',\n 'set_quaternion', 'set_hypercomplex', 'component', 'to', 'to_base_units', 'to_reduced_units', 'ito',\n 'ito_base_units', 'ito_reduced_units', 'is_units_compatible', 'remove_masks']\n\n# todo: check the fact that some function are defined also in ndmath\nfor funcname in api_funcs:\n setattr(thismodule, funcname, getattr(NDDataset, funcname))\n\n thismodule.__all__.append(funcname)\n\n# load one method from NDIO\nload = NDDataset.load\n__all__ += ['load']\n\n# ======================================================================================================================\n# Set the operators\n# ======================================================================================================================\n\n_set_operators(NDDataset, priority=100000)\n_set_ufuncs(NDDataset)\n"
] | [
[
"numpy.ma.array",
"numpy.array"
]
] |
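Editor's note: each row above pairs one source file (stored as a single escaped code string) with the fully-qualified library calls detected in it — for the NDDataset file just shown, `numpy.ma.array` and `numpy.array`, which appear in its `to_array` and `to_xarray` methods. The sketch below only illustrates that pairing: the hand-built `row` dict and the crude alias-based substring check are assumptions for demonstration, not the tooling that produced this table.

```python
# Illustrative stand-in for one row of this table: a short code fragment plus
# the fully-qualified calls it contains (real rows store the whole file).
row = {
    "code": ["import numpy as np\n"
             "def to_array(ds):\n"
             "    return np.ma.array(ds)\n"
             "def to_float64(data):\n"
             "    return np.array(data, dtype=np.float64)\n"],
    "apis": [["numpy.ma.array", "numpy.array"]],
}

ALIASES = {"numpy": "np", "pandas": "pd", "tensorflow": "tf"}  # conventional import aliases

def api_mentioned(code: str, api: str) -> bool:
    """Crude check: does the aliased call form (e.g. np.ma.array) occur in the code?"""
    root, _, rest = api.partition(".")
    return f"{ALIASES.get(root, root)}.{rest}" in code

code_text = row["code"][0]
for api in row["apis"][0]:
    print(f"{api}: {'found' if api_mentioned(code_text, api) else 'missing'}")
```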
bicepjai/Deep-Survey-on-Text-Classification | [
"d935f0d4fc09213644d0291a0d64873912b2e331"
] | [
"lib/global_utils.py"
] | [
"import sys\nimport os\n\nimport re\nimport collections\nimport itertools\nimport bcolz\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nimport gc\nimport random\nimport smart_open\nimport h5py\nimport csv\nimport tensorflow as tf\nimport gensim\n\nimport datetime as dt\nfrom tqdm import tqdm_notebook as tqdm\n\n# import multiprocessing as mp\n# from itertools import repeat, product\n# from functools import partial\n\n# to be able to pickle class methods for multi processing\n# https://stackoverflow.com/questions/27318290/why-can-i-pass-an-instance-method-to-multiprocessing-process-but-not-a-multipro\n\ndef _instance_method_alias(obj, arg):\n \"\"\"\n Alias for instance method that allows the method to be called in a\n multiprocessing pool\n \"\"\"\n return obj.convertSent2WordIds(arg)\n\n\n\ndef get_embeddings_from_ft(fasttext_vec_file, dim, vocab_words):\n \"\"\"\n convert fast text .vec file to numpy array\n created embedding will be in order of words in vocab_words\n \"\"\"\n\n # gathering words from fasttext vec file--------------------\n ft_lines = None\n\n with open(fasttext_vec_file, \"r\") as f:\n ft_lines = f.readlines()\n\n ft_shape = tuple([int(i.strip()) for i in ft_lines[0].split()])\n ft_vocab_size = ft_shape[0]\n\n ft_wvs_dict = {}\n\n for i, line in enumerate(ft_lines[1:]):\n str_list = line.split()\n word = str(str_list[0].strip())\n vec = np.array([np.float(f) for f in str_list[1:]])\n assert dim == len(vec), \"fast text some vectors doesn't match dimensions \"+str(dim)+\" != \"+str(len(vec))\n ft_wvs_dict[word] = vec\n\n assert ft_vocab_size == len(ft_wvs_dict), \"fast text vectors file read issue \"+str(ft_vocab_size)+\" != \"+str(len(ft_wvs_dict))\n\n # creating embedding matrix from the file --------------------\n wvs_embedding = np.random.randn(len(vocab_words), dim)\n for i,word in enumerate(vocab_words):\n if word in ft_wvs_dict:\n wvs_embedding[i] = ft_wvs_dict[word]\n\n return wvs_embedding\n\n\n#=============================================================\n# DOCUMENT PREPROCESSING\n#=============================================================\n\nCHAR_ALPHABETS = \"abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{}\\n \"\nchar_start_tag_idx = len(CHAR_ALPHABETS) + 0\nchar_end_tag_idx = len(CHAR_ALPHABETS) + 1\nchar_unknown_tag_idx = len(CHAR_ALPHABETS) + 2\n\n# when sentences are converted to characters\n# these are appended to signal end of sentences\nchar_sent_start_tag_idx = len(CHAR_ALPHABETS) + 3\nchar_sent_end_tag_idx = len(CHAR_ALPHABETS) + 4\n\nCHAR_ALPHABETS_LEN = len(CHAR_ALPHABETS) + 4\n\nclass GenerateDataset(object):\n \"\"\"\n This class takes in preprocessed data frame and\n generated datasets as necessary\n \"\"\"\n\n def __init__(self, data_frame, vocab_idx):\n self.data_frame = data_frame\n self.vocab_idx = vocab_idx\n self.vocab_size = len(vocab_idx)\n\n # constants ================================================================================\n self.sentence_start_tag_idx = self.vocab_idx[\"<SOSent>\"]\n self.sentence_end_tag_idx = self.vocab_idx[\"<EOSent>\"]\n self.word_unknown_tag_idx = self.vocab_idx[\"<UNK>\"]\n\n self.default_unit_dict = {\n \"gene_unit\" : \"words\",\n \"variation_unit\" : \"words\",\n \"doc_unit\" : \"words\",\n \"doc_form\" : \"text\",\n \"doc_cntx_dir\" : \"forward\",\n \"divide_document\": \"single_unit\"\n }\n\n\n def convertSent2WordIds(self, sentence, add_start_end_tag=False):\n \"\"\"\n sentence is a list of word.\n It is converted to list of ids based on vocab_idx\n 
\"\"\"\n\n sent2id = []\n if add_start_end_tag:\n sent2id = [self.sentence_start_tag_idx]\n\n try:\n sent2id = sent2id + [self.vocab_idx[word] if self.vocab_idx[word]<self.vocab_size else self.word_unknown_tag_idx for word in sentence]\n except KeyError as e:\n print(e)\n print (sentence)\n raise ValueError('Fix this issue dude')\n\n if add_start_end_tag:\n sent2id = sent2id + [self.sentence_end_tag_idx]\n\n return sent2id\n\n\n\n def convertDoc2Sent2WordIds(self, document, add_start_end_tag=False):\n \"\"\"\n document is a list of sentence.\n sentence is a list of word.\n so given sent_list will be converted to list of list of ids based on vocab_idx\n \"\"\"\n\n return [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]\n\n\n\n def convertWord2Char2Ids(self, word, add_start_end_tag=False):\n \"\"\"\n word is a char sequence or list of characters,\n return list of ids in word or char sequence\n \"\"\"\n char2id = []\n if add_start_end_tag:\n char2id = [char_start_tag_idx]\n\n char2id = char2id + [CHAR_ALPHABETS.find(char) for char in word]\n\n if add_start_end_tag:\n char2id = char2id + [char_end_tag_idx]\n\n return char2id\n\n\n\n def convertSent2Word2Char2Ids(self, sentence, add_start_end_tag=False, unit=\"chars\"):\n \"\"\"\n sentence is list of words\n word is list of characters\n returns list of list of ids\n \"\"\"\n\n sent2words2char2id = []\n if unit == \"chars\":\n \"\"\"\n all the words are grouped as list of chars with pre-post added tags\n \"\"\"\n if add_start_end_tag:\n sent2words2char2id = [[char_sent_start_tag_idx]]\n\n sent2words2char2id = sent2words2char2id + [self.convertWord2Char2Ids(word, add_start_end_tag) if self.vocab_idx[word] < self.vocab_size else [char_unknown_tag_idx] for word in sentence]\n\n if add_start_end_tag:\n sent2words2char2id = sent2words2char2id + [[char_sent_end_tag_idx]]\n elif unit == \"raw_chars\":\n \"\"\"\n just a stream of characters\n \"\"\"\n if add_start_end_tag:\n sent2words2char2id = [char_sent_start_tag_idx]\n\n for word in sentence:\n if self.vocab_idx[word] < self.vocab_size:\n sent2words2char2id += [charid for charid in self.convertWord2Char2Ids(word, add_start_end_tag)]\n else:\n sent2words2char2id += [char_unknown_tag_idx]\n\n if add_start_end_tag:\n sent2words2char2id = sent2words2char2id + [char_sent_end_tag_idx]\n else:\n assert False, \"give valid doc_unit argument\"\n\n return sent2words2char2id\n\n\n\n def convertDoc2Sent2Word2Char2Ids(self, document, doc_form=\"sentences\", add_start_end_tag=False, unit=\"chars\"):\n \"\"\"\n document is a list of sentence.\n sentence is a list of word.\n so given sent_list will be converted to list of list of ids based on vocab_idx\n\n returns list of list if doc_form == \"text\"\n returns list of list of list if doc_form == \"sentences\"\n \"\"\"\n doc2word2char2ids = []\n\n if doc_form == \"sentences\":\n doc2word2char2ids = [self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit) for sentence in document]\n\n elif doc_form == \"text\":\n doc2word2char2ids = [list_or_charid for list_or_charid in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit)]\n else:\n assert False, \"give valid doc_form argument\"\n\n return doc2word2char2ids\n\n\n\n def generate_data(self, unit_dict=None, has_class=False, add_start_end_tag=False):\n \"\"\"\n dataframe expects to have Sentences, Variations, Genes, Class(has_class)\n\n Sentences Text attribute converted to list of sentences which in turn converted to list of words\n Variations just one sentence 
which is a list of words\n Genes just one sentence which is a list of words\n\n returns information based on request\n\n unit_dict contains these 5 keys that can have\n gene_unit can be [\"words\", \"chars\", \"raw_chars\"]\n variation_unit can be [\"words\", \"chars\", \"raw_chars\"]\n doc_unit can be [\"words\", \"word_list\", chars\", \"raw_chars\"]\n doc_form can be [\"sentences\", \"text\"]\n doc_cntx_dir can be [\"forward\", \"backward\"]\n divide_document can be [\"single_unit\", \"multiple_units\"]\n\n \"\"\"\n if not unit_dict:\n unit_dict = self.default_unit_dict\n\n try:\n unit_dict[\"doc_cntx_dir\"]\n except KeyError as e:\n unit_dict[\"doc_cntx_dir\"] = \"forward\"\n\n ids_document = []\n ids_labels = []\n ids_genes = []\n ids_variations = []\n\n # since sometimes the data will be shuffled in the frame\n # during train test split\n for index in self.data_frame.index:\n document = self.data_frame.Sentences[index]\n if unit_dict[\"divide_document\"] == \"single_unit\": #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`\n\n # doc units --------------------------------------------------------------\n if unit_dict[\"doc_unit\"] == \"words\" or unit_dict[\"doc_unit\"] == \"word_list\":\n\n if unit_dict[\"doc_form\"] == \"sentences\":\n ids_document.append(self.convertDoc2Sent2WordIds(document, add_start_end_tag))\n\n else: # unit_dict[\"doc_form\"] == \"text\"\n\n # using multiprocess to process each sentence in document and concatenate them to a single sentence\n # get_wordid_list = lambda d, setag : [wid for s in d for wid in self.convertSent2WordIds(s, setag)]\n # text_word_list = []\n # with mp.Pool(processes = 5) as pool:\n # # text_word_list = pool.starmap(get_wordid_list, product(document, [add_start_end_tag]*len(document)))\n # # text_word_list = pool.starmap(get_wordid_list, zip(document, repeat(add_start_end_tag)))\n # text_word_list = pool.map(partial(get_wordid_list, setag=add_start_end_tag), document)\n\n\n # without multiprocessing\n if unit_dict[\"doc_unit\"] == \"words\":\n text_word_list = [word_id for sentence in document for word_id in self.convertSent2WordIds(sentence, add_start_end_tag)]\n\n if unit_dict[\"doc_cntx_dir\"] == \"backward\":\n text_word_list = text_word_list[::-1]\n\n else: # unit_dict[\"doc_unit\"] == \"word_list\": sentence form a list\n text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]\n\n if unit_dict[\"doc_cntx_dir\"] == \"backward\":\n text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag)[::-1] for sentence in document]\n\n ids_document.append(text_word_list)\n\n elif unit_dict[\"doc_unit\"] == \"chars\" or unit_dict[\"doc_unit\"] == \"raw_chars\":\n\n if unit_dict[\"doc_form\"] == \"sentences\":\n\n for sentence in document:\n ids_document.append(self.convertDoc2Sent2Word2Char2Ids(document,\n doc_form=unit_dict[\"doc_form\"], unit=unit_dict[\"doc_unit\"], add_start_end_tag=add_start_end_tag))\n\n else: # unit_dict[\"doc_form\"] == \"text\"\n text_char_list = [word_as_char_list_id for sentence in document for word_as_char_list_id in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit=unit_dict[\"doc_unit\"])]\n\n ids_document.append(text_char_list)\n\n else:\n assert False, \"give valid doc_unit key-value\"\n\n # others --------------------------------------------------------------\n if has_class:\n ids_labels.append(self.data_frame.Class[index])\n\n if unit_dict[\"gene_unit\"] == \"words\":\n ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], 
add_start_end_tag))\n else:\n ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],\n add_start_end_tag, unit=unit_dict[\"doc_unit\"]))\n\n if unit_dict[\"variation_unit\"] == \"words\":\n ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))\n else:\n ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],\n add_start_end_tag, unit=unit_dict[\"doc_unit\"]))\n\n else: # unit_dict[\"divide_document\"] == \"multiple_unit\" #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`\n for sentence in document:\n\n # doc units --------------------------------------------------------------\n if unit_dict[\"doc_unit\"] == \"words\":\n\n # doesnt matter if\n # unit_dict[\"doc_form\"] == \"sentences\"\n # unit_dict[\"doc_form\"] == \"text\"\n\n try:\n sentence_list = self.convertSent2WordIds(sentence, add_start_end_tag)\n if unit_dict[\"doc_cntx_dir\"] == \"backward\":\n text_word_list = self.convertSent2WordIds(sentence, add_start_end_tag)[::-1]\n\n ids_document.append(sentence_list)\n\n except ValueError as e:\n print(e)\n print (index)\n raise ValueError('Fix this issue dude !')\n\n elif unit_dict[\"doc_unit\"] == \"chars\" or unit_dict[\"doc_unit\"] == \"raw_chars\":\n\n # doesnt matter if\n # unit_dict[\"doc_form\"] == \"sentences\"\n # unit_dict[\"doc_form\"] == \"text\"\n\n ids_document.append(self.convertSent2Word2Char2Ids(sentence, add_start_end_tag,\n unit=unit_dict[\"doc_unit\"]))\n\n\n # others --------------------------------------------------------------\n if has_class:\n ids_labels.append(self.data_frame.Class[index])\n\n if unit_dict[\"gene_unit\"] == \"words\":\n ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], add_start_end_tag))\n else:\n ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],\n add_start_end_tag, unit=unit_dict[\"gene_unit\"]))\n\n if unit_dict[\"variation_unit\"] == \"words\":\n ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))\n else:\n ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],\n add_start_end_tag, unit=unit_dict[\"variation_unit\"]))\n\n\n return ids_document, ids_genes, ids_variations, ids_labels\n\n\n\n def placeholder_function(self, unit_dict=None, limit_dict=None,\n has_class=False, add_start_end_tag=False):\n \"\"\"\n dataframe expects to have Sentences, Variations, Genes, Class(has_class)\n\n Sentences Text attribute converted to list of sentences which in turn converted to list of words\n Variations just one sentence which is a list of words\n Genes just one sentence which is a list of words\n\n returns information based on request\n\n unit_dict contains these 5 keys that can have\n gene_unit can be [\"words\", \"chars\"]\n variation_unit can be [\"words\", \"chars\"]\n doc_unit can be [\"words\", \"chars\"]\n doc_form can be [\"sentences\", \"text\"]\n divide_document can be [\"single_unit\", \"multiple_units\"]\n\n limit_dict contains max sequence len to form valid matrices\n Text attribute options\n max_text_seq_len => maximum number of words in a text\n max_text_document_len => maximum number of sentences in a document\n max_text_sentence_len => maximum number of words in a sentence\n max_text_word_len => maximum number of chars in a word\n\n Gene Attribute options\n max_gene_sentence_len => maximum number of words in a sentence\n max_gene_word_len => maximum number of chars in a word\n\n Variation Attribute options\n 
max_variation_sentence_len => maximum number of words in a sentence\n max_variation_word_len => maximum number of chars in a word\n\n \"\"\"\n\n ids_document, ids_genes, ids_variations, ids_labels = self.generate_dataset(unit_dict, has_class, add_start_end_tag)\n\n\n# testing ======================================================================================\n\ndef test_class():\n document = [\n ['beautiful', 'is', 'better', 'than', 'ugly.'],\n ['explicit', 'is', 'better', 'than', 'implicit.'],\n ['simple', 'is', 'better', 'than', 'complex.'],\n ['complex', 'is', 'better', 'than', 'complicated.'],\n ['flat', 'is', 'better', 'than', 'nested.'],\n # ['sparse', 'is', 'better', 'than', 'dense.'],\n # ['readability', 'counts.'],\n # ['special', 'cases', \"aren't\", 'special', 'enough', 'to', 'break', 'the', 'rules.'],\n # ['although', 'practicality', 'beats', 'purity.'],\n # ['errors', 'should', 'never', 'pass', 'silently.'],\n # ['unless', 'explicitly', 'silenced.'],\n # ['in', 'the', 'face', 'of', 'ambiguity,', 'refuse', 'the', 'temptation', 'to', 'guess.'],\n # ['there', 'should', 'be', 'one--', 'and', 'preferably', 'only', 'one', '--obvious', 'way', 'to', 'do', 'it.'],\n # ['although', 'that', 'way', 'may', 'not', 'be', 'obvious', 'at', 'first', 'unless', \"you're\", 'Dutch.'],\n # ['now', 'is', 'better', 'than', 'never.'], ['Although', 'never', 'is', 'often', 'better', 'than', '*right*', 'now.'],\n # ['if', 'the', 'implementation', 'is', 'hard', 'to', 'explain,', \"it's\", 'a', 'bad', 'idea.'],\n # ['if', 'the', 'implementation', 'is', 'easy', 'to', 'explain,', 'it', 'may', 'be', 'a', 'good', 'idea.'],\n # ['namespaces', 'are', 'one', 'honking', 'great', 'idea', '--', \"let's\", 'do', 'more', 'of', 'those!'],\n ]\n\n data_dict = {\n \"ID\" : 0,\n \"Gene\" : [[\"beautiful\"]],\n \"Variation\" : [[\"complex\", \"simple\"]],\n \"Class\" : 0,\n \"Sentences\" : [document[:]]\n }\n\n custom_unit_dict = {\n \"gene_unit\" : \"raw_chars\",\n \"variation_unit\" : \"raw_chars\",\n # text transformed to sentences attribute\n \"doc_unit\" : \"raw_chars\",\n \"doc_form\" : \"sentences\",\n # \"doc_cntx_dir\" : \"forward\",\n \"divide_document\" : \"single_unit\"\n }\n\n df = pd.DataFrame(data=data_dict)\n corpus = sorted(list(set([word for sentence in document for word in sentence])))\n corpus_wordidx = {word:i for i,word in enumerate(corpus)}\n corpus_wordidx[\"<SOSent>\"] = len(corpus)\n corpus_wordidx[\"<EOSent>\"] = len(corpus) + 1\n corpus_wordidx[\"<UNK>\"] = len(corpus) + 2\n\n gen_data = GenerateDataset(df, corpus_wordidx)\n x_T, x_G, x_V, x_C = gen_data.generate_data(custom_unit_dict, has_class=True, add_start_end_tag=True)\n\n print(\"data\", df.Sentences[0], \"\\n\")\n print(corpus_wordidx)\n index = 0\n print(\"text\",np.array(x_T).shape, x_T[index])\n print(\"gene\",np.array(x_G).shape, x_G[index])\n print(\"variation\",np.array(x_V).shape, x_V[index])\n print(\"classes\",np.array(x_C).shape, x_C[index])\n\n\nif __name__ == \"__main__\":\n test_class()\n\n"
] | [
[
"numpy.array",
"numpy.float",
"pandas.DataFrame"
]
] |
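Editor's note: the `get_embeddings_from_ft` helper in the row above parses FastText vectors with `np.array([np.float(f) for f in str_list[1:]])`. `np.float` was only ever an alias for the builtin `float`; it was deprecated in NumPy 1.20 and removed in 1.24, so that line raises `AttributeError` on current NumPy. A drop-in replacement is sketched below (the sample `.vec` tokens are made up for illustration):

```python
import numpy as np

# One already-split line of a FastText .vec file: word followed by its
# vector components (illustrative values only).
str_list = ["word", "0.12", "-0.5", "1.0"]

# Builtin float instead of the removed np.float alias:
vec = np.array([float(f) for f in str_list[1:]])

# Equivalent and slightly tidier: let NumPy parse the strings directly.
vec64 = np.asarray(str_list[1:], dtype=np.float64)

assert vec.dtype == np.float64 and np.allclose(vec, vec64)
```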
Sensors-in-Paradise/OpportunityML | [
"a123b4842de45f735d517be6bcd96ca35171db91"
] | [
"src/loader/load_opportunity_dataset.py"
] | [
"import itertools\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nfrom utils.Recording import Recording\nimport utils.settings as settings\n\n\ndef load_opportunity_dataset(opportunity_dataset_path: str) -> \"list[Recording]\":\n \"\"\"\n Returns a list of Recordings from the opportunity dataset\n \"\"\"\n print(\"Will read the opportunity dataset\")\n opportunity_dataset_path += \"/dataset\"\n subject_ids = range(1, 5)\n recording_ids = range(1, 6)\n\n # see loader/opportunity_col_names to make your selection\n selected_feature_names = [\n \"IMU-BACK-accX\",\n \"IMU-BACK-accY\",\n \"IMU-BACK-accZ\",\n \"IMU-BACK-Quaternion1\",\n \"IMU-BACK-Quaternion2\",\n \"IMU-BACK-Quaternion3\",\n \"IMU-BACK-Quaternion4\",\n \"IMU-RLA-accX\",\n \"IMU-RLA-accY\",\n \"IMU-RLA-accZ\",\n \"IMU-RLA-Quaternion1\",\n \"IMU-RLA-Quaternion2\",\n \"IMU-RLA-Quaternion3\",\n \"IMU-RLA-Quaternion4\",\n \"IMU-LLA-accX\",\n \"IMU-LLA-accY\",\n \"IMU-LLA-accZ\",\n \"IMU-LLA-Quaternion1\",\n \"IMU-LLA-Quaternion2\",\n \"IMU-LLA-Quaternion3\",\n \"IMU-LLA-Quaternion4\",\n \"IMU-L-SHOE-EuX\",\n \"IMU-L-SHOE-EuY\",\n \"IMU-L-SHOE-EuZ\",\n \"IMU-L-SHOE-Nav_Ax\",\n \"IMU-L-SHOE-Nav_Ay\",\n \"IMU-L-SHOE-Nav_Az\",\n \"IMU-L-SHOE-Body_Ax\",\n \"IMU-L-SHOE-Body_Ay\",\n \"IMU-L-SHOE-Body_Az\",\n \"IMU-L-SHOE-AngVelBodyFrameX\",\n \"IMU-L-SHOE-AngVelBodyFrameY\",\n \"IMU-L-SHOE-AngVelBodyFrameZ\",\n \"IMU-L-SHOE-AngVelNavFrameX\",\n \"IMU-L-SHOE-AngVelNavFrameY\",\n \"IMU-L-SHOE-AngVelNavFrameZ\",\n \"IMU-R-SHOE-EuX\",\n \"IMU-R-SHOE-EuY\",\n \"IMU-R-SHOE-EuZ\",\n \"IMU-R-SHOE-Nav_Ax\",\n \"IMU-R-SHOE-Nav_Ay\",\n \"IMU-R-SHOE-Nav_Az\",\n \"IMU-R-SHOE-Body_Ax\",\n \"IMU-R-SHOE-Body_Ay\",\n \"IMU-R-SHOE-Body_Az\",\n \"IMU-R-SHOE-AngVelBodyFrameX\",\n \"IMU-R-SHOE-AngVelBodyFrameY\",\n \"IMU-R-SHOE-AngVelBodyFrameZ\",\n \"IMU-R-SHOE-AngVelNavFrameX\",\n \"IMU-R-SHOE-AngVelNavFrameY\",\n \"IMU-R-SHOE-AngVelNavFrameZ\",\n ]\n print(f\"Selected features (n_features: {len(selected_feature_names)}):\\n\", \"\\n\".join([\"\\t\" + str(feature_name) for feature_name in selected_feature_names]))\n\n # Get column names\n col_names = []\n with open(\"src/loader/opportunity_col_names\", \"r\") as file:\n lines = file.read().splitlines()\n for line in lines:\n col_names.append(line)\n\n recordings = []\n for sub, rec in itertools.product(subject_ids, recording_ids):\n file_name = f\"S{sub}-ADL{rec}.dat\"\n file_path = os.path.join(opportunity_dataset_path, file_name)\n print(f\"Reading {file_path} ...\")\n file_df = pd.read_csv(file_path, delimiter=\" \", header=None)\n file_df.columns = col_names # give them the real column names\n\n recordings.append(Recording(\n sensor_frame = file_df.loc[:, selected_feature_names], \n time_frame = file_df.loc[:, 'MILLISEC'],\n activities = file_df.loc[:, 'HL_Activity'].map(\n lambda label: settings.DATA_CONFIG.raw_label_to_activity_idx(label)\n ), # Use `[0]` to get only one activity | maps 0, 101, 102, 103, 104, 105 to 0, 1, 2, 3, 4, 5\n subject=int(sub),\n recording_index=int(rec)\n ))\n\n print(f\"\\n => Total {len(recordings)} recordings read\")\n\n return recordings\n\n"
] | [
[
"pandas.read_csv"
]
] |
evertdeman/HD-BET | [
"817a50d2fe9b8663646cc74652cb50e26f343a3b"
] | [
"HD_BET/utils.py"
] | [
"from urllib.request import urlopen\nimport torch\nfrom torch import nn\nimport numpy as np\nfrom skimage.morphology import label\nimport os\nfrom HD_BET.paths import folder_with_parameter_files\n\n\ndef get_params_fname(fold):\n return os.path.join(folder_with_parameter_files, \"%d.model\" % fold)\n\n\ndef maybe_download_parameters(fold=0, force_overwrite=False):\n \"\"\"\n Downloads the parameters for some fold if it is not present yet.\n :param fold:\n :param force_overwrite: if True the old parameter file will be deleted (if present) prior to download\n :return:\n \"\"\"\n\n assert 0 <= fold <= 4, \"fold must be between 0 and 4\"\n\n if not os.path.isdir(folder_with_parameter_files):\n maybe_mkdir_p(folder_with_parameter_files)\n\n out_filename = get_params_fname(fold)\n\n if force_overwrite and os.path.isfile(out_filename):\n os.remove(out_filename)\n\n if not os.path.isfile(out_filename):\n url = \"https://zenodo.org/record/2540695/files/%d.model?download=1\" % fold\n print(\"Downloading\", url, \"...\")\n data = urlopen(url).read()\n with open(out_filename, 'wb') as f:\n f.write(data)\n\n\ndef init_weights(module):\n if isinstance(module, nn.Conv3d):\n module.weight = nn.init.kaiming_normal(module.weight, a=1e-2)\n if module.bias is not None:\n module.bias = nn.init.constant(module.bias, 0)\n\n\ndef softmax_helper(x):\n rpt = [1 for _ in range(len(x.size()))]\n rpt[1] = x.size(1)\n x_max = x.max(1, keepdim=True)[0].repeat(*rpt)\n e_x = torch.exp(x - x_max)\n return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)\n\n\nclass SetNetworkToVal(object):\n def __init__(self, use_dropout_sampling=False, norm_use_average=True):\n self.norm_use_average = norm_use_average\n self.use_dropout_sampling = use_dropout_sampling\n\n def __call__(self, module):\n if isinstance(module, nn.Dropout3d) or isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout):\n module.train(self.use_dropout_sampling)\n elif isinstance(module, nn.InstanceNorm3d) or isinstance(module, nn.InstanceNorm2d) or \\\n isinstance(module, nn.InstanceNorm1d) \\\n or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or \\\n isinstance(module, nn.BatchNorm1d):\n module.train(not self.norm_use_average)\n\n\ndef postprocess_prediction(seg):\n # basically look for connected components and choose the largest one, delete everything else\n print(\"running postprocessing... 
\")\n mask = seg != 0\n lbls = label(mask, connectivity=mask.ndim)\n lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]\n largest_region = np.argmax(lbls_sizes[1:]) + 1\n seg[lbls != largest_region] = 0\n return seg\n\n\ndef subdirs(folder, join=True, prefix=None, suffix=None, sort=True):\n if join:\n l = os.path.join\n else:\n l = lambda x, y: y\n res = [l(folder, i) for i in os.listdir(folder) if os.path.isdir(os.path.join(folder, i))\n and (prefix is None or i.startswith(prefix))\n and (suffix is None or i.endswith(suffix))]\n if sort:\n res.sort()\n return res\n\n\ndef subfiles(folder, join=True, prefix=None, suffix=None, sort=True):\n if join:\n l = os.path.join\n else:\n l = lambda x, y: y\n res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))\n and (prefix is None or i.startswith(prefix))\n and (suffix is None or i.endswith(suffix))]\n if sort:\n res.sort()\n return res\n\n\nsubfolders = subdirs # I am tired of confusing those\n\n\ndef maybe_mkdir_p(directory):\n splits = directory.split(\"/\")[1:]\n for i in range(0, len(splits)):\n if not os.path.isdir(os.path.join(\"/\", *splits[:i+1])):\n os.mkdir(os.path.join(\"/\", *splits[:i+1]))\n"
] | [
[
"numpy.sum",
"torch.nn.init.constant",
"numpy.argmax",
"torch.exp",
"torch.nn.init.kaiming_normal",
"numpy.unique"
]
] |
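Editor's note: `init_weights` in the HD-BET row above calls `nn.init.kaiming_normal` and `nn.init.constant`, which PyTorch has deprecated in favor of the trailing-underscore in-place variants. A minimal updated sketch follows; the small `nn.Sequential` at the end exists only to demonstrate `Module.apply` and is not part of HD-BET.

```python
from torch import nn


def init_weights(module: nn.Module) -> None:
    # In-place initializers modify the parameter tensors directly, so the
    # reassignment of module.weight / module.bias in the original helper is
    # no longer needed.
    if isinstance(module, nn.Conv3d):
        nn.init.kaiming_normal_(module.weight, a=1e-2)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)


# Usage: apply the initializer recursively to every submodule.
net = nn.Sequential(nn.Conv3d(1, 8, kernel_size=3), nn.ReLU())
net.apply(init_weights)
```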
mdhasan8/Machine-Learning-in-Python | [
"d66607d3003e8279e35cf176851f506aa833a9fe"
] | [
"Neural_Network_Tensorflow.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 16 22:30:11 2020\r\n\r\n@author: Easin\r\n\"\"\"\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import Model, layers\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# MNIST dataset parameters.\r\nnum_classes = 10 # total classes (0-9 digits).\r\nnum_features = 784 # data features (img shape: 28*28).\r\n\r\n# Training parameters.\r\nlearning_rate = 0.1\r\ntraining_steps = 2000\r\nbatch_size = 256\r\ndisplay_step = 100\r\n\r\n# Network parameters.\r\nn_hidden_1 = 128 # 1st layer number of neurons.\r\nn_hidden_2 = 256 # 2nd layer number of neurons.\r\n\r\n# Prepare MNIST data.\r\nfrom tensorflow.keras.datasets import mnist\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\n# Convert to float32.\r\nx_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)\r\n# Flatten images to 1-D vector of 784 features (28*28).\r\nx_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features])\r\n# Normalize images value from [0, 255] to [0, 1].\r\nx_train, x_test = x_train / 255., x_test / 255.\r\n\r\n# Use tf.data API to shuffle and batch data.\r\ntrain_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))\r\ntrain_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)\r\n\r\n# Create TF Model.\r\nclass NeuralNet(Model):\r\n # Set layers.\r\n def __init__(self):\r\n super(NeuralNet, self).__init__()\r\n # First fully-connected hidden layer.\r\n self.fc1 = layers.Dense(n_hidden_1, activation=tf.nn.relu)\r\n # First fully-connected hidden layer.\r\n self.fc2 = layers.Dense(n_hidden_2, activation=tf.nn.relu)\r\n # Second fully-connecter hidden layer.\r\n self.out = layers.Dense(num_classes)\r\n\r\n # Set forward pass.\r\n def call(self, x, is_training=False):\r\n x = self.fc1(x)\r\n x = self.fc2(x)\r\n x = self.out(x)\r\n if not is_training:\r\n # tf cross entropy expect logits without softmax, so only\r\n # apply softmax when not training.\r\n x = tf.nn.softmax(x)\r\n return x\r\n\r\n# Build neural network model.\r\nneural_net = NeuralNet()\r\n\r\n\r\n# Cross-Entropy Loss.\r\n# Note that this will apply 'softmax' to the logits.\r\ndef cross_entropy_loss(x, y):\r\n # Convert labels to int 64 for tf cross-entropy function.\r\n y = tf.cast(y, tf.int64)\r\n # Apply softmax to logits and compute cross-entropy.\r\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=x)\r\n # Average loss across the batch.\r\n return tf.reduce_mean(loss)\r\n\r\n# Accuracy metric.\r\ndef accuracy(y_pred, y_true):\r\n # Predicted class is the index of highest score in prediction vector (i.e. argmax).\r\n correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\r\n return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)\r\n\r\n# Stochastic gradient descent optimizer.\r\noptimizer = tf.optimizers.SGD(learning_rate)\r\n\r\n\r\n# Optimization process. \r\ndef run_optimization(x, y):\r\n # Wrap computation inside a GradientTape for automatic differentiation.\r\n with tf.GradientTape() as g:\r\n # Forward pass.\r\n pred = neural_net(x, is_training=True)\r\n # Compute loss.\r\n loss = cross_entropy_loss(pred, y)\r\n \r\n # Variables to update, i.e. 
trainable variables.\r\n trainable_variables = neural_net.trainable_variables\r\n\r\n # Compute gradients.\r\n gradients = g.gradient(loss, trainable_variables)\r\n \r\n # Update W and b following gradients.\r\n optimizer.apply_gradients(zip(gradients, trainable_variables))\r\n \r\n\r\n# Run training for the given number of steps.\r\nfor step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):\r\n # Run the optimization to update W and b values.\r\n run_optimization(batch_x, batch_y)\r\n \r\n if step % display_step == 0:\r\n pred = neural_net(batch_x, is_training=True)\r\n loss = cross_entropy_loss(pred, batch_y)\r\n acc = accuracy(pred, batch_y)\r\n print(\"step: %i, loss: %f, accuracy: %f\" % (step, loss, acc))\r\n \r\n# Test model on validation set.\r\npred = neural_net(x_test, is_training=False)\r\nprint(\"Test Accuracy: %f\" % accuracy(pred, y_test))\r\n\r\n\r\n# Predict 5 images from validation set.\r\nn_images = 5\r\ntest_images = x_test[:n_images]\r\npredictions = neural_net(test_images)\r\n\r\n# Display image and model prediction.\r\nfor i in range(n_images):\r\n plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')\r\n plt.show()\r\n print(\"Model prediction: %i\" % np.argmax(predictions.numpy()[i]))\r\n \r\n\r\n"
] | [
[
"numpy.reshape",
"tensorflow.reduce_mean",
"tensorflow.nn.softmax",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.cast",
"matplotlib.pyplot.show",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.array",
"tensorflow.optimizers.SGD",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.argmax"
]
] |
983632847/covid19_pocus_ultrasound | [
"3625e95bbf189926dbd12966ef59ee71ed10e453"
] | [
"pocovidnet/scripts/eval_vid_classifier.py"
] | [
"import argparse\nimport json\nimport os\nimport pickle\nimport numpy as np\nfrom pocovidnet.evaluate_genesis import GenesisEvaluator\nfrom pocovidnet.evaluate_video import VideoEvaluator\nfrom tensorflow.keras import backend as K\nfrom pocovidnet.videoto3d import Videoto3D\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Evaluate genesis and cam')\n parser.add_argument('--json', type=str, default=\"../data/cross_val.json\")\n parser.add_argument(\n '--genesis_weights', type=str, default='video_genesis_lr1e4'\n )\n parser.add_argument(\n '--cam_weights', type=str, default='trained_models_cam'\n )\n parser.add_argument(\n '--videos', type=str, default='../data/pocus_videos/convex'\n )\n args = parser.parse_args()\n\n with open(args.json, \"r\") as infile:\n cross_val_split = json.load(infile)\n\n VIDEO_DIR = args.videos\n all_genesis_preds = []\n all_frame_preds = []\n for i in range(5):\n gen_eval = GenesisEvaluator(\n weights_dir=args.genesis_weights, ensemble=False, split=i\n )\n K.set_image_data_format(\"channels_last\")\n normal_eval = VideoEvaluator(\n weights_dir=args.cam_weights,\n ensemble=False,\n split=i,\n model_id=\"vgg_cam\",\n num_classes=4\n )\n files = cross_val_split[str(i)][\"test\"][0]\n # print(files)\n for f in files:\n print(\"evaluate\", f)\n # TEST if the video is working\n vid3d = Videoto3D(\"\", 64, 64, 5, 5)\n vid3d.max_vid = {\"cov\": 20, \"pne\": 20, \"reg\": 20}\n X_test, _, fn = vid3d.video3d(\n [os.path.join(VIDEO_DIR, f)], [\"cov\"]\n )\n if len(np.unique(fn)) != 1:\n print(\"ERROR: WRONG FILE!\")\n print(fn)\n print(X_test.shape)\n continue\n # run genesis model\n K.set_image_data_format(\"channels_first\")\n preds = gen_eval(os.path.join(VIDEO_DIR, f))\n vid_pred_genesis = np.argmax(np.mean(preds, axis=(0, 1)))\n all_genesis_preds.append(preds)\n # run cam model\n K.set_image_data_format(\"channels_last\")\n preds_framebased = normal_eval(os.path.join(VIDEO_DIR, f))\n frame_pred = np.argmax(np.mean(preds_framebased, axis=0))\n all_frame_preds.append(preds_framebased)\n print(preds.shape, preds_framebased.shape)\n print(\n \"genesis pred\", vid_pred_genesis, \"frame based pred\",\n frame_pred\n )\n print(\"-------------\")\n with open(\"evaluation_outputs.dat\", \"wb\") as outfile:\n pickle.dump((all_genesis_preds, all_frame_preds), outfile)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.mean",
"numpy.unique",
"tensorflow.keras.backend.set_image_data_format"
]
] |
pworinger/kornia | [
"a8bddbc5412694d778b1a7338e0d001910bb8024"
] | [
"test/geometry/transform/crop/test_crop2d.py"
] | [
"from typing import Tuple\n\nimport pytest\n\nimport kornia as kornia\nimport kornia.testing as utils # test utils\n\nimport torch\nfrom torch.testing import assert_allclose\nfrom torch.autograd import gradcheck\n\n\nclass TestBoundingBoxInferring:\n def test_bounding_boxes_dim_inferring(self, device, dtype):\n boxes = torch.tensor([[\n [1., 1.],\n [3., 1.],\n [3., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype)\n\n h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)\n assert (h, w) == (2, 3)\n\n def test_bounding_boxes_dim_inferring_batch(self, device, dtype):\n boxes = torch.tensor([[\n [1., 1.],\n [3., 1.],\n [3., 2.],\n [1., 2.],\n ], [\n [2., 2.],\n [4., 2.],\n [4., 3.],\n [2., 3.],\n ]], device=device, dtype=dtype)\n h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)\n assert (h.unique().item(), w.unique().item()) == (2, 3)\n\n def test_gradcheck(self, device, dtype):\n boxes = torch.tensor([[\n [1., 1.],\n [3., 1.],\n [3., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype)\n boxes = utils.tensor_to_gradcheck_var(boxes)\n assert gradcheck(kornia.kornia.geometry.transform.crop.infer_box_shape,\n (boxes,), raise_exception=True)\n\n def test_jit(self, device, dtype):\n # Define script\n op = kornia.geometry.transform.crop.infer_box_shape\n op_script = torch.jit.script(op)\n # Define input\n boxes = torch.tensor([[\n [1., 1.],\n [3., 1.],\n [3., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype)\n\n actual = op_script(boxes)\n expected = op(boxes)\n assert_allclose(actual, expected)\n\n\nclass TestCropAndResize:\n def test_align_corners_true(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n height, width = 2, 3\n\n expected = torch.tensor(\n [[[[6.0000, 6.5000, 7.0000],\n [10.0000, 10.5000, 11.0000]]]], device=device, dtype=dtype)\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n # default should use align_coners True\n patches = kornia.crop_and_resize(inp, boxes, (height, width))\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_align_corners_false(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n height, width = 2, 3\n expected = torch.tensor(\n [[[[6.7222, 7.1667, 7.6111],\n [9.3889, 9.8333, 10.2778]]]], device=device, dtype=dtype)\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n patches = kornia.crop_and_resize(inp, boxes, (height, width), align_corners=False)\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_crop_batch(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]], [[\n [1., 5., 9., 13.],\n [2., 6., 10., 14.],\n [3., 7., 11., 15.],\n [4., 8., 12., 16.],\n ]]], device=device, dtype=dtype)\n\n expected = torch.tensor([[[\n [6., 7.],\n [10., 11.],\n ]], [[\n [7., 15.],\n [8., 16.],\n ]]], device=device, dtype=dtype)\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ], [\n [1., 2.],\n [3., 2.],\n [3., 3.],\n [1., 3.],\n ]], device=device, dtype=dtype) # 2x4x2\n\n patches = kornia.crop_and_resize(inp, boxes, (2, 2))\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_crop_batch_broadcast(self, device, 
dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]], [[\n [1., 5., 9., 13.],\n [2., 6., 10., 14.],\n [3., 7., 11., 15.],\n [4., 8., 12., 16.],\n ]]], device=device, dtype=dtype)\n\n expected = torch.tensor([[[\n [6., 7.],\n [10., 11.],\n ]], [[\n [6., 10.],\n [7., 11.],\n ]]], device=device, dtype=dtype)\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n patches = kornia.crop_and_resize(inp, boxes, (2, 2))\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)\n img = utils.tensor_to_gradcheck_var(img) # to var\n\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n boxes = utils.tensor_to_gradcheck_var(boxes, requires_grad=False) # to var\n\n assert gradcheck(kornia.crop_and_resize,\n (img, boxes, (4, 2),),\n raise_exception=True)\n\n def test_jit(self, device, dtype):\n # Define script\n op = kornia.crop_and_resize\n op_script = torch.jit.script(op)\n # Define input\n img = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n boxes = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n crop_height, crop_width = 4, 2\n actual = op_script(img, boxes, (crop_height, crop_width))\n expected = op(img, boxes, (crop_height, crop_width))\n assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)\n\n\nclass TestCenterCrop:\n def test_center_crop_h2_w4(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n expected = torch.tensor([[[\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n ]]], device=device, dtype=dtype)\n\n out_crop = kornia.center_crop(inp, (2, 4))\n assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)\n\n def test_center_crop_h4_w2(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n height, width = 4, 2\n expected = torch.tensor([[[\n [2., 3.],\n [6., 7.],\n [10., 11.],\n [14., 15.],\n ]]], device=device, dtype=dtype)\n\n out_crop = kornia.center_crop(inp, (height, width))\n assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)\n\n def test_center_crop_h4_w2_batch(self, device, dtype):\n inp = torch.tensor([\n [[[1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.]]],\n [[[1., 5., 9., 13.],\n [2., 6., 10., 14.],\n [3., 7., 11., 15.],\n [4., 8., 12., 16.]]]\n ], device=device, dtype=dtype)\n\n expected = torch.tensor([[[\n [2., 3.],\n [6., 7.],\n [10., 11.],\n [14., 15.],\n ]], [[\n [5., 9.],\n [6., 10.],\n [7., 11.],\n [8., 12.],\n ]]], device=device, dtype=dtype)\n\n out_crop = kornia.center_crop(inp, (4, 2))\n assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)\n img = utils.tensor_to_gradcheck_var(img) # to var\n\n assert gradcheck(kornia.center_crop, (img, (4, 2),), raise_exception=True)\n\n def test_jit(self, device, dtype):\n # Define script\n op = kornia.center_crop\n op_script = torch.jit.script(op)\n # Define input\n img = torch.ones(1, 2, 5, 4, 
device=device, dtype=dtype)\n\n actual = op_script(img, (4, 2))\n expected = op(img, (4, 2))\n assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)\n\n def test_jit_trace(self, device, dtype):\n # Define script\n op = kornia.center_crop\n op_script = torch.jit.script(op)\n # Define input\n img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)\n op_trace = torch.jit.trace(op_script, (img, (torch.tensor(2), torch.tensor(3))))\n img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)\n # Run\n actual = op_trace(img, (torch.tensor(2), torch.tensor(3)))\n expected = op(img, (2, 3))\n assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)\n\n\nclass TestCropByBoxes:\n def test_crop_by_boxes_no_resizing(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n src = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n dst = torch.tensor([[\n [0., 0.],\n [1., 0.],\n [1., 1.],\n [0., 1.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n expected = torch.tensor([[[\n [6., 7.],\n [10., 11.],\n ]]], device=device, dtype=dtype)\n\n patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)\n assert_allclose(patches, expected)\n\n def test_crop_by_boxes_resizing(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n src = torch.tensor([[\n [1., 1.],\n [2., 1.],\n [2., 2.],\n [1., 2.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n dst = torch.tensor([[\n [0., 0.],\n [2., 0.],\n [2., 1.],\n [0., 1.],\n ]], device=device, dtype=dtype) # 1x4x2\n\n expected = torch.tensor([[[\n [6., 6.5, 7.],\n [10., 10.5, 11.],\n ]]], device=device, dtype=dtype)\n\n patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)\n src = torch.tensor([[\n [1., 0.],\n [2., 0.],\n [2., 1.],\n [1., 1.]]], device=device, dtype=dtype)\n dst = torch.tensor([[\n [0., 0.],\n [1., 0.],\n [1., 1.],\n [0., 1.]]], device=device, dtype=dtype)\n\n inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True) # to var\n\n assert gradcheck(kornia.geometry.transform.crop.crop_by_boxes,\n (inp, src, dst,),\n raise_exception=True)\n\n\nclass TestCropByTransform:\n def test_crop_by_transform_no_resizing(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n transform = torch.tensor([[\n [1., 0., -1.],\n [0., 1., -1.],\n [0., 0., 1.],\n ]], device=device, dtype=dtype) # 1x3x3\n\n expected = torch.tensor([[[\n [6., 7.],\n [10., 11.],\n ]]], device=device, dtype=dtype)\n\n patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 2))\n assert_allclose(patches, expected)\n\n def test_crop_by_boxes_resizing(self, device, dtype):\n inp = torch.tensor([[[\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.],\n [13., 14., 15., 16.],\n ]]], device=device, dtype=dtype)\n\n transform = torch.tensor([[\n [2., 0., -2.],\n [0., 1., -1.],\n [0., 0., 1.],\n ]], device=device, dtype=dtype) # 1x3x3\n\n expected = torch.tensor([[[\n [6., 6.5, 7.],\n [10., 10.5, 11.],\n ]]], device=device, dtype=dtype)\n\n patches = 
kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 3))\n assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)\n\n def test_gradcheck(self, device, dtype):\n inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)\n transform = torch.tensor([[\n [2., 0., -2.],\n [0., 1., -1.],\n [0., 0., 1.],\n ]], device=device, dtype=dtype) # 1x3x3\n\n inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True) # to var\n\n assert gradcheck(kornia.geometry.transform.crop.crop_by_transform_mat,\n (inp, transform, (2, 2),),\n raise_exception=True)\n"
] | [
[
"torch.jit.script",
"torch.ones",
"torch.testing.assert_allclose",
"torch.randn",
"torch.rand",
"torch.tensor",
"torch.autograd.gradcheck"
]
] |
GonzalezDiazJ/pyviz_geoviews_clone | [
"cac9afd1bc0d25313c84ea617300bbe40207d044"
] | [
"geoviews/operation/projection.py"
] | [
"import param\nimport numpy as np\n\nfrom cartopy import crs as ccrs\nfrom cartopy.img_transform import warp_array, _determine_bounds\nfrom holoviews.core.util import cartesian_product, get_param_values\nfrom holoviews.operation import Operation\nfrom shapely.geometry import Polygon, LineString, MultiPolygon, MultiLineString\n\nfrom ..element import (Image, Shape, Polygons, Path, Points, Contours,\n RGB, Graph, Nodes, EdgePaths, QuadMesh, VectorField,\n HexTiles, Labels)\nfrom ..util import (\n project_extents, geom_to_array, wrap_path_data, is_multi_geometry,\n polygon_to_geom, path_to_geom\n)\n\n\nclass _project_operation(Operation):\n \"\"\"\n Baseclass for projection operations, projecting elements from their\n source coordinate reference system to the supplied projection.\n \"\"\"\n\n projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,\n class_=ccrs.Projection,\n instantiate=False, doc=\"\"\"\n Projection the shape type is projected to.\"\"\")\n\n # Defines the types of elements supported by the operation\n supported_types = []\n\n def _process(self, element, key=None):\n return element.map(self._process_element, self.supported_types)\n\n\nclass project_path(_project_operation):\n \"\"\"\n Projects Polygons and Path Elements from their source coordinate\n reference system to the supplied projection.\n \"\"\"\n\n supported_types = [Polygons, Path, Contours, EdgePaths]\n\n def _project_path(self, element, path, data, boundary, geom_type, multi_type):\n \"\"\"\n Handle case of continuously varying path\n \"\"\"\n xdim, ydim = path.kdims[:2]\n xs, ys = (path.dimension_values(i) for i in range(2))\n if not len(xs):\n return []\n\n proj_arr = self.p.projection.quick_vertices_transform(\n np.column_stack([xs, ys]), element.crs)\n\n if proj_arr is None:\n vertices = np.column_stack([xs, ys])\n if hasattr(element.crs, '_bbox_and_offset'):\n vertices = wrap_path_data(vertices, element.crs, element.crs)\n path = geom_type(vertices)\n if boundary:\n path = path.intersection(boundary)\n if not path:\n return []\n proj = self.p.projection.project_geometry(path, element.crs)\n proj_arr = geom_to_array(proj)\n data[xdim.name] = proj_arr[:, 0]\n data[ydim.name] = proj_arr[:, 1]\n return [data]\n\n def _project_contour(self, element, contour, data, boundary, geom_type, multi_type):\n \"\"\"\n Handle case of iso-contour\n \"\"\"\n xdim, ydim = contour.kdims[:2]\n data = {k: vals[0] for k, vals in data.items()}\n\n # Wrap longitudes\n vertices = contour.array([0, 1])\n if hasattr(element.crs, '_bbox_and_offset'):\n vertices = wrap_path_data(vertices, element.crs, element.crs)\n element = type(element)([vertices])\n to_geom = polygon_to_geom if isinstance(element, Polygon) else path_to_geom\n\n # Clip path to projection boundaries\n geoms = []\n for g in to_geom(element, multi=False, skip_invalid=False):\n if np.isinf(np.array(g.array_interface_base['data'])).sum():\n # Skip if infinity in path\n continue\n try:\n # Compute boundary intersections\n if boundary:\n g = g.intersection(boundary)\n except:\n continue\n if is_multi_geometry(g):\n for p in g:\n try:\n geoms.append(geom_type(p))\n except:\n continue\n else:\n geoms.append(g)\n\n # Project geometry\n projected = []\n for g in geoms:\n proj = self.p.projection.project_geometry(g, contour.crs)\n proj = proj if is_multi_geometry(proj) else [proj]\n for geom in proj:\n vertices = np.array(geom.array_interface_base['data']).reshape(-1, 2)\n xs, ys = vertices.T\n if len(xs):\n projected.append(dict(data, **{xdim.name: xs, ydim.name: ys}))\n 
return projected\n\n def _project_geodataframe(self, element):\n geoms = element.split(datatype='geom')\n projected = [self.p.projection.project_geometry(geom, element.crs)\n for geom in geoms]\n new_data = element.data.copy()\n new_data['geometry'] = projected\n return element.clone(new_data, crs=self.p.projection)\n\n def _process_element(self, element):\n if not len(element):\n return element.clone(crs=self.p.projection)\n elif element.interface.datatype == 'geodataframe':\n return self._project_geodataframe(element)\n\n boundary = element.crs.project_geometry(Polygon(self.p.projection.boundary),\n self.p.projection)\n\n\n if isinstance(element, Polygons):\n multi_type, geom_type = MultiPolygon, Polygon\n else:\n multi_type, geom_type = MultiLineString, LineString\n\n projected = []\n paths = element.split()\n for path in paths:\n data = {vd.name: path.dimension_values(vd, expanded=False) for vd in path.vdims}\n if any(len(vals) > 1 for vals in data.values()):\n projected += self._project_path(element, path, data, boundary, geom_type, multi_type)\n else:\n projected += self._project_contour(element, path, data, boundary, geom_type, multi_type)\n\n if len(paths) and len(projected) == 0:\n self.warning('While projecting a %s element from a %s coordinate '\n 'reference system (crs) to a %s projection none of '\n 'the projected paths were contained within the bounds '\n 'specified by the projection. Ensure you have specified '\n 'the correct coordinate system for your data.' %\n (type(element).__name__, type(element.crs).__name__,\n type(self.p.projection).__name__))\n\n return element.clone(projected, crs=self.p.projection)\n\n\nclass project_shape(_project_operation):\n \"\"\"\n Projects Shape Element from the source coordinate reference system\n to the supplied projection.\n \"\"\"\n\n supported_types = [Shape]\n\n def _process_element(self, element):\n if not len(element):\n return element.clone(crs=self.p.projection)\n geom = element.geom()\n vertices = geom_to_array(geom)\n if isinstance(geom, (MultiPolygon, Polygon)):\n obj = Polygons([vertices])\n else:\n obj = Path([vertices])\n geom = project_path(obj, projection=self.p.projection).geom()\n return element.clone(geom, crs=self.p.projection)\n\n\nclass project_points(_project_operation):\n\n supported_types = [Points, Nodes, VectorField, HexTiles, Labels]\n\n def _process_element(self, element):\n if not len(element):\n return element.clone(crs=self.p.projection)\n xdim, ydim = element.dimensions()[:2]\n xs, ys = (element.dimension_values(i) for i in range(2))\n coordinates = self.p.projection.transform_points(element.crs, xs, ys)\n mask = np.isfinite(coordinates[:, 0])\n new_data = {k: v[mask] for k, v in element.columns().items()}\n new_data[xdim.name] = coordinates[mask, 0]\n new_data[ydim.name] = coordinates[mask, 1]\n datatype = [element.interface.datatype]+element.datatype\n\n if len(new_data[xdim.name]) == 0:\n self.warning('While projecting a %s element from a %s coordinate '\n 'reference system (crs) to a %s projection none of '\n 'the projected paths were contained within the bounds '\n 'specified by the projection. Ensure you have specified '\n 'the correct coordinate system for your data.' 
%\n (type(element).__name__, type(element.crs).__name__,\n type(self.p.projection).__name__))\n\n return element.clone(new_data, crs=self.p.projection,\n datatype=datatype)\n\n\nclass project_graph(_project_operation):\n\n supported_types = [Graph]\n\n def _process_element(self, element):\n nodes = project_points(element.nodes, projection=self.projection)\n data = (element.data, nodes)\n if element._edgepaths:\n data = data + (project_path(element.edgepaths, projection=self.projection),)\n return element.clone(data, crs=self.projection)\n\n\nclass project_quadmesh(_project_operation):\n\n supported_types = [QuadMesh]\n\n def _process_element(self, element):\n proj = self.p.projection\n irregular = any(element.interface.irregular(element, kd)\n for kd in element.kdims)\n zs = element.dimension_values(2, flat=False)\n if irregular:\n X, Y = [np.asarray(element.interface.coords(element, kd, expanded=True))\n for kd in element.kdims]\n else:\n X = element.dimension_values(0, expanded=True)\n Y = element.dimension_values(1, expanded=True)\n zs = zs.T\n\n coords = proj.transform_points(element.crs, X, Y)\n PX, PY = coords[..., 0], coords[..., 1]\n\n # Mask quads which are wrapping around the x-axis\n wrap_proj_types = (ccrs._RectangularProjection,\n ccrs._WarpedRectangularProjection,\n ccrs.InterruptedGoodeHomolosine,\n ccrs.Mercator)\n if isinstance(proj, wrap_proj_types):\n with np.errstate(invalid='ignore'):\n edge_lengths = np.hypot(\n np.diff(PX , axis=1),\n np.diff(PY, axis=1)\n )\n to_mask = (\n (edge_lengths >= abs(proj.x_limits[1] -\n proj.x_limits[0]) / 2) |\n np.isnan(edge_lengths)\n )\n if np.any(to_mask):\n mask = np.zeros(zs.shape, dtype=np.bool)\n mask[:, 1:][to_mask] = True\n mask[:, 2:][to_mask[:, :-1]] = True\n mask[:, :-1][to_mask] = True\n mask[:, :-2][to_mask[:, 1:]] = True\n mask[1:, 1:][to_mask[:-1]] = True\n mask[1:, :-1][to_mask[:-1]] = True\n mask[:-1, 1:][to_mask[1:]] = True\n mask[:-1, :-1][to_mask[1:]] = True\n zs[mask] = np.NaN\n\n params = get_param_values(element)\n if PX.ndim < 2:\n PX = PX.reshape(zs.shape)\n if PY.ndim < 2:\n PY = PY.reshape(zs.shape)\n return QuadMesh((PX, PY, zs), crs=self.projection, **params)\n\n\nclass project_image(_project_operation):\n \"\"\"\n Projects an geoviews Image to the specified projection,\n returning a regular HoloViews Image type. Works by\n regridding the data along projected bounds. 
Only supports\n rectangular projections.\n \"\"\"\n\n fast = param.Boolean(default=False, doc=\"\"\"\n Whether to enable fast reprojection with (much) better\n performance but poorer handling in polar regions.\"\"\")\n\n width = param.Integer(default=None, doc=\"\"\"\n Width of the reprojectd Image\"\"\")\n\n height = param.Integer(default=None, doc=\"\"\"\n Height of the reprojected Image\"\"\")\n\n link_inputs = param.Boolean(default=True, doc=\"\"\"\n By default, the link_inputs parameter is set to True so that\n when applying project_image, backends that support linked streams\n update RangeXY streams on the inputs of the operation.\"\"\")\n\n supported_types = [Image]\n\n def _process(self, img, key=None):\n if self.p.fast:\n return self._fast_process(img, key)\n proj = self.p.projection\n if proj == img.crs:\n return img\n x0, x1 = img.range(0)\n y0, y1 = img.range(1)\n xn, yn = img.interface.shape(img, gridded=True)[:2]\n px0, py0, px1, py1 = project_extents((x0, y0, x1, y1),\n img.crs, proj)\n src_ext, trgt_ext = (x0, x1, y0, y1), (px0, px1, py0, py1)\n arrays = []\n for vd in img.vdims:\n arr = img.dimension_values(vd, flat=False)\n if arr.size:\n projected, extents = warp_array(arr, proj, img.crs, (xn, yn),\n src_ext, trgt_ext)\n else:\n projected, extents = arr, trgt_ext\n arrays.append(projected)\n projected = np.dstack(arrays) if len(arrays) > 1 else arrays[0]\n data = np.flipud(projected)\n bounds = (extents[0], extents[2], extents[1], extents[3])\n return img.clone(data, bounds=bounds, kdims=img.kdims,\n vdims=img.vdims, crs=proj, xdensity=None,\n ydensity=None)\n\n def _fast_process(self, element, key=None):\n # Project coordinates\n proj = self.p.projection\n if proj == element.crs:\n return element\n\n h, w = element.interface.shape(element, gridded=True)[:2]\n xs = element.dimension_values(0)\n ys = element.dimension_values(1)\n if isinstance(element, RGB):\n rgb = element.rgb\n array = np.dstack([np.flipud(rgb.dimension_values(d, flat=False))\n for d in rgb.vdims])\n else:\n array = element.dimension_values(2, flat=False)\n\n (x0, y0, x1, y1) = element.bounds.lbrt()\n width = int(w) if self.p.width is None else self.p.width\n height = int(h) if self.p.height is None else self.p.height\n\n bounds = _determine_bounds(xs, ys, element.crs)\n yb = bounds['y']\n resampled = []\n xvalues = []\n for xb in bounds['x']:\n px0, py0, px1, py1 = project_extents((xb[0], yb[0], xb[1], yb[1]), element.crs, proj)\n if len(bounds['x']) > 1:\n xfraction = (xb[1]-xb[0])/(x1-x0)\n fraction_width = int(width*xfraction)\n else:\n fraction_width = width\n xs = np.linspace(px0, px1, fraction_width)\n ys = np.linspace(py0, py1, height)\n cxs, cys = cartesian_product([xs, ys])\n\n pxs, pys, _ = element.crs.transform_points(proj, np.asarray(cxs), np.asarray(cys)).T\n icxs = (((pxs-x0) / (x1-x0)) * w).astype(int)\n icys = (((pys-y0) / (y1-y0)) * h).astype(int)\n xvalues.append(xs)\n\n icxs[icxs<0] = 0\n icys[icys<0] = 0\n icxs[icxs>=w] = w-1\n icys[icys>=h] = h-1\n resampled_arr = array[icys, icxs]\n if isinstance(element, RGB):\n nvdims = len(element.vdims)\n resampled_arr = resampled_arr.reshape((fraction_width, height, nvdims)).transpose([1, 0, 2])\n else:\n resampled_arr = resampled_arr.reshape((fraction_width, height)).T\n resampled.append(resampled_arr)\n xs = np.concatenate(xvalues[::-1])\n resampled = np.hstack(resampled[::-1])\n datatypes = [element.interface.datatype, 'xarray', 'grid']\n data = (xs, ys)\n for i in range(len(element.vdims)):\n if resampled.ndim > 2:\n data = data + 
(resampled[::-1, :, i],)\n else:\n data = data + (resampled,)\n return element.clone(data, crs=proj, bounds=None, datatype=datatypes)\n\n\nclass project(Operation):\n \"\"\"\n Projects GeoViews Element types to the specified projection.\n \"\"\"\n\n projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,\n class_=ccrs.Projection,\n instantiate=False, doc=\"\"\"\n Projection the image type is projected to.\"\"\")\n\n _operations = [project_path, project_image, project_shape,\n project_graph, project_quadmesh, project_points]\n\n def _process(self, element, key=None):\n for op in self._operations:\n element = element.map(op.instance(projection=self.p.projection),\n op.supported_types)\n return element\n"
] | [
[
"numpy.flipud",
"numpy.zeros",
"numpy.diff",
"numpy.any",
"numpy.column_stack",
"numpy.asarray",
"numpy.errstate",
"numpy.hstack",
"numpy.dstack",
"numpy.isnan",
"numpy.array",
"numpy.concatenate",
"numpy.linspace",
"numpy.isfinite"
]
] |
necromuralist/resampling | [
"0b48a51cb5f8e21a3f52508ecc74f12fa03d9b25"
] | [
"resampling/foundations_for_inference/gender_discrimination.py"
] | [
"\n# pandas standard library\nimport sys\n\n# third-party\nimport pandas\nimport matplotlib\nimport matplotlib.pyplot as plot\n\nmatplotlib.style.use('ggplot')\n\nGENDER_COUNT = 24\nMALES_PROMOTED = 21\nFEMALES_PROMOTED = 14\nGENDER_DIFFERENCE = MALES_PROMOTED - FEMALES_PROMOTED\nFEMALES_NOT_PROMOTED = GENDER_COUNT - FEMALES_PROMOTED\nMALES_NOT_PROMOTED = GENDER_COUNT - MALES_PROMOTED\n\nexperiment_data = pandas.DataFrame({\"Promoted\": [MALES_PROMOTED,\n FEMALES_PROMOTED],\n \"Not Promoted\": [MALES_NOT_PROMOTED,\n FEMALES_NOT_PROMOTED]},\n index='male female'.split(),\n columns=[\"Promoted\", \"Not Promoted\"])\n\nexperiment_frame = experiment_data.copy()\nexperiment_frame['Total'] = sum((experiment_frame[column] for column in\n experiment_frame.columns))\nlast_row = pandas.DataFrame(experiment_frame.sum()).transpose()\nlast_row.index = pandas.Index(['Total'])\nexperiment_frame = pandas.concat((experiment_frame, last_row))\n\nclass IndentOutput(object):\n \"\"\"Fake file output for csv-writing \"\"\"\n @classmethod\n def write(cls, line):\n \"\"\"Write line to stdout with three spaces prepended\"\"\"\n sys.stdout.write(\" {0}\".format(line))\n\nprint('.. csv-table:: Experiment Outcome')\nprint(' :header: ,{0}\\n'.format(','.join(experiment_frame.columns)))\n\nexperiment_frame.to_csv(IndentOutput, header=False)\n\nprint('.. csv-table:: Experiment proportions')\nprint(' :header: ,{0}\\n'.format(','.join(experiment_frame.columns)))\n\ntotals = pandas.Series([GENDER_COUNT, GENDER_COUNT, GENDER_COUNT * 2],\n index='male female Total'.split())\ntotal_frame = pandas.DataFrame({'Promoted': totals,\n \"Not Promoted\": totals,\n \"Total\": totals})\nproportions = experiment_frame/total_frame\nproportions.to_csv(IndentOutput, header=False,\n columns=['Promoted', 'Not Promoted', 'Total'],\n float_format=\"%.3f\")\n\npath = 'figures/gender_experiment_bar.svg'\nfigure = plot.figure()\naxe = figure.gca()\nexperiment_data.plot(kind='bar', ax=axe)\nfigure.savefig(path)\nprint('.. image:: {0}'.format(path))\n\nprint(\" \\\\frac{{{0}}}{{{2}}}- \\\\frac{{{1}}}{{{2}}}&=\\\\frac{{{3}}}{{{2}}}\\\\\\\\\".format(MALES_PROMOTED,\n FEMALES_PROMOTED,\n GENDER_COUNT,\n GENDER_DIFFERENCE))\nprint(\" &\\\\approx {:.3f}\\\\\\\\\".format(GENDER_DIFFERENCE/GENDER_COUNT))\n"
] | [
[
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"matplotlib.style.use",
"pandas.concat",
"pandas.Index"
]
] |
hugeinteger/InterFaceGAN | [
"59e75c0b4dcdbcea693b31ff11cf239c39e14ed1"
] | [
"utils/manipulator.py"
] | [
"# python3.7\n\"\"\"Utility functions for latent codes manipulation.\"\"\"\n\nimport numpy as np\nfrom sklearn import svm\n\nfrom .logger import setup_logger\n\n__all__ = ['train_boundary', 'project_boundary', 'linear_interpolate']\n\n\ndef train_boundary(latent_codes,\n scores,\n chosen_num_or_ratio=0.02,\n split_ratio=0.7,\n invalid_value=None,\n logger=None):\n \"\"\"Trains boundary in latent space with offline predicted attribute scores.\n\n Given a collection of latent codes and the attribute scores predicted from the\n corresponding images, this function will train a linear SVM by treating it as\n a bi-classification problem. Basically, the samples with highest attribute\n scores are treated as positive samples, while those with lowest scores as\n negative. For now, the latent code can ONLY be with 1 dimension.\n\n NOTE: The returned boundary is with shape (1, latent_space_dim), and also\n normalized with unit norm.\n\n Args:\n latent_codes: Input latent codes as training data.\n scores: Input attribute scores used to generate training labels.\n chosen_num_or_ratio: How many samples will be chosen as positive (negative)\n samples. If this field lies in range (0, 0.5], `chosen_num_or_ratio *\n latent_codes_num` will be used. Otherwise, `min(chosen_num_or_ratio,\n 0.5 * latent_codes_num)` will be used. (default: 0.02)\n split_ratio: Ratio to split training and validation sets. (default: 0.7)\n invalid_value: This field is used to filter out data. (default: None)\n logger: Logger for recording log messages. If set as `None`, a default\n logger, which prints messages from all levels to screen, will be created.\n (default: None)\n\n Returns:\n A decision boundary with type `numpy.ndarray`.\n\n Raises:\n ValueError: If the input `latent_codes` or `scores` are with invalid format.\n \"\"\"\n if not logger:\n logger = setup_logger(work_dir='', logger_name='train_boundary')\n\n if (not isinstance(latent_codes, np.ndarray) or\n not len(latent_codes.shape) == 2):\n raise ValueError(f'Input `latent_codes` should be with type'\n f'`numpy.ndarray`, and shape [num_samples, '\n f'latent_space_dim]!')\n num_samples = latent_codes.shape[0]\n latent_space_dim = latent_codes.shape[1]\n if (not isinstance(scores, np.ndarray) or not len(scores.shape) == 2 or\n not scores.shape[0] == num_samples or not scores.shape[1] == 1):\n raise ValueError(f'Input `scores` should be with type `numpy.ndarray`, and '\n f'shape [num_samples, 1], where `num_samples` should be '\n f'exactly same as that of input `latent_codes`!')\n if chosen_num_or_ratio <= 0:\n raise ValueError(f'Input `chosen_num_or_ratio` should be positive, '\n f'but {chosen_num_or_ratio} received!')\n\n logger.info(f'Filtering training data.')\n if invalid_value is not None:\n latent_codes = latent_codes[scores != invalid_value]\n scores = scores[scores != invalid_value]\n\n logger.info(f'Sorting scores to get positive and negative samples.')\n sorted_idx = np.argsort(scores, axis=0)[::-1, 0]\n latent_codes = latent_codes[sorted_idx]\n scores = scores[sorted_idx]\n num_samples = latent_codes.shape[0]\n if 0 < chosen_num_or_ratio <= 1:\n chosen_num = int(num_samples * chosen_num_or_ratio)\n else:\n chosen_num = chosen_num_or_ratio\n chosen_num = min(chosen_num, num_samples // 2)\n\n logger.info(f'Spliting training and validation sets:')\n train_num = int(chosen_num * split_ratio)\n val_num = chosen_num - train_num\n # Positive samples.\n positive_idx = np.arange(chosen_num)\n np.random.shuffle(positive_idx)\n positive_train = 
latent_codes[:chosen_num][positive_idx[:train_num]]\n positive_val = latent_codes[:chosen_num][positive_idx[train_num:]]\n # Negative samples.\n negative_idx = np.arange(chosen_num)\n np.random.shuffle(negative_idx)\n negative_train = latent_codes[-chosen_num:][negative_idx[:train_num]]\n negative_val = latent_codes[-chosen_num:][negative_idx[train_num:]]\n # Training set.\n train_data = np.concatenate([positive_train, negative_train], axis=0)\n train_label = np.concatenate([np.ones(train_num, dtype=np.int),\n np.zeros(train_num, dtype=np.int)], axis=0)\n logger.info(f' Training: {train_num} positive, {train_num} negative.')\n # Validation set.\n val_data = np.concatenate([positive_val, negative_val], axis=0)\n val_label = np.concatenate([np.ones(val_num, dtype=np.int),\n np.zeros(val_num, dtype=np.int)], axis=0)\n logger.info(f' Validation: {val_num} positive, {val_num} negative.')\n # Remaining set.\n remaining_num = num_samples - chosen_num * 2\n remaining_data = latent_codes[chosen_num:-chosen_num]\n remaining_scores = scores[chosen_num:-chosen_num]\n decision_value = (scores[0] + scores[-1]) / 2\n remaining_label = np.ones(remaining_num, dtype=np.int)\n remaining_label[remaining_scores.ravel() < decision_value] = 0\n remaining_positive_num = np.sum(remaining_label == 1)\n remaining_negative_num = np.sum(remaining_label == 0)\n logger.info(f' Remaining: {remaining_positive_num} positive, '\n f'{remaining_negative_num} negative.')\n\n logger.info(f'Training boundary.')\n clf = svm.SVC(kernel='linear')\n classifier = clf.fit(train_data, train_label)\n logger.info(f'Finish training.')\n\n if val_num:\n val_prediction = classifier.predict(val_data)\n correct_num = np.sum(val_label == val_prediction)\n logger.info(f'Accuracy for validation set: '\n f'{correct_num} / {val_num * 2} = '\n f'{correct_num / (val_num * 2):.6f}')\n\n if remaining_num:\n remaining_prediction = classifier.predict(remaining_data)\n correct_num = np.sum(remaining_label == remaining_prediction)\n logger.info(f'Accuracy for remaining set: '\n f'{correct_num} / {remaining_num} = '\n f'{correct_num / remaining_num:.6f}')\n\n a = classifier.coef_.reshape(1, latent_space_dim).astype(np.float32)\n return a / np.linalg.norm(a)\n\n\ndef project_boundary(primal, *args):\n \"\"\"Projects the primal boundary onto condition boundaries.\n\n The function is used for conditional manipulation, where the projected vector\n will be subscribed from the normal direction of the original boundary. 
Here,\n all input boundaries are supposed to have already been normalized to unit\n norm, and with same shape [1, latent_space_dim].\n\n NOTE: For now, at most two condition boundaries are supported.\n\n Args:\n primal: The primal boundary.\n *args: Other boundaries as conditions.\n\n Returns:\n A projected boundary (also normalized to unit norm), which is orthogonal to\n all condition boundaries.\n\n Raises:\n NotImplementedError: If there are more than two condition boundaries.\n \"\"\"\n if len(args) > 2:\n raise NotImplementedError(f'This function supports projecting with at most '\n f'two conditions.')\n assert len(primal.shape) == 2 and primal.shape[0] == 1\n\n if not args:\n return primal\n if len(args) == 1:\n cond = args[0]\n assert (len(cond.shape) == 2 and cond.shape[0] == 1 and\n cond.shape[1] == primal.shape[1])\n new = primal - primal.dot(cond.T) * cond\n return new / np.linalg.norm(new)\n if len(args) == 2:\n cond_1 = args[0]\n cond_2 = args[1]\n assert (len(cond_1.shape) == 2 and cond_1.shape[0] == 1 and\n cond_1.shape[1] == primal.shape[1])\n assert (len(cond_2.shape) == 2 and cond_2.shape[0] == 1 and\n cond_2.shape[1] == primal.shape[1])\n primal_cond_1 = primal.dot(cond_1.T)\n primal_cond_2 = primal.dot(cond_2.T)\n cond_1_cond_2 = cond_1.dot(cond_2.T)\n alpha = (primal_cond_1 - primal_cond_2 * cond_1_cond_2) / (\n 1 - cond_1_cond_2 ** 2 + 1e-8)\n beta = (primal_cond_2 - primal_cond_1 * cond_1_cond_2) / (\n 1 - cond_1_cond_2 ** 2 + 1e-8)\n new = primal - alpha * cond_1 - beta * cond_2\n return new / np.linalg.norm(new)\n\n raise NotImplementedError\n\n\ndef linear_interpolate(latent_code,\n boundary,\n start_distance=-3.0,\n end_distance=3.0,\n steps=10):\n \"\"\"Manipulates the given latent code with respect to a particular boundary.\n\n Basically, this function takes a latent code and a boundary as inputs, and\n outputs a collection of manipulated latent codes. For example, let `steps` to\n be 10, then the input `latent_code` is with shape [1, latent_space_dim], input\n `boundary` is with shape [1, latent_space_dim] and unit norm, the output is\n with shape [10, latent_space_dim]. The first output latent code is\n `start_distance` away from the given `boundary`, while the last output latent\n code is `end_distance` away from the given `boundary`. Remaining latent codes\n are linearly interpolated.\n\n Input `latent_code` can also be with shape [1, num_layers, latent_space_dim]\n to support W+ space in Style GAN. In this case, all features in W+ space will\n be manipulated same as each other. Accordingly, the output will be with shape\n [10, num_layers, latent_space_dim].\n\n NOTE: Distance is sign sensitive.\n\n Args:\n latent_code: The input latent code for manipulation.\n boundary: The semantic boundary as reference.\n start_distance: The distance to the boundary where the manipulation starts.\n (default: -3.0)\n end_distance: The distance to the boundary where the manipulation ends.\n (default: 3.0)\n steps: Number of steps to move the latent code from start position to end\n position. 
(default: 10)\n \"\"\"\n assert (latent_code.shape[0] == 1 and boundary.shape[0] == 1 and\n len(boundary.shape) == 2 and\n boundary.shape[1] == latent_code.shape[-1])\n\n linspace = np.linspace(start_distance, end_distance, steps)\n if len(latent_code.shape) == 2:\n linspace = linspace - latent_code.dot(boundary.T)\n linspace = linspace.reshape(-1, 1).astype(np.float32)\n return latent_code + linspace * boundary\n if len(latent_code.shape) == 3:\n linspace = linspace.reshape(-1, 1, 1).astype(np.float32)\n return latent_code + linspace * boundary.reshape(1, 1, -1)\n raise ValueError(f'Input `latent_code` should be with shape '\n f'[1, latent_space_dim] or [1, N, latent_space_dim] for '\n f'W+ space in Style GAN!\\n'\n f'But {latent_code.shape} is received.')\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.random.shuffle",
"numpy.linalg.norm",
"sklearn.svm.SVC",
"numpy.zeros",
"numpy.argsort",
"numpy.arange",
"numpy.concatenate",
"numpy.linspace"
]
] |
NREL/reVX | [
"4d62eb2c003c3b53b959f7a58bdc342d18098884"
] | [
"reVX/least_cost_xmission/least_cost_xmission.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nModule to compute least cost xmission paths, distances, and costs one or\nmore SC points\n\"\"\"\nfrom concurrent.futures import as_completed\nimport geopandas as gpd\nimport json\nimport logging\nimport numpy as np\nimport os\nimport pandas as pd\nfrom pyproj.crs import CRS\nimport rasterio\nfrom scipy.spatial import cKDTree\nfrom shapely.geometry import Point\nimport time\n\nfrom reV.handlers.exclusions import ExclusionLayers\nfrom reV.supply_curve.points import SupplyCurveExtent\nfrom rex.utilities.execution import SpawnProcessPool\nfrom rex.utilities.loggers import log_mem\n\nfrom reVX.least_cost_xmission.config import (TRANS_LINE_CAT, LOAD_CENTER_CAT,\n SINK_CAT, SUBSTATION_CAT)\nfrom reVX.least_cost_xmission.least_cost_paths import LeastCostPaths\nfrom reVX.least_cost_xmission.trans_cap_costs import TransCapCosts\n\nlogger = logging.getLogger(__name__)\n\n\nclass LeastCostXmission(LeastCostPaths):\n \"\"\"\n Compute Least Cost tie-line paths and full transmission cap cost\n for all possible connections to all supply curve points\n -\n \"\"\"\n REQUIRED_LAYRES = ['transmission_barrier', 'ISO_regions']\n\n def __init__(self, cost_fpath, features_fpath, resolution=128,\n xmission_config=None):\n \"\"\"\n Parameters\n ----------\n cost_fpath : str\n Path to h5 file with cost rasters and other required layers\n features_fpath : str\n Path to geopackage with transmission features\n resolution : int, optional\n SC point resolution, by default 128\n xmission_config : str | dict | XmissionConfig, optional\n Path to Xmission config .json, dictionary of Xmission config\n .jsons, or preloaded XmissionConfig objects, by default None\n \"\"\"\n self._check_layers(cost_fpath)\n self._config = TransCapCosts._parse_config(\n xmission_config=xmission_config)\n\n (self._sc_points, self._features,\n self._sub_lines_mapping, self._shape) =\\\n self._map_to_costs(cost_fpath, features_fpath,\n resolution=resolution)\n self._cost_fpath = cost_fpath\n self._tree = None\n self._sink_coords = None\n self._min_line_len = (resolution * 0.09) / 2\n\n logger.debug('{} initialized'.format(self))\n\n def __repr__(self):\n msg = (\"{} to be computed for {} sc_points and {} features\"\n .format(self.__class__.__name__,\n len(self.sc_points),\n len(self.features)))\n\n return msg\n\n @property\n def sc_points(self):\n \"\"\"\n Table of supply curve points\n\n Returns\n -------\n gpd.GeoDataFrame\n \"\"\"\n return self._sc_points\n\n @property\n def features(self):\n \"\"\"\n Table of features to compute paths for\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n return self._features\n\n @property\n def sub_lines_mapping(self):\n \"\"\"\n Series mapping substations to the transmission lines connected\n to each substation\n\n Returns\n -------\n pandas.Series\n \"\"\"\n return self._sub_lines_mapping\n\n @property\n def sink_coords(self):\n \"\"\"\n Inf sink coordinates (row, col)\n\n Returns\n -------\n ndarray\n \"\"\"\n if self._sink_coords is None:\n mask = self.features['category'] == SINK_CAT\n self._sink_coords = self.features.loc[mask, ['row', 'col']].values\n\n return self._sink_coords\n\n @property\n def sink_tree(self):\n \"\"\"\n cKDTree for infinite sinks\n\n Returns\n -------\n cKDTree\n \"\"\"\n if self._tree is None:\n self._tree = cKDTree(self.sink_coords)\n\n return self._tree\n\n @staticmethod\n def _load_trans_feats(features_fpath):\n \"\"\"\n Load existing transmission features from disk. 
Substations will be\n loaded from cache file if it exists\n\n Parameters\n ----------\n features_fpath : str\n Path to geopackage with trans features\n\n Returns\n -------\n features : gpd.GeoDataFrame\n DataFrame of transmission features\n sub_line_map : pandas.Series\n Mapping of sub-station trans_gid to connected tranmission line\n trans_gids\n \"\"\"\n logger.debug('Loading transmission features')\n features = gpd.read_file(features_fpath)\n features = features.drop(columns=['bgid', 'egid', 'cap_left'],\n errors='ignore')\n mapping = {'gid': 'trans_gid', 'trans_gids': 'trans_line_gids'}\n features = features.rename(columns=mapping)\n\n features['min_volts'] = 0\n features['max_volts'] = 0\n\n # Transmission lines\n mask = features['category'] == TRANS_LINE_CAT\n voltage = features.loc[mask, 'voltage'].values\n features.loc[mask, 'min_volts'] = voltage\n features.loc[mask, 'max_volts'] = voltage\n\n # Load Center and Sinks\n mask = features['category'].isin([LOAD_CENTER_CAT, SINK_CAT])\n features.loc[mask, 'min_volts'] = 1\n features.loc[mask, 'max_volts'] = 9999\n\n sub_lines_map = {}\n mask = features['category'] == SUBSTATION_CAT\n bad_subs = np.zeros(len(features), dtype=bool)\n for idx, row in features.loc[mask].iterrows():\n gid = row['trans_gid']\n lines = row['trans_line_gids']\n if isinstance(lines, str):\n lines = json.loads(lines)\n\n sub_lines_map[gid] = lines\n lines_mask = features['trans_gid'].isin(lines)\n voltage = features.loc[lines_mask, 'voltage'].values\n\n if np.max(voltage) >= 69:\n features.loc[idx, 'min_volts'] = np.min(voltage)\n features.loc[idx, 'max_volts'] = np.max(voltage)\n else:\n bad_subs[idx] = True\n\n if any(bad_subs):\n msg = (\"The following sub-stations do not have the minimum \"\n \"required voltage of 69 kV and will be dropped:\\n{}\"\n .format(features.loc[bad_subs, 'trans_gid']))\n logger.warning(msg)\n features = features.loc[~bad_subs].reset_index(drop=True)\n\n return features, pd.Series(sub_lines_map)\n\n @staticmethod\n def _create_sc_points(cost_fpath, resolution=128):\n \"\"\"\n Load SC points, covert row/col to array wide, and determine x/y for\n reV projection\n\n Parameters\n ----------\n cost_fpath : str\n Path to h5 file with cost rasters and other required layers\n resolution : int, optional\n SC point resolution, by default 128\n\n Returns\n sc_points : gpd.GeoDataFrame\n SC points\n \"\"\"\n logger.debug('Loading Supply Curve Points')\n sce = SupplyCurveExtent(cost_fpath, resolution=resolution)\n sc_points = sce.points.rename(columns={'row_ind': 'sc_row_ind',\n 'col_ind': 'sc_col_ind'})\n shape = sce.excl_shape\n sc_points['sc_point_gid'] = sc_points.index.values\n\n row = np.round(sc_points['sc_row_ind'] * resolution + resolution / 2)\n row = np.where(row >= shape[0], shape[0] - 1, row)\n sc_points['row'] = row.astype(int)\n\n col = np.round(sc_points['sc_col_ind'] * resolution + resolution / 2)\n col = np.where(col >= shape[1], shape[1] - 1, col)\n sc_points['col'] = col.astype(int)\n\n return sc_points\n\n @staticmethod\n def _get_feature_cost_indices(features, crs, transform, shape):\n \"\"\"\n Map features to cost row, col indicies using rasterio transform\n\n Parameters\n ----------\n features : gpd.GeoDataFrame\n GeoDataFrame of features to map to cost raster\n crs : pyproj.crs.CRS\n CRS of cost raster\n transform : raster.Affine\n Transform of cost raster\n shape : tuple\n Cost raster shape\n\n Returns\n -------\n row : ndarray\n Vector of row indicies for each feature\n col : ndarray\n Vector of col indicies for each 
features\n mask : ndarray\n Boolean mask of features with indicies outside of cost raster\n \"\"\"\n row, col, mask = super(LeastCostXmission,\n LeastCostXmission)._get_feature_cost_indices(\n features, crs, transform, shape)\n\n t_lines = features['category'] == TRANS_LINE_CAT\n mask |= t_lines\n\n row[t_lines] = np.where(row[t_lines] >= 0, row[t_lines], 0)\n row[t_lines] = np.where(row[t_lines] < shape[0], row[t_lines],\n shape[0] - 1)\n col[t_lines] = np.where(col[t_lines] >= 0, col[t_lines], 0)\n col[t_lines] = np.where(col[t_lines] < shape[1], col[t_lines],\n shape[1] - 1)\n\n return row, col, mask\n\n @classmethod\n def _map_to_costs(cls, cost_fpath, features_fpath, resolution=128):\n \"\"\"\n Map supply curve points and transmission features to cost array pixel\n indices\n\n Parameters\n ----------\n cost_fpath : str\n Path to h5 file with cost rasters and other required layers\n features_fpath : str\n Path to geopackage with transmission features\n resolution : int, optional\n SC point resolution, by default 128\n\n Returns\n -------\n sc_point : gpd.GeoDataFrame\n Table of supply curve points to connect to tranmission\n features : gpd.GeoDataFrame\n Table of transmission features\n sub_lines_map : pandas.Series\n Series mapping substations to the transmission lines connected\n to each substation\n \"\"\"\n with ExclusionLayers(cost_fpath) as f:\n crs = CRS.from_string(f.crs)\n transform = rasterio.Affine(*f.profile['transform'])\n shape = f.shape\n regions = f['ISO_regions']\n\n features, sub_lines_map = cls._load_trans_feats(features_fpath)\n row, col, mask = cls._get_feature_cost_indices(features, crs,\n transform, shape)\n if any(~mask):\n msg = (\"The following features are outside of the cost exclusion \"\n \"domain and will be dropped:\\n{}\"\n .format(features.loc[~mask, 'trans_gid']))\n logger.warning(msg)\n row = row[mask]\n col = col[mask]\n features = features.loc[mask].reset_index(drop=True)\n\n features['row'] = row\n features['col'] = col\n features['region'] = regions[row, col]\n\n logger.debug('Converting SC points to GeoDataFrame')\n sc_points = cls._create_sc_points(cost_fpath, resolution=resolution)\n x, y = rasterio.transform.xy(transform, sc_points['row'].values,\n sc_points['col'].values)\n geo = [Point(xy) for xy in zip(x, y)]\n sc_points = gpd.GeoDataFrame(sc_points, crs=features.crs,\n geometry=geo)\n\n return sc_points, features, sub_lines_map, shape\n\n def _clip_to_sc_point(self, sc_point, tie_line_voltage, nn_sinks=2,\n clipping_buffer=1.05):\n \"\"\"\n Clip costs raster to AOI around SC point, and get substations,\n load centers, and sinks within the clipped region.\n\n Parameters\n ----------\n sc_point : gpd.GeoSeries\n SC point to clip raster around\n nn_sinks : int, optional\n Number of nearest neighbor sinks to clip to\n clipping_buffer : float, optional\n Buffer to increase clipping radius by, by default 1.05\n\n Returns\n -------\n radius : int\n Clipping radius in cost raster pixels\n x_feats : pd.DataFrame\n Substatations, load centers, sinks, and nearest points on t-lines\n to SC point\n \"\"\"\n logger.debug('Clipping features to sc_point {}'.format(sc_point.name))\n if len(self.sink_coords) > 2:\n row, col = sc_point[['row', 'col']].values\n _, pos = self.sink_tree.query([row, col], k=nn_sinks)\n radius = np.abs(self.sink_coords[pos] - np.array([row, col])).max()\n radius = int(np.ceil(radius * clipping_buffer))\n\n logger.debug('Radius to {} nearest sink is: {}'\n .format(nn_sinks, radius))\n row_min = max(row - radius, 0)\n row_max = 
min(row + radius, self._shape[0])\n col_min = max(col - radius, 0)\n col_max = min(col + radius, self._shape[1])\n logger.debug('Extracting all transmission features in the row '\n 'slice {}:{} and column slice {}:{}'\n .format(row_min, row_max, col_min, col_max))\n\n # Clip transmission features\n mask = self.features['row'] >= row_min\n mask &= self.features['row'] < row_max\n mask &= self.features['col'] >= col_min\n mask &= self.features['col'] < col_max\n sc_features = self.features.loc[mask].copy(deep=True)\n logger.debug('{} transmission features found in clipped area with '\n 'radius {}'\n .format(len(sc_features), radius))\n else:\n radius = None\n sc_features = self.features.copy(deep=True)\n\n mask = self.features['max_volts'] >= tie_line_voltage\n sc_features = sc_features.loc[mask].copy(deep=True)\n logger.debug('{} transmission features found in clipped area with '\n 'minimum max voltage of {}'\n .format(len(sc_features), tie_line_voltage))\n\n # Find t-lines connected to substations within clip\n logger.debug('Collecting transmission lines connected to substations')\n mask = sc_features['category'] == SUBSTATION_CAT\n if mask.any():\n trans_gids = sc_features.loc[mask, 'trans_gid'].values\n trans_gids = \\\n np.concatenate(self.sub_lines_mapping.loc[trans_gids].values)\n trans_gids = np.unique(trans_gids)\n line_mask = self.features['trans_gid'].isin(trans_gids)\n trans_lines = self.features.loc[line_mask].copy(deep=True)\n line_mask = trans_lines['trans_gid'].isin(sc_features['trans_gid'])\n trans_lines = trans_lines.loc[~line_mask]\n logger.debug('Adding all {} transmission lines connected to '\n 'substations with minimum max voltage of {}'\n .format(len(trans_lines), tie_line_voltage))\n sc_features = sc_features.append(trans_lines)\n\n return sc_features, radius\n\n def process_sc_points(self, capacity_class, sc_point_gids=None, nn_sinks=2,\n clipping_buffer=1.05, barrier_mult=100,\n max_workers=None):\n \"\"\"\n Compute Least Cost Tranmission for desired sc_points\n\n Parameters\n ----------\n capacity_class : str | int\n Capacity class of transmission features to connect supply curve\n points to\n sc_point_gids : list, optional\n List of sc_point_gids to connect to, by default None\n nn_sinks : int, optional\n Number of nearest neighbor sinks to use for clipping radius\n calculation, by default 2\n clipping_buffer : float, optional\n Buffer to expand clipping radius by, by default 1.05\n barrier_mult : int, optional\n Tranmission barrier multiplier, used when computing the least\n cost tie-line path, by default 100\n max_workers : int, optional\n Number of workers to use for processing, if 1 run in serial,\n if None use all available cores, by default None\n\n Returns\n -------\n least_costs : pandas.DataFrame\n Least cost connections between all supply curve points and the\n transmission features with the given capacity class that are within\n \"nn_sink\" nearest infinite sinks\n \"\"\"\n max_workers = os.cpu_count() if max_workers is None else max_workers\n\n if sc_point_gids is None:\n sc_point_gids = self.sc_points['sc_point_gid'].values\n\n tie_line_voltage = self._config.capacity_to_kv(capacity_class)\n least_costs = []\n if max_workers > 1:\n logger.info('Computing Least Cost Transmission for SC points in '\n 'parallel on {} workers'.format(max_workers))\n loggers = [__name__, 'reV', 'reVX']\n with SpawnProcessPool(max_workers=max_workers,\n loggers=loggers) as exe:\n futures = []\n for _, sc_point in self.sc_points.iterrows():\n gid = sc_point['sc_point_gid']\n 
if gid in sc_point_gids:\n sc_features, radius = self._clip_to_sc_point(\n sc_point, tie_line_voltage, nn_sinks=nn_sinks,\n clipping_buffer=clipping_buffer)\n\n future = exe.submit(TransCapCosts.run,\n self._cost_fpath,\n sc_point.copy(deep=True),\n sc_features, capacity_class,\n radius=radius,\n xmission_config=self._config,\n barrier_mult=barrier_mult,\n min_line_length=self._min_line_len)\n futures.append(future)\n\n for i, future in enumerate(as_completed(futures)):\n sc_costs = future.result()\n if sc_costs is not None:\n least_costs.append(sc_costs)\n\n logger.debug('SC point {} of {} complete!'\n .format(i + 1, len(futures)))\n log_mem(logger)\n\n else:\n logger.info('Computing Least Cost Transmission for SC points in '\n 'serial')\n i = 1\n for _, sc_point in self.sc_points.iterrows():\n gid = sc_point['sc_point_gid']\n if gid in sc_point_gids:\n sc_features, radius = self._clip_to_sc_point(\n sc_point, tie_line_voltage, nn_sinks=nn_sinks,\n clipping_buffer=clipping_buffer)\n\n sc_costs = TransCapCosts.run(\n self._cost_fpath, sc_point.copy(deep=True),\n sc_features, capacity_class,\n radius=radius,\n xmission_config=self._config,\n barrier_mult=barrier_mult,\n min_line_length=self._min_line_len)\n\n if sc_costs is not None:\n least_costs.append(sc_costs)\n\n logger.debug('SC point {} of {} complete!'\n .format(i, len(sc_point_gids)))\n log_mem(logger)\n i += 1\n\n least_costs = pd.concat(least_costs).sort_values(['sc_point_gid',\n 'trans_gid'])\n capacity_class = self._config._parse_cap_class(capacity_class)\n least_costs['max_cap'] = self._config['power_classes'][capacity_class]\n lcp_frac = (len(least_costs['sc_point_gid'].unique())\n / len(sc_point_gids) * 100)\n logger.info('{:.4f}% of requested sc point gids were succesfully '\n 'mapped to transmission features'.format(lcp_frac))\n\n return least_costs.reset_index(drop=True)\n\n @classmethod\n def run(cls, cost_fpath, features_fpath, capacity_class, resolution=128,\n xmission_config=None, sc_point_gids=None, nn_sinks=2,\n clipping_buffer=1.05, barrier_mult=100, max_workers=None):\n \"\"\"\n Find Least Cost Tranmission connections between desired sc_points to\n given tranmission features for desired capacity class\n\n Parameters\n ----------\n cost_fpath : str\n Path to h5 file with cost rasters and other required layers\n features_fpath : str\n Path to geopackage with transmission features\n capacity_class : str | int\n Capacity class of transmission features to connect supply curve\n points to\n resolution : int, optional\n SC point resolution, by default 128\n xmission_config : str | dict | XmissionConfig, optional\n Path to Xmission config .json, dictionary of Xmission config\n .jsons, or preloaded XmissionConfig objects, by default None\n sc_point_gids : list, optional\n List of sc_point_gids to connect to, by default None\n nn_sinks : int, optional\n Number of nearest neighbor sinks to use for clipping radius\n calculation, by default 2\n clipping_buffer : float, optional\n Buffer to expand clipping radius by, by default 1.05\n barrier_mult : int, optional\n Tranmission barrier multiplier, used when computing the least\n cost tie-line path, by default 100\n max_workers : int, optional\n Number of workers to use for processing, if 1 run in serial,\n if None use all available cores, by default None\n\n Returns\n -------\n least_costs : pandas.DataFrame\n Least cost connections between all supply curve points and the\n transmission features with the given capacity class that are within\n \"nn_sink\" nearest infinite sinks\n 
\"\"\"\n ts = time.time()\n lcx = cls(cost_fpath, features_fpath, resolution=resolution,\n xmission_config=xmission_config)\n least_costs = lcx.process_sc_points(capacity_class,\n sc_point_gids=sc_point_gids,\n nn_sinks=nn_sinks,\n clipping_buffer=clipping_buffer,\n barrier_mult=barrier_mult,\n max_workers=max_workers)\n\n logger.info('{} connections were made to {} SC points in {:.4f} '\n 'minutes'\n .format(len(least_costs),\n len(least_costs['sc_point_gid'].unique()),\n (time.time() - ts) / 60))\n\n return least_costs\n"
] | [
[
"pandas.Series",
"numpy.ceil",
"numpy.where",
"scipy.spatial.cKDTree",
"numpy.max",
"numpy.min",
"pandas.concat",
"numpy.round",
"numpy.concatenate",
"numpy.array",
"numpy.unique"
]
] |
Nikoula86/organoidSegment | [
"b5d00256c15302ccd76b8b7a412852750476504b"
] | [
"morgana/GUIs/fluo.py"
] | [
"from PyQt5.QtWidgets import (QApplication, QComboBox, QGridLayout, QGroupBox, QLabel, QPushButton,\n QFileDialog, QMessageBox, QWidget, QSizePolicy, QCheckBox)\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas \nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport numpy as np\nimport warnings, os, time\nfrom skimage.io import imsave\nimport scipy.ndimage as ndi\nfrom matplotlib.figure import Figure\nfrom scipy.interpolate import interp1d\nimport matplotlib as mpl\nwarnings.filterwarnings(\"ignore\")\nfrom matplotlib import rc\nrc('font', size=12)\nrc('font', family='Arial')\n# rc('font', serif='Times')\nrc('pdf', fonttype=42)\n# rc('text', usetex=True)\n\n\nclass profileAP_condMode(QWidget):\n def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None, ylabel='Intensity (a.u.)'):\n super(profileAP_condMode, self).__init__(parent)\n\n self.data_all = data_all\n self.channel = channel\n self.colors = colors\n self.profileType = profileType\n self.ylabel = ylabel\n\n self.make()\n\n def make(self):\n self.figure = Figure(figsize=(4, 2.5), dpi=100)\n self.canvas = FigureCanvas(self.figure)\n self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)\n ax.set_xlabel(self.profileType)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(2,2))\n ax.set_ylabel(self.ylabel)\n ax.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,2))\n # ax.axis('off')\n self.canvas.draw()\n\n self.YnormBtn = QComboBox()\n self.YnormBtn.addItem('No normalization')\n self.YnormBtn.addItem('Global percentile')\n self.YnormBtn.addItem('Group percentile')\n self.YnormBtn.addItem('Folder percentile')\n self.YnormBtn.addItem('Manual')\n\n self.XnormBtn = QCheckBox('')\n self.XnormBtn.setChecked(False)\n self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)\n\n self.bckgBtn = QComboBox()\n self.bckgBtn.addItem('None')\n self.bckgBtn.addItem('Background')\n self.bckgBtn.addItem('Minimum')\n\n self.orientationBtn = QComboBox()\n self.orientationBtn.addItem('Signal based')\n self.orientationBtn.addItem('NO')\n\n self.alignmentBtn = QComboBox()\n self.alignmentBtn.addItem('Left')\n self.alignmentBtn.addItem('Right')\n self.alignmentBtn.addItem('Center')\n\n self.groupSelection = self.makeGroupSelectionBtns()\n\n self.applyBtn = QPushButton('Apply Settings')\n self.applyBtn.clicked.connect(self.remakePlot)\n\n lay = QGridLayout(self)\n lay.setSpacing(10)\n lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)\n lay.addWidget(self.canvas,1,0,1,2)\n lay.addWidget(QLabel('Background subtraction type:'),2,0,1,1)\n lay.addWidget(self.bckgBtn,2,1,1,1)\n lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)\n lay.addWidget(self.YnormBtn,4,1,1,1)\n lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)\n lay.addWidget(self.XnormBtn,5,1,1,1)\n lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)\n lay.addWidget(self.orientationBtn,6,1,1,1)\n lay.addWidget(QLabel('Alignment:'),7,0,1,1)\n lay.addWidget(self.alignmentBtn,7,1,1,1)\n lay.addWidget(self.groupSelection,8,0,1,2)\n lay.addWidget(self.applyBtn,9,0,1,2)\n\n self.remakePlot()\n\n self.setWindowTitle('Channel')\n QApplication.setStyle('Fusion')\n\n def onCheckingXnormBtn(self):\n if self.XnormBtn.isChecked():\n self.alignmentBtn.setEnabled(False)\n else:\n self.alignmentBtn.setEnabled(True)\n\n def 
makeGroupSelectionBtns(self):\n group = QGroupBox(\"Groups to plot\")\n self.groupPlotBtn = []\n for i in range(len(self.data_all)):\n self.groupPlotBtn.append(QCheckBox('Group '+str(i)))\n self.groupPlotBtn[-1].setChecked(True)\n \n self.legendBtn = QCheckBox('Legend')\n self.legendBtn.setChecked(False)\n\n self.rawBtn = QCheckBox('Plot raw data')\n self.rawBtn.setChecked(True)\n\n lay = QGridLayout()\n for i in range(len(self.data_all)):\n lay.addWidget(self.groupPlotBtn[i],i,0,1,1)\n lay.addWidget(self.legendBtn,0,1,1,1)\n lay.addWidget(self.rawBtn,1,1,1,1)\n\n group.setLayout(lay)\n return group\n\n def remakePlot(self):\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)\n ax.set_xlabel(self.profileType)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(2,2))\n ax.set_ylabel(self.ylabel)\n ax.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,2))\n # ax.axis('off')\n\n n_groups = len(self.data_all)\n n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]\n n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]\n\n # rearrange dataset\n profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])\n # subtract background or not\n if self.bckgBtn.currentText() == 'Background':\n profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]\n if self.bckgBtn.currentText() == 'Minimum':\n profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])\n\n # normalize fluorescence intensity accordingly\n if self.YnormBtn.currentText() == 'Global percentile':\n flat = []\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat.append(l)\n percs = np.percentile(np.array(flat),(.3,99.7))\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)\n elif self.YnormBtn.currentText() == 'Group percentile':\n flat = [[]for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat[i].append(l)\n percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n # print(percs[i])\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)\n elif self.YnormBtn.currentText() == 'Folder percentile':\n flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat[i][j].append(l)\n percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n # print(percs[i][j])\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)\n \n # normalize AP axis if necessary\n 
if self.XnormBtn.isChecked():\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profile = profiles_all[i][j][k]\n x = np.linspace(0,1,len(profile))\n fun = interp1d(x,profile)\n new_x = np.linspace(0,1,101)\n profiles_all[i][j][k] = fun(new_x)\n\n # compute length of longest gastruloid\n max_length = []\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n max_length.append(len(profiles_all[i][j][k]))\n max_length = np.max(max_length)\n\n # orient plots according to setting\n if self.orientationBtn.currentText() == 'Signal based':\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]\n n_p = len(y)\n if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):\n profiles_all[i][j][k] = profiles_all[i][j][k][::-1]\n\n # pad array to the right or left\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n w = max_length-len(profiles_all[i][j][k])\n if self.alignmentBtn.currentText() == 'Left':\n pad_width = (0,w)\n if self.alignmentBtn.currentText() == 'Right':\n pad_width = (w,0)\n elif self.alignmentBtn.currentText() == 'Center':\n if 2*int(w/2)==w:\n pad_width = (int(w/2),int(w/2))\n else:\n pad_width = (int(w/2)+1,int(w/2))\n profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)\n\n ### make plot\n lines = []\n for i in range(n_groups):\n # plot this group only if the button is checked\n if self.groupPlotBtn[i].isChecked():\n ydata_group = []\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n ydata_group.append(profiles_all[i][j][k])\n # plot the raw data if the button is checked\n if self.rawBtn.isChecked():\n ax.plot(ydata_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)\n # compute and plot mean and std\n max_length = np.max([len(d) for d in ydata_group])\n _mean = np.zeros(max_length)\n _std = np.zeros(max_length)\n for j in range(max_length):\n datapoint = []\n for data in ydata_group:\n datapoint.append(data[j])\n _mean[j] = np.nanmean(datapoint)\n _std[j] = np.nanstd(datapoint)\n line = ax.plot(_mean,'-',lw=1,c=self.colors[i],label='Mean')[0]\n ax.fill_between(range(len(_mean)),_mean-_std,_mean+_std,facecolor=self.colors[i],alpha=.2, linewidth=0.,label='Std')\n lines.append(line)\n \n # adjust axes lims\n ax.set_ylim(0,None)\n ax.set_xlim(0,None)\n if self.XnormBtn.isChecked():\n ax.set_xlim(0,100)\n if self.YnormBtn.currentText() != 'No normalization':\n ax.set_ylim(0,1)\n\n # add legend\n if self.legendBtn.isChecked():\n l = ax.legend(lines,['Group '+str(i+1) for i in range(len(self.groupPlotBtn)) if self.groupPlotBtn[i].isChecked()])\n l.get_frame().set_linewidth(0.0)\n\n self.canvas.draw()\n\nclass profileAP_tlMode(QWidget):\n #############\n # TO BE IMPLEMENTED!!!\n #############\n def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None):\n super(profileAP_tlMode, self).__init__(parent)\n\n self.data_all = data_all\n self.n_groups = len(data_all)\n self.channel = channel\n self.colors = colors\n self.profileType = profileType\n\n self.make()\n\n def make(self):\n\n self.figure = Figure(figsize=(4, 2.5), dpi=100)\n self.canvas = FigureCanvas(self.figure)\n self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n 
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)\n ax.set_xlabel(self.profileType)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(2,2))\n ax.set_ylabel('Time')\n ax.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,2))\n # ax.axis('off')\n self.canvas.draw()\n\n ###############################################\n settings_group = QGroupBox('Plot settings')\n\n self.YnormBtn = QComboBox()\n self.YnormBtn.addItem('No normalization')\n self.YnormBtn.addItem('Global percentile')\n self.YnormBtn.addItem('Group percentile')\n self.YnormBtn.addItem('Folder percentile')\n self.YnormBtn.addItem('Manual')\n\n self.XnormBtn = QCheckBox('')\n self.XnormBtn.setChecked(False)\n self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)\n\n self.bckgBtn = QComboBox()\n self.bckgBtn.addItem('None')\n self.bckgBtn.addItem('Background')\n self.bckgBtn.addItem('Minimum')\n\n self.orientationBtn = QComboBox()\n self.orientationBtn.addItem('Signal based')\n self.orientationBtn.addItem('NO')\n\n self.alignmentBtn = QComboBox()\n self.alignmentBtn.addItem('Left')\n self.alignmentBtn.addItem('Right')\n self.alignmentBtn.addItem('Center')\n\n self.aspectRatioBtn = QCheckBox('')\n self.aspectRatioBtn.setChecked(True)\n\n self.groupPlotBtn = QComboBox()\n for i in range(len(self.data_all)):\n self.groupPlotBtn.addItem('Group '+str(i+1))\n\n lay = QGridLayout(self)\n lay.addWidget(QLabel('Background subtraction:'),2,0,1,1)\n lay.addWidget(self.bckgBtn,2,1,1,1)\n lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)\n lay.addWidget(self.YnormBtn,4,1,1,1)\n lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)\n lay.addWidget(self.XnormBtn,5,1,1,1)\n lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)\n lay.addWidget(self.orientationBtn,6,1,1,1)\n lay.addWidget(QLabel('Alignment:'),7,0,1,1)\n lay.addWidget(self.alignmentBtn,7,1,1,1)\n lay.addWidget(QLabel('Set axes aspect ratio to equal:'),8,0,1,1)\n lay.addWidget(self.aspectRatioBtn,8,1,1,1)\n lay.addWidget(QLabel('Current group:'),9,0,1,1)\n lay.addWidget(self.groupPlotBtn,9,1,1,2)\n settings_group.setLayout(lay)\n\n #######################\n\n self.applyBtn = QPushButton('Apply Settings')\n self.applyBtn.clicked.connect(self.remakePlot)\n\n self.saveBtn = QPushButton('Save Tif image')\n self.saveBtn.clicked.connect(self.save_tif)\n\n lay = QGridLayout(self)\n lay.setSpacing(10)\n lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)\n lay.addWidget(self.canvas,1,0,1,2)\n lay.addWidget(settings_group,2,0,1,2) \n lay.addWidget(self.applyBtn,3,0,1,2)\n lay.addWidget(self.saveBtn,4,0,1,2)\n\n self.remakePlot()\n\n self.setWindowTitle('Channel')\n QApplication.setStyle('Macintosh')\n\n def onCheckingXnormBtn(self):\n if self.XnormBtn.isChecked():\n self.alignmentBtn.setEnabled(False)\n else:\n self.alignmentBtn.setEnabled(True)\n\n def remakePlot(self):\n\n n_groups = len(self.data_all)\n n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]\n n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]\n\n # rearrange dataset\n profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])\n # subtract background or not\n if self.bckgBtn.currentText() == 'Background':\n 
profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]\n if self.bckgBtn.currentText() == 'Minimum':\n profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])\n\n # normalize fluorescence intensity accordingly\n percs = [None,None]\n if self.YnormBtn.currentText() == 'Global percentile':\n flat = []\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat.append(l)\n percs = np.percentile(np.array(flat),(.3,99.7))\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)\n elif self.YnormBtn.currentText() == 'Group percentile':\n flat = [[]for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat[i].append(l)\n percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n # print(percs[i])\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)\n elif self.YnormBtn.currentText() == 'Folder percentile':\n flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n for l in profiles_all[i][j][k]:\n flat[i][j].append(l)\n percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n # print(percs[i][j])\n profile = np.array(profiles_all[i][j][k])\n profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)\n self.percs = percs\n \n # normalize AP axis if necessary\n if self.XnormBtn.isChecked():\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n profile = profiles_all[i][j][k]\n x = np.linspace(0,1,len(profile))\n fun = interp1d(x,profile)\n new_x = np.linspace(0,1,101)\n profiles_all[i][j][k] = fun(new_x)\n\n # compute length of longest gastruloid\n max_length = []\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n max_length.append(len(profiles_all[i][j][k]))\n max_length = np.max(max_length)\n\n # orient plots according to setting\n if self.orientationBtn.currentText() == 'Signal based':\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]\n n_p = len(y)\n if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):\n profiles_all[i][j][k] = profiles_all[i][j][k][::-1]\n\n # pad array to the right or left\n for i in range(n_groups):\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n w = max_length-len(profiles_all[i][j][k])\n if self.alignmentBtn.currentText() == 'Left':\n pad_width = (0,w)\n if self.alignmentBtn.currentText() == 'Right':\n pad_width = (w,0)\n elif self.alignmentBtn.currentText() == 'Center':\n if 2*int(w/2)==w:\n pad_width = (int(w/2),int(w/2))\n else:\n pad_width = (int(w/2)+1,int(w/2))\n profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)\n\n ### make plot\n # lines = []\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n 
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)\n ax.set_xlabel(self.profileType)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(2,2))\n ax.set_ylabel('Time')\n ax.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,2))\n # ax.axis('off') \n \n # plot the selected group only\n i = self.groupPlotBtn.currentIndex()\n\n # compute and plot mean and std of the selected group\n # prepare blank image\n max_t = np.max([n_gastr[i][j] for j in range(n_folders[i])])\n max_l = np.max([len(profiles_all[i][j][k]) for j in range(n_folders[i]) for k in range(n_gastr[i][j])])\n\n data_mean = np.zeros((max_t,max_l))\n data_count = np.zeros((max_t,max_l))\n for j in range(n_folders[i]):\n for k in range(n_gastr[i][j]):\n data = np.nan_to_num(profiles_all[i][j][k])\n data_mean[k,:] += data \n data_count[k,:] += data!=0 \n # plot the raw data if the button is checked\n # if self.rawBtn.isChecked():\n # ax.plot(data_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)\n data_mean = data_mean.astype(np.float)/data_count.astype(np.float)\n data_mean = np.nan_to_num(data_mean)\n\n aspect = 'auto'\n if self.aspectRatioBtn.isChecked():\n aspect = 'equal'\n \n ax.imshow(data_mean, aspect=aspect)\n ax.set_title('Group '+str(i+1))\n self.tif_data = data_mean\n\n self.canvas.draw()\n \n def save_tif(self):\n name,_ = QFileDialog.getSaveFileName(self, 'Save Overview File')\n if name != '':\n ### check file extension: allow to save in other formats, but bias towards tif\n if os.path.splitext(name)[-1]!='.tif':\n buttonReply = QMessageBox.question(self,'File format warning!','File format not recommended. Do you want to save the image as tif?')\n if buttonReply == QMessageBox.Yes:\n name = os.path.splitext(name)[0]+'.tif'\n \n # convert the image into int16 with the right brightness and contrast\n if self.percs[0]!=None:\n self.tif_data = (2**16-1)*(self.tif_data-self.percs[0])/(self.percs[1]-self.percs[0])\n imsave(name+'', self.tif_data.astype(np.uint16))\n\n\n"
] | [
[
"scipy.interpolate.interp1d",
"numpy.zeros",
"numpy.nanmean",
"matplotlib.figure.Figure",
"numpy.pad",
"matplotlib.rc",
"numpy.nanstd",
"numpy.linspace",
"numpy.max",
"numpy.clip",
"numpy.min",
"numpy.isnan",
"matplotlib.backends.backend_qt5agg.FigureCanvas",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.nan_to_num",
"numpy.array"
]
] |
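The remakePlot methods in the record above pad every AP profile with NaN up to the longest gastruloid before averaging, with Left/Right/Center alignment options. A minimal standalone sketch of that alignment step (the function name and the sample data are illustrative, not from the file):

    import numpy as np

    def align_profiles(profiles, max_length, alignment="Center"):
        # Pad 1-D intensity profiles with NaN so they share a common length,
        # mirroring the Left/Right/Center options used in remakePlot.
        aligned = []
        for p in profiles:
            w = max_length - len(p)
            if alignment == "Left":
                pad = (0, w)
            elif alignment == "Right":
                pad = (w, 0)
            else:  # Center: the extra sample goes on the left when w is odd
                pad = (w - w // 2, w // 2)
            aligned.append(np.pad(np.asarray(p, dtype=float), pad,
                                  mode="constant", constant_values=np.nan))
        return np.array(aligned)

    stack = align_profiles([[1, 2, 3], [2, 3]], 3)
    mean, std = np.nanmean(stack, axis=0), np.nanstd(stack, axis=0)  # NaN padding is ignored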
libinruan/hierarchical_bayesian_target_encoder | [
"7510028a8ad1dea308802c4ca3d3a05533a9c89b"
] | [
"BayCatEncoder/code.py"
] | [
"#%%\nimport numpy as np\nimport pandas as pd\nimport time\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom collections import defaultdict\nfrom sklearn.model_selection import KFold, StratifiedKFold \n\nclass Timer:\n def __enter__(self):\n self.start=time.time()\n return self\n def __exit__(self, *args):\n self.end=time.time()\n self.hour, temp = divmod((self.end - self.start), 3600)\n self.min, self.second = divmod(temp, 60)\n self.hour, self.min, self.second = int(self.hour), int(self.min), round(self.second, 2)\n return self\n\nclass BayCatEncoder(BaseEstimator, TransformerMixin):\n def __init__(self, \n group_cols, \n target_col='target', \n N_min=1, # the higher, the more regularization is introduced into the update.\n CV=True, \n n_fold=5,\n verbosity=True,\n delimiter='.',\n drop_original=False, \n drop_intermediate=False,\n random_seed=2020):\n self.group_cols = [group_cols] if isinstance(group_cols, str) else group_cols # List of column names combination: e.g. ['n1.n2.n4', 'n3.n4', 'n2'].\n self.target_col = target_col # String: 'target' by default.\n self.stats = defaultdict(dict) # key: column names combination; value: corresponding info about n, N, and computed code.\n self.N_min = N_min # regularization control\n self.drop_original = drop_original # toggle key for whether to drop original column name(s) or not.\n self.CV = CV # Bool\n self.n_fold = n_fold\n self.drop_intermediate = drop_intermediate\n self.delimiter = delimiter\n self.verbosity = verbosity # Bool\n self.seed = random_seed\n self.set_original_col = set()\n\n def fit(self, X, y): \n self.col_subsets = self._generate_subsets(self.group_cols)\n df = pd.concat([X.copy(), y.copy()], axis=1)\n assert(isinstance(self.target_col, str))\n df.columns = X.columns.tolist() + [self.target_col] \n assert(self._check_col_consistency(X))\n if not self.CV:\n self._single_fit(df)\n else:\n self._cv_fit(df)\n return self\n\n def _single_fit(self, df):\n size_col_subsets = len(self.col_subsets)\n count_subset = 0 \n print(f'start bayesian target encoding on cross features in the following order: {self.col_subsets}') \n for subset in self.col_subsets:\n count_subset += 1\n with Timer() as t:\n if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets}')\n df_stat, stat, cross_features = self._update(df, subset)\n features_encoded = cross_features + '_code'\n self.stats[cross_features] = pd.merge(\n stat, \n df_stat.groupby(subset)[features_encoded].mean(), \n left_index=True, \n right_index=True) \n if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds') \n return self \n\n def _cv_fit(self, df):\n kf = StratifiedKFold(n_splits = self.n_fold, shuffle = True, random_state=self.seed)\n size_col_subsets = len(self.col_subsets)\n count_subset = 0\n for subset in self.col_subsets:\n count_subset += 1\n with Timer() as t:\n for i, (tr_idx, val_idx) in enumerate(kf.split(df.drop(self.target_col, axis=1), df[self.target_col])):\n if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets} - Round {i+1}/{self.n_fold}')\n df_tr, df_val = df.iloc[tr_idx].copy(), df.iloc[val_idx].copy() # Vital for avoid \"A value is trying to be set on a copy of a slice from a DataFrame.\" warning.\n df_stat, stat, cross_features = self._update(df_tr, subset)\n features_encoded = cross_features + '_code'\n df.loc[df.index[val_idx], features_encoded] = pd.merge(\n df_val[subset], \n df_stat.groupby(subset)[features_encoded].mean(),\n left_on=subset,\n right_index=True,\n 
how='left'\n )[features_encoded].copy() \\\n .fillna(df[self.target_col].mean()) \n self.stats[cross_features] = df.groupby(subset)[features_encoded].mean().to_frame()\n if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds') \n return self \n\n def _update(self, df, subset):\n self.global_prior_mean = df[self.target_col].mean()\n if len(subset) == 1:\n self.set_original_col.add(*subset)\n upper_level_cols = 'global'\n if not upper_level_cols + '_prior_mean' in df.columns:\n df.loc[:, upper_level_cols + '_prior_mean'] = self.global_prior_mean\n else:\n upper_level_cols = self.delimiter.join(subset[:-1]) # e.g. the n1.n2 subset's upper level feature is `n1`.\n if not upper_level_cols + '_prior_mean' in df.columns: \n df.loc[:, upper_level_cols + '_prior_mean'] = pd.merge(\n df[subset[:-1]], \n self.stats[upper_level_cols][upper_level_cols + '_code'], \n left_on=subset[:-1], \n right_index=True, \n how='left'\n )[upper_level_cols + '_code'].copy()\n \n stat = df.groupby(subset).agg(\n n=(self.target_col, 'sum'),\n N=(self.target_col, 'count'),\n prior_mean=(upper_level_cols + '_prior_mean', 'mean')\n )\n # Calculate posterior mean\n df_stat = pd.merge(df[subset], stat, left_on=subset, right_index=True, how='left')\n df_stat['n'].mask(df_stat['n'].isnull(), df_stat['prior_mean'], inplace=True) \n df_stat['N'].fillna(1., inplace=True)\n df_stat.loc[:, 'N_prior'] = df_stat['N'].map(lambda x: max(self.N_min - x, 0))\n df_stat.loc[:, 'alpha_prior'] = df_stat['prior_mean'] * df_stat['N_prior']\n df_stat.loc[:, 'beta_prior'] = (1. - df_stat['prior_mean']) * df_stat['N_prior'] # Large N -> zero N_prior -> zero alpha_prior and zero beta_prior -> if n is zero as well -> alpha prior, beta prior both zero -> alpha zero -> posterior mean = zero as well. 
\n if len(subset) == 1:\n cross_features = subset[0]\n else:\n cross_features = self.delimiter.join(subset)\n df_stat.loc[:, cross_features + '_code'] = df_stat.apply(self._stat_mean, axis=1) # core # TEST set!!\n return df_stat, stat, cross_features\n\n def _generate_subsets(self, groups, delimiter='.'):\n subsets = defaultdict(list) \n for g in groups:\n chain = g.split(delimiter)\n for i in range(len(chain)):\n if chain[i] and not chain[:i+1] in subsets[i]: subsets[i].append(chain[:i+1])\n ret = []\n for _, v in subsets.items():\n if not v in ret: ret.extend(v)\n return ret \n\n def _stat_mean(self, X):\n df = X.copy()\n alpha = df['alpha_prior'] + df['n']\n beta = df['beta_prior'] + df['N'] - df['n']\n return alpha / (alpha + beta)\n\n def _check_col_consistency(self, df): \n \"\"\"Check whether columns specified in `self.group_cols` are all included in `df`.\n \"\"\" \n s = set()\n for col_subset in self.col_subsets:\n s |= set(col_subset)\n for col in s:\n if not col in df.columns: return False\n return True \n\n def transform(self, X):\n assert(self._check_col_consistency(X))\n for subset in self.col_subsets:\n key = '.'.join(subset)\n X = pd.merge(\n X, \n self.stats[key][key + '_code'], \n left_on=subset, \n right_index=True, \n how='left')\n if len(subset) == 1:\n X[key + '_code'].fillna(self.global_prior_mean, inplace=True)\n else:\n parent_key = '.'.join(subset[:-1]) + '_code' \n X[key + '_code'].fillna(X[parent_key].mask(X[parent_key] > self.global_prior_mean, self.global_prior_mean), inplace=True)\n if self.drop_original:\n for col in self.set_original_col:\n X.drop(col, axis=1, inplace=True)\n X.rename(columns={col+'_code': col}, inplace=True)\n if self.drop_intermediate: \n for col in X.columns:\n if col.endswith('_code') and not col.strip('_code') in self.group_cols:\n X.drop(col, axis=1, inplace=True)\n return X\n\n#%%\nif __name__ == '__main__':\n np.random.seed(1)\n k = 15\n n1 = np.random.choice(['a','b'], k)\n n2 = np.random.choice(['c','d'], k)\n n3 = np.random.choice(['e','f'], k)\n target = np.random.randint(0, 2, size=k)\n train = pd.DataFrame(\n {'n1': n1, 'n2': n2, 'n3':n3, 'target': target}, \n columns=['n1', 'n2', 'n3', 'target']\n )\n train.columns = ['n1','n2','n3', 'target']\n \n train\n\n k = 6\n n4 = np.random.choice(['a','b'], k)\n n5 = np.random.choice(['c','d'], k)\n n6 = np.random.choice(['e','f'], k)\n test = pd.DataFrame({'n4': n4, 'n2': n5, 'n3':n6})\n test.columns = ['n1','n2','n3']\n \n test\n \n te = BayCatEncoder(\n 'n1.n2.n3', #['n1.n2.n3', 'n2.n3', 'n3'], \n target_col='target', \n drop_original=False, \n drop_intermediate=False,\n CV=False\n ) \\\n .fit(train.drop('target', axis=1), train.target) \n # te.transform(test)\n te.transform(test)\n\n# %%\n"
] | [
[
"sklearn.model_selection.StratifiedKFold",
"pandas.DataFrame",
"numpy.random.seed",
"numpy.random.choice",
"pandas.merge",
"numpy.random.randint"
]
] |
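The _update/_stat_mean pair in the BayCatEncoder record above implements a beta-binomial shrinkage of each category's target rate toward the prior mean of its parent feature combination. The same posterior mean written out for one category, with made-up numbers for illustration:

    # Posterior mean produced by BayCatEncoder._stat_mean for a single category.
    n, N = 3, 4          # sum and count of the binary target within the category
    prior_mean = 0.5     # code of the parent (upper-level) feature combination
    N_min = 10           # regularization strength passed to the constructor

    N_prior = max(N_min - N, 0)                    # 6
    alpha = prior_mean * N_prior + n               # 6.0
    beta = (1.0 - prior_mean) * N_prior + (N - n)  # 4.0
    code = alpha / (alpha + beta)
    print(code)  # 0.6 -- the raw rate 3/4 shrunk toward the prior 0.5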
xinwang1/Quantum | [
"0f56e36e9e6111547547ae1b6cd5df307b41c1ac"
] | [
"paddle_quantum/QAOA/example/main.py"
] | [
"# Copyright (c) 2020 Institute for Quantum Computing, Baidu Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nmain\n\"\"\"\n\nfrom paddle import fluid\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nfrom paddle_quantum.utils import pauli_str_to_matrix\nfrom paddle_quantum.QAOA.Paddle_QAOA import Paddle_QAOA\nfrom paddle_quantum.QAOA.QAOA_Prefunc import generate_graph, H_generator\n\n\ndef main(N=4):\n # number of qubits or number of nodes in the graph\n N = 4\n classical_graph, classical_graph_adjacency = generate_graph(N, GRAPHMETHOD=1)\n print(classical_graph_adjacency)\n\n # Convert the Hamiltonian's list form to matrix form\n H_matrix = pauli_str_to_matrix(H_generator(N, classical_graph_adjacency), N)\n\n H_diag = np.diag(H_matrix).real\n H_max = np.max(H_diag)\n H_min = np.min(H_diag)\n\n print(H_diag)\n print('H_max:', H_max, ' H_min:', H_min)\n\n pos = nx.circular_layout(classical_graph)\n nx.draw(classical_graph, pos, width=4, with_labels=True, font_weight='bold')\n plt.show()\n\n classical_graph, classical_graph_adjacency = generate_graph(N, 1)\n\n opt_cir = Paddle_QAOA(classical_graph_adjacency, N=4, P=4, METHOD=1, ITR=120, LR=0.1)\n\n # Load the data of QAOA\n x1 = np.load('./output/summary_data.npz')\n\n H_min = np.ones([len(x1['iter'])]) * H_min\n\n # Plot loss\n loss_QAOA, = plt.plot(x1['iter'], x1['energy'], alpha=0.7, marker='', linestyle=\"--\", linewidth=2, color='m')\n benchmark, = plt.plot(x1['iter'], H_min, alpha=0.7, marker='', linestyle=\":\", linewidth=2, color='b')\n plt.xlabel('Number of iteration')\n plt.ylabel('Performance of the loss function for QAOA')\n\n plt.legend(handles=[\n loss_QAOA,\n benchmark\n ],\n labels=[\n r'Loss function $\\left\\langle {\\psi \\left( {\\bf{\\theta }} \\right)} '\n r'\\right|H\\left| {\\psi \\left( {\\bf{\\theta }} \\right)} \\right\\rangle $',\n 'The benchmark result',\n ], loc='best')\n\n # Show the plot\n plt.show()\n\n with fluid.dygraph.guard():\n # Measure the output state of the QAOA circuit for 1024 shots by default\n prob_measure = opt_cir.measure(plot=True)\n\n # Find the max value in measured probability of bitstrings\n max_prob = max(prob_measure.values())\n # Find the bitstring with max probability\n solution_list = [result[0] for result in prob_measure.items() if result[1] == max_prob]\n print(\"The output bitstring:\", solution_list)\n\n # Draw the graph representing the first bitstring in the solution_list to the MaxCut-like problem\n head_bitstring = solution_list[0]\n\n node_cut = [\"blue\" if head_bitstring[node] == \"1\" else \"red\" for node in classical_graph]\n\n edge_cut = [\n \"solid\" if head_bitstring[node_row] == head_bitstring[node_col] else \"dashed\"\n for node_row, node_col in classical_graph.edges()\n ]\n nx.draw(\n classical_graph,\n pos,\n node_color=node_cut,\n style=edge_cut,\n width=4,\n with_labels=True,\n font_weight=\"bold\",\n )\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.load",
"matplotlib.pyplot.legend",
"numpy.diag",
"matplotlib.pyplot.show",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
Rishank2610/gammapy | [
"3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76",
"3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76"
] | [
"gammapy/utils/testing.py",
"gammapy/datasets/tests/test_datasets.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Utilities for testing\"\"\"\nimport os\nimport sys\nfrom numpy.testing import assert_allclose\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.time import Time\n\n__all__ = [\n \"requires_dependency\",\n \"requires_data\",\n \"mpl_plot_check\",\n \"assert_quantity_allclose\",\n \"assert_skycoord_allclose\",\n \"assert_time_allclose\",\n \"Checker\",\n]\n\n# Cache for `requires_dependency`\n_requires_dependency_cache = {}\n\n\ndef requires_dependency(name):\n \"\"\"Decorator to declare required dependencies for tests.\n\n Examples\n --------\n ::\n\n from gammapy.utils.testing import requires_dependency\n\n @requires_dependency('scipy')\n def test_using_scipy():\n import scipy\n ...\n \"\"\"\n import pytest\n\n if name in _requires_dependency_cache:\n skip_it = _requires_dependency_cache[name]\n else:\n try:\n __import__(name)\n skip_it = False\n except ImportError:\n skip_it = True\n\n _requires_dependency_cache[name] = skip_it\n\n reason = f\"Missing dependency: {name}\"\n return pytest.mark.skipif(skip_it, reason=reason)\n\n\ndef has_data(name):\n \"\"\"Is a certain set of data available?\"\"\"\n if name == \"gammapy-extra\":\n return \"GAMMAPY_EXTRA\" in os.environ\n elif name == \"gammapy-data\":\n return \"GAMMAPY_DATA\" in os.environ\n elif name == \"gamma-cat\":\n return \"GAMMA_CAT\" in os.environ\n elif name == \"fermi-lat\":\n return \"GAMMAPY_FERMI_LAT_DATA\" in os.environ\n else:\n raise ValueError(f\"Invalid name: {name}\")\n\n\ndef requires_data(name=\"gammapy-data\"):\n \"\"\"Decorator to declare required data for tests.\n\n Examples\n --------\n ::\n\n from gammapy.utils.testing import requires_data\n\n @requires_data()\n def test_using_data_files():\n filename = \"$GAMMAPY_DATA/...\"\n ...\n \"\"\"\n import pytest\n\n if not isinstance(name, str):\n raise TypeError(\n \"You must call @requires_data with a name (str). 
\"\n \"Usually this: @requires_data()\"\n )\n\n skip_it = not has_data(name)\n\n reason = f\"Missing data: {name}\"\n return pytest.mark.skipif(skip_it, reason=reason)\n\n\ndef run_cli(cli, args, exit_code=0):\n \"\"\"Run Click command line tool.\n\n Thin wrapper around `click.testing.CliRunner`\n that prints info to stderr if the command fails.\n\n Parameters\n ----------\n cli : click.Command\n Click command\n args : list of str\n Argument list\n exit_code : int\n Expected exit code of the command\n\n Returns\n -------\n result : `click.testing.Result`\n Result\n \"\"\"\n from click.testing import CliRunner\n\n result = CliRunner().invoke(cli, args, catch_exceptions=False)\n\n if result.exit_code != exit_code:\n sys.stderr.write(\"Exit code mismatch!\\n\")\n sys.stderr.write(\"Output:\\n\")\n sys.stderr.write(result.output)\n\n return result\n\n\ndef assert_skycoord_allclose(actual, desired):\n \"\"\"Assert all-close for `astropy.coordinates.SkyCoord` objects.\n\n - Frames can be different, aren't checked at the moment.\n \"\"\"\n assert isinstance(actual, SkyCoord)\n assert isinstance(desired, SkyCoord)\n assert_allclose(actual.data.lon.deg, desired.data.lon.deg)\n assert_allclose(actual.data.lat.deg, desired.data.lat.deg)\n\n\ndef assert_time_allclose(actual, desired, atol=1e-3):\n \"\"\"Assert all-close for `astropy.time.Time` objects.\n\n atol is absolute tolerance in seconds.\n \"\"\"\n assert isinstance(actual, Time)\n assert isinstance(desired, Time)\n assert actual.scale == desired.scale\n assert actual.format == desired.format\n dt = actual - desired\n assert_allclose(dt.sec, 0, rtol=0, atol=atol)\n\n\ndef assert_quantity_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs):\n \"\"\"Assert all-close for `astropy.units.Quantity` objects.\n\n Requires that ``unit`` is identical, not just that quantities\n are allclose taking different units into account.\n\n We prefer this kind of assert for testing, since units\n should only change on purpose, so this tests more behaviour.\n \"\"\"\n # TODO: change this later to explicitly check units are the same!\n # assert actual.unit == desired.unit\n args = _unquantify_allclose_arguments(actual, desired, rtol, atol)\n assert_allclose(*args, **kwargs)\n\n\ndef _unquantify_allclose_arguments(actual, desired, rtol, atol):\n actual = u.Quantity(actual, subok=True, copy=False)\n\n desired = u.Quantity(desired, subok=True, copy=False)\n try:\n desired = desired.to(actual.unit)\n except u.UnitsError:\n raise u.UnitsError(\n \"Units for 'desired' ({}) and 'actual' ({}) \"\n \"are not convertible\".format(desired.unit, actual.unit)\n )\n\n if atol is None:\n # by default, we assume an absolute tolerance of 0\n atol = u.Quantity(0)\n else:\n atol = u.Quantity(atol, subok=True, copy=False)\n try:\n atol = atol.to(actual.unit)\n except u.UnitsError:\n raise u.UnitsError(\n \"Units for 'atol' ({}) and 'actual' ({}) \"\n \"are not convertible\".format(atol.unit, actual.unit)\n )\n\n rtol = u.Quantity(rtol, subok=True, copy=False)\n try:\n rtol = rtol.to(u.dimensionless_unscaled)\n except Exception:\n raise u.UnitsError(\"`rtol` should be dimensionless\")\n\n return actual.value, desired.value, rtol.value, atol.value\n\n\ndef mpl_plot_check():\n \"\"\"Matplotlib plotting test context manager.\n\n It create a new figure on __enter__ and calls savefig for the\n current figure in __exit__. This will trigger a render of the\n Figure, which can sometimes raise errors if there is a problem.\n\n This is writing to an in-memory byte buffer, i.e. 
is faster\n than writing to disk.\n \"\"\"\n from io import BytesIO\n import matplotlib.pyplot as plt\n\n class MPLPlotCheck:\n def __enter__(self):\n plt.figure()\n\n def __exit__(self, type, value, traceback):\n plt.savefig(BytesIO(), format=\"png\")\n plt.close()\n\n return MPLPlotCheck()\n\n\nclass Checker:\n \"\"\"Base class for checker classes in Gammapy.\"\"\"\n\n def run(self, checks=\"all\"):\n if checks == \"all\":\n checks = self.CHECKS.keys()\n\n unknown_checks = sorted(set(checks).difference(self.CHECKS.keys()))\n if unknown_checks:\n raise ValueError(f\"Unknown checks: {unknown_checks!r}\")\n\n for check in checks:\n method = getattr(self, self.CHECKS[check])\n yield from method()\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nfrom numpy.testing import assert_allclose\nfrom gammapy.datasets import Datasets\nfrom gammapy.modeling.tests.test_fit import MyDataset\n\n\[email protected](scope=\"session\")\ndef datasets():\n return Datasets([MyDataset(name=\"test-1\"), MyDataset(name=\"test-2\")])\n\n\ndef test_datasets_init(datasets):\n # Passing a Python list of `Dataset` objects should work\n Datasets(list(datasets))\n\n # Passing an existing `Datasets` object should work\n Datasets(datasets)\n\n\ndef test_datasets_types(datasets):\n assert datasets.is_all_same_type\n\n\ndef test_datasets_likelihood(datasets):\n likelihood = datasets.stat_sum()\n assert_allclose(likelihood, 14472200.0002)\n\n\ndef test_datasets_str(datasets):\n assert \"Datasets\" in str(datasets)\n\n\ndef test_datasets_getitem(datasets):\n assert datasets[\"test-1\"].name == \"test-1\"\n assert datasets[\"test-2\"].name == \"test-2\"\n\n\ndef test_names(datasets):\n assert datasets.names == [\"test-1\", \"test-2\"]\n\n\ndef test_Datasets_mutation():\n dat = MyDataset(name=\"test-1\")\n dats = Datasets([MyDataset(name=\"test-2\"), MyDataset(name=\"test-3\")])\n dats2 = Datasets([MyDataset(name=\"test-4\"), MyDataset(name=\"test-5\")])\n\n dats.insert(0, dat)\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\"]\n\n dats.extend(dats2)\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\", \"test-4\", \"test-5\"]\n\n dat3 = dats[3]\n dats.remove(dats[3])\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\", \"test-5\"]\n dats.append(dat3)\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\", \"test-5\", \"test-4\"]\n dats.pop(3)\n assert dats.names == [\"test-1\", \"test-2\", \"test-3\", \"test-4\"]\n\n with pytest.raises(ValueError, match=\"Dataset names must be unique\"):\n dats.append(dat)\n with pytest.raises(ValueError, match=\"Dataset names must be unique\"):\n dats.insert(0, dat)\n with pytest.raises(ValueError, match=\"Dataset names must be unique\"):\n dats.extend(dats2)\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"numpy.testing.assert_allclose"
],
[
"numpy.testing.assert_allclose"
]
] |
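mpl_plot_check in the gammapy record above is a context manager that opens a figure on entry and forces a render (savefig to an in-memory buffer) on exit. A test using it might look roughly like this; the plotting call is a placeholder, not taken from the file:

    import matplotlib.pyplot as plt
    from gammapy.utils.testing import mpl_plot_check

    def test_my_plot():
        # leaving the context renders the current figure to an in-memory PNG,
        # which surfaces plotting errors without writing to disk
        with mpl_plot_check():
            plt.plot([0, 1], [0, 1])  # stand-in for the plotting call under test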
Zihang97/PAGAN | [
"9233fc54ecf49d6a82bb0794333d61f707439a68"
] | [
"src/snlayers/snconv1d.py"
] | [
"# coding=utf-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules import conv\nfrom torch.nn.modules.utils import _single\nfrom ..functions.max_sv import max_singular_value\n\nclass SNConv1d(conv._ConvNd):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\n kernel_size = _single(kernel_size)\n stride = _single(stride)\n padding = _single(padding)\n dilation = _single(dilation)\n super(SNConv1d, self).__init__(\n in_channels, out_channels, kernel_size, stride, padding, dilation,\n False, _single(0), groups, bias)\n self.register_buffer('u', torch.Tensor(1, out_channels).normal_())\n\n @property\n def W_(self):\n w_mat = self.weight.view(self.weight.size(0), -1)\n sigma, _u = max_singular_value(w_mat, self.u)\n self.u.copy_(_u)\n return self.weight / sigma\n\n def forward(self, input):\n return F.conv1d(input, self.W_, self.bias, self.stride, self.padding, self.dilation, self.groups)\n"
] | [
[
"torch.nn.modules.utils._single",
"torch.nn.functional.conv1d",
"torch.Tensor"
]
] |
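SNConv1d imports max_singular_value from ..functions.max_sv, which is not part of this record. A plausible sketch of such a helper, following the usual SNGAN power-iteration recipe and matching the shapes used above (u is the (1, out_channels) buffer, W is the flattened weight); the real implementation may differ, for example in how it detaches u:

    import torch
    import torch.nn.functional as F

    def max_singular_value(W, u, n_power_iterations=1):
        # W: (out_channels, in_features), u: (1, out_channels)
        for _ in range(n_power_iterations):
            v = F.normalize(torch.matmul(u, W), p=2, dim=1)      # (1, in_features)
            u = F.normalize(torch.matmul(v, W.t()), p=2, dim=1)  # (1, out_channels)
        sigma = torch.sum(u * torch.matmul(v, W.t()))            # approximates u W v^T
        return sigma, u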
samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | [
"619c5c0b17438d1014f7ca7e4ce13cc44c45de3c"
] | [
"src/classifiers.py"
] | [
"# -*- coding: utf-8 -*- #\n\"\"\"*********************************************************************************************\"\"\"\n# FileName [ classifiers.py ]\n# Synopsis [ 'Naive Bayes' and 'Decision Tree' training, testing, and tunning functions ]\n# Author [ Ting-Wei Liu (Andi611) ]\n# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]\n\"\"\"*********************************************************************************************\"\"\"\n\n\n###############\n# IMPORTATION #\n###############\nimport numpy as np\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.naive_bayes import ComplementNB\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\nfrom sklearn import tree\n\n\n############\n# CONSTANT #\n############\nN_FOLD = 10\nDEPTHS = np.arange(1, 64)\nALPHAS = np.arange(0.001, 1.0, 0.001)\nALPHAS_MUSHROOM = np.arange(0.0001, 1.0, 0.0001)\nBEST_DISTRIBUTION = 'Multinominal'\n\n\n\n###############\n# NAIVE BAYES #\n###############\nclass naive_bayes_runner(object):\n\n\tdef __init__(self, MODEL, train_x, train_y, test_x, test_y):\n\t\t\n\t\t#---data---#\n\t\tself.train_x = train_x\n\t\tself.train_y = train_y\n\t\tself.test_x = test_x\n\t\tself.test_y = test_y\n\n\t\t#---model---#\n\t\tself.cross_validate = False\n\t\tself.MODEL = MODEL\n\n\t\tif self.MODEL == 'NEWS':\n\t\t\tself.models = {\t'Guassian' : GaussianNB(),\n\t\t\t\t\t \t \t'Multinominal' : MultinomialNB(alpha=0.065),\n\t\t\t\t\t\t\t'Complement' : ComplementNB(alpha=0.136),\n\t\t\t\t\t\t \t'Bernoulli' : BernoulliNB(alpha=0.002) }\n\t\tif self.MODEL == 'MUSHROOM':\n\t\t\tALPHAS = ALPHAS_MUSHROOM\n\t\t\tself.models = {\t'Guassian' : GaussianNB(),\n\t\t\t\t\t \t \t'Multinominal' : MultinomialNB(alpha=0.0001),\n\t\t\t\t\t\t\t'Complement' : ComplementNB(alpha=0.0001),\n\t\t\t\t\t\t \t'Bernoulli' : BernoulliNB(alpha=0.0001) }\n\t\tif self.MODEL == 'INCOME':\n\t\t\tself.cross_validate = True\n\t\t\tself.models = {\t'Guassian' : GaussianNB(),\n\t\t\t\t\t \t \t'Multinominal' : MultinomialNB(alpha=0.959),\n\t\t\t\t\t\t\t'Complement' : ComplementNB(alpha=0.16),\n\t\t\t\t\t\t \t'Bernoulli' : BernoulliNB(alpha=0.001) }\n\n\n\tdef _fit_and_evaluate(self, model):\n\t\tmodel_fit = model.fit(self.train_x, self.train_y)\n\t\tpred_y = model_fit.predict(self.test_x)\n\t\tacc = metrics.accuracy_score(self.test_y, pred_y)\n\t\treturn acc, pred_y\n\t\n\n\tdef search_alpha(self):\n\t\ttry:\n\t\t\tfrom tqdm import tqdm\n\t\texcept:\n\t\t\traise ImportError('Failed to import tqdm, use the following command to install: pip3 install tqdm')\n\t\tfor distribution, model in self.models.items():\n\t\t\tbest_acc = 0.0\n\t\t\tbest_alpha = 0.001\n\t\t\tif distribution != 'Guassian': \n\t\t\t\tprint('>> [Naive Bayes Runner] Searching for best alpha value, distribution:', distribution)\n\t\t\t\tfor alpha in tqdm(ALPHAS):\n\t\t\t\t\tmodel.set_params(alpha=alpha)\n\t\t\t\t\tif self.cross_validate: \n\t\t\t\t\t\tscores = cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\t\t\t\tacc = scores.mean()\n\t\t\t\t\telse:\n\t\t\t\t\t\tacc, _ = self._fit_and_evaluate(model)\n\t\t\t\t\tif acc > best_acc:\n\t\t\t\t\t\tbest_acc = acc\n\t\t\t\t\t\tbest_alpha = alpha\n\t\t\t\tprint('>> [Naive Bayes Runner] '+ distribution + ' - Best Alpha Value:', best_alpha)\n\n\n\tdef run_best_all(self):\n\t\tfor distribution, model in self.models.items():\n\t\t\tif self.cross_validate: \n\t\t\t\tscores = 
cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\t\tacc = scores.mean()\n\t\t\telse:\n\t\t\t\tacc, _ = self._fit_and_evaluate(model)\n\t\t\tprint('>> [Naive Bayes Runner] '+ distribution + ' - Accuracy:', acc)\n\n\n\tdef run_best(self):\n\t\tif self.cross_validate: \n\t\t\tscores = cross_val_score(self.models[BEST_DISTRIBUTION], self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\tacc = scores.mean()\n\t\t\tmodel_fit = self.models[BEST_DISTRIBUTION].fit(self.train_x, self.train_y)\n\t\t\tpred_y = model_fit.predict(self.test_x)\n\t\telse:\n\t\t\tacc, pred_y = self._fit_and_evaluate(self.models[BEST_DISTRIBUTION])\n\t\tprint('>> [Naive Bayes Runner] '+ BEST_DISTRIBUTION + ' - Accuracy:', acc)\n\t\treturn pred_y\n\n\n#################\n# DECISION TREE #\n#################\nclass decision_tree_runner(object):\n\t\n\tdef __init__(self, MODEL, train_x, train_y, test_x, test_y):\n\t\t\n\t\t#---data---#\n\t\tself.train_x = train_x\n\t\tself.train_y = train_y\n\t\tself.test_x = test_x\n\t\tself.test_y = test_y\n\n\t\t#---model---#\n\t\tself.cross_validate = False\n\t\tself.MODEL = MODEL\n\n\t\tif self.MODEL == 'NEWS':\n\t\t\tself.model = tree.DecisionTreeClassifier(criterion='gini', \n\t\t\t\t\t\t\t\t\t\t\t\t\t splitter='random', \n\t\t\t\t\t\t\t\t\t\t\t\t\t max_depth=47,\n\t\t\t\t\t\t\t\t\t\t\t\t\t random_state=1337)\n\t\telif self.MODEL == 'MUSHROOM':\n\t\t\tself.model = tree.DecisionTreeClassifier(criterion='gini', \n\t\t\t\t\t\t\t\t\t\t\t\t\t splitter='random', \n\t\t\t\t\t\t\t\t\t\t\t\t\t max_depth=7,\n\t\t\t\t\t\t\t\t\t\t\t\t\t random_state=1337)\n\t\telif self.MODEL == 'INCOME':\n\t\t\tself.cross_validate = True\n\t\t\tself.model = tree.DecisionTreeClassifier(criterion='entropy', \n\t\t\t\t\t\t\t\t\t\t\t\t\t min_impurity_decrease=2e-4,\n\t\t\t\t\t\t\t\t\t\t\t\t\t max_depth=15,\n\t\t\t\t\t\t\t\t\t\t\t\t\t random_state=1337)\n\n\n\tdef _fit_and_evaluate(self):\n\t\tmodel_fit = self.model.fit(self.train_x, self.train_y)\n\t\tpred_y = model_fit.predict(self.test_x)\n\t\tacc = metrics.accuracy_score(self.test_y, pred_y)\n\t\treturn acc, pred_y\n\n\n\tdef search_max_depth(self):\n\t\ttry:\n\t\t\tfrom tqdm import tqdm\n\t\texcept:\n\t\t\traise ImportError('Failed to import tqdm, use the following command to install: $ pip3 install tqdm')\n\t\tbest_acc = 0.0\n\t\tbest_depth = 1\n\t\t\n\t\tprint('>> [Naive Bayes Runner] Searching for best max depth value...')\n\t\tfor depth in tqdm(DEPTHS):\n\t\t\tself.model.set_params(max_depth=depth)\n\t\t\tif self.cross_validate: \n\t\t\t\tscores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\t\tacc = scores.mean()\n\t\t\telse:\n\t\t\t\tacc, _ = self._fit_and_evaluate()\n\t\t\tif acc > best_acc:\n\t\t\t\tbest_acc = acc\n\t\t\t\tbest_depth = depth\n\t\tprint('>> [Decision Tree Runner] - Best Dpeth Value:', best_depth)\n\n\n\tdef visualize(self):\n\t\ttry:\n\t\t\timport graphviz\n\t\texcept:\n\t\t\traise ImportError('Failed to import graphviz, use the following command to install: $ pip3 install graphviz, and $ sudo apt-get install graphviz')\n\t\tmodel_fit = self.model.fit(self.train_x, self.train_y)\n\t\tdot_data = tree.export_graphviz(model_fit, out_file=None, \n\t\t\t\t\t\t\t\t\t\tfilled=True, rounded=True, \n\t\t\t\t\t\t\t\t\t\tspecial_characters=True) \n\t\tgraph = graphviz.Source(dot_data)\n\t\tgraph.format = 'png'\n\t\tgraph.render('../image/TREE_' + self.MODEL)\n\t\tprint('>> [Decision Tree Runner] - Tree visualization complete.')\n\n\n\tdef 
run_best(self):\n\t\tif self.cross_validate: \n\t\t\tscores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')\n\t\t\tacc = scores.mean()\n\t\t\tmodel_fit = self.model.fit(self.train_x, self.train_y)\n\t\t\tpred_y = model_fit.predict(self.test_x)\n\t\telse:\t\t\n\t\t\tacc, pred_y = self._fit_and_evaluate()\n\t\tprint('>> [Decision Tree Runner] - Accuracy:', acc)\n\t\treturn pred_y\n\n"
] | [
[
"sklearn.naive_bayes.ComplementNB",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.naive_bayes.MultinomialNB",
"numpy.arange",
"sklearn.metrics.accuracy_score",
"sklearn.naive_bayes.BernoulliNB",
"sklearn.tree.export_graphviz",
"sklearn.model_selection.cross_val_score",
"sklearn.naive_bayes.GaussianNB"
]
] |
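The alpha and max-depth searches in the classifiers record above loop over candidate values by hand with tqdm. The same alpha search can be expressed with scikit-learn's GridSearchCV; this is a drop-in alternative for illustration, not what the original script does, and train_x/train_y stand for the arrays passed to naive_bayes_runner:

    import numpy as np
    from sklearn.model_selection import GridSearchCV
    from sklearn.naive_bayes import MultinomialNB

    search = GridSearchCV(
        MultinomialNB(),
        param_grid={"alpha": np.arange(0.001, 1.0, 0.001)},
        cv=10,                 # matches N_FOLD in the script
        scoring="accuracy",
    )
    # search.fit(train_x, train_y)
    # print(search.best_params_, search.best_score_)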
jphacks/C_2008 | [
"65d7a1d3a90045b149397cdd1e038ab648bb842e"
] | [
"sound_factory/sound_factory.py"
] | [
"import os\nimport re\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\n\n\nIMAGE_SHAPE = [(224, 224), (240, 240), (260, 260), (300, 300), (380, 380), (456, 456), (528, 528), (600, 600)]\n\ndef main(paths : list, model_name : str):\n try:\n model = tf.keras.models.load_model(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'model', model_name))\n except Exception:\n print('そのようなモデルはありません')\n exit()\n\n model_index = int(re.search('\\d', model_name).group(0))\n with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'model', model_name, 'labels.txt'), mode='r', encoding='utf-8') as f1:\n labels = [s.strip() for s in f1.readlines()]\n \n with open('manga_sound_labels.csv', mode='w', encoding='utf-8') as f2:\n for path in paths:\n if os.path.isfile(path):\n try:\n img = np.expand_dims(img_to_array(load_img(path,target_size=IMAGE_SHAPE[model_index])) / 255, axis=0)\n except Exception:\n continue\n pridict = labels[np.argmax(model.predict(img)[0])]\n f2.write(path + ',' + pridict + '\\n')\n else:\n for filename in os.listdir(path):\n try:\n img = np.expand_dims(img_to_array(load_img(os.path.join(path, filename),target_size=IMAGE_SHAPE[model_index])) / 255, axis=0)\n except Exception:\n continue\n pridict = labels[np.argmax(model.predict(img)[0])]\n f2.write(os.path.join(path, filename) + ',' + pridict + '\\n')\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='コマの画像から背景音を予測します')\n parser.add_argument('path',nargs='*', help='解析するファイル名かディレクトリ名')\n parser.add_argument('--model', default=os.path.join('best','b0'), help='クラス分けに使用するモデル名')\n args = parser.parse_args()\n if 'manga_sound_labels.csv' in os.listdir(os.getcwd()):\n print('manga_sound_labels.csvがすでにあるので終了します')\n exit()\n main(args.path, args.model)"
] | [
[
"tensorflow.keras.preprocessing.image.load_img"
]
] |
bhomaidan1990/reinforcement-learning-an-introduction | [
"fbf020d9da2daec3194a17f968ef29d12ebde6f6"
] | [
"chapter05/blackjack.py"
] | [
"#######################################################################\n# Copyright (C) #\n# 2016-2018 Shangtong Zhang([email protected]) #\n# 2016 Kenta Shimada([email protected]) #\n# 2017 Nicky van Foreest([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom tqdm import tqdm\n\n# actions: hit or stand\nACTION_HIT = 0\nACTION_STAND = 1 # \"strike\" in the book\nACTIONS = [ACTION_HIT, ACTION_STAND]\n\n# policy for player\nPOLICY_PLAYER = np.zeros(22, dtype=np.int)\nfor i in range(12, 20):\n POLICY_PLAYER[i] = ACTION_HIT\nPOLICY_PLAYER[20] = ACTION_STAND\nPOLICY_PLAYER[21] = ACTION_STAND\n\n# function form of target policy of player\ndef target_policy_player(usable_ace_player, player_sum, dealer_card):\n return POLICY_PLAYER[player_sum]\n\n# function form of behavior policy of player\ndef behavior_policy_player(usable_ace_player, player_sum, dealer_card):\n if np.random.binomial(1, 0.5) == 1:\n return ACTION_STAND\n return ACTION_HIT\n\n# policy for dealer\nPOLICY_DEALER = np.zeros(22)\nfor i in range(12, 17):\n POLICY_DEALER[i] = ACTION_HIT\nfor i in range(17, 22):\n POLICY_DEALER[i] = ACTION_STAND\n\n# get a new card\ndef get_card():\n card = np.random.randint(1, 14)\n card = min(card, 10)\n return card\n\n# get the value of a card (11 for ace).\ndef card_value(card_id):\n return 11 if card_id == 1 else card_id\n\n# play a game\n# @policy_player: specify policy for player\n# @initial_state: [whether player has a usable Ace, sum of player's cards, one card of dealer]\n# @initial_action: the initial action\ndef play(policy_player, initial_state=None, initial_action=None):\n # player status\n\n # sum of player\n player_sum = 0\n\n # trajectory of player\n player_trajectory = []\n\n # whether player uses Ace as 11\n usable_ace_player = False\n\n # dealer status\n dealer_card1 = 0\n dealer_card2 = 0\n usable_ace_dealer = False\n\n if initial_state is None:\n # generate a random initial state\n\n while player_sum < 12:\n # if sum of player is less than 12, always hit\n card = get_card()\n player_sum += card_value(card)\n\n # If the player's sum is larger than 21, he may hold one or two aces.\n if player_sum > 21:\n assert player_sum == 22\n # last card must be ace\n player_sum -= 10\n else:\n usable_ace_player |= (1 == card)\n\n # initialize cards of dealer, suppose dealer will show the first card he gets\n dealer_card1 = get_card()\n dealer_card2 = get_card()\n\n else:\n # use specified initial state\n usable_ace_player, player_sum, dealer_card1 = initial_state\n dealer_card2 = get_card()\n\n # initial state of the game\n state = [usable_ace_player, player_sum, dealer_card1]\n\n # initialize dealer's sum\n dealer_sum = card_value(dealer_card1) + card_value(dealer_card2)\n usable_ace_dealer = 1 in (dealer_card1, dealer_card2)\n # if the dealer's sum is larger than 21, he must hold two aces.\n if dealer_sum > 21:\n assert dealer_sum == 22\n # use one Ace as 1 rather than 11\n dealer_sum -= 10\n assert dealer_sum <= 21\n assert player_sum <= 21\n\n # game starts!\n\n # player's turn\n while True:\n if initial_action is not None:\n action = initial_action\n initial_action = None\n else:\n # get action based on current sum\n action = policy_player(usable_ace_player, player_sum, dealer_card1)\n\n # track player's trajectory for importance 
sampling\n player_trajectory.append([(usable_ace_player, player_sum, dealer_card1), action])\n\n if action == ACTION_STAND:\n break\n # if hit, get new card\n card = get_card()\n # Keep track of the ace count. the usable_ace_player flag is insufficient alone as it cannot\n # distinguish between having one ace or two.\n ace_count = int(usable_ace_player)\n if card == 1:\n ace_count += 1\n player_sum += card_value(card)\n # If the player has a usable ace, use it as 1 to avoid busting and continue.\n while player_sum > 21 and ace_count:\n player_sum -= 10\n ace_count -= 1\n # player busts\n if player_sum > 21:\n return state, -1, player_trajectory\n assert player_sum <= 21\n usable_ace_player = (ace_count == 1)\n\n # dealer's turn\n while True:\n # get action based on current sum\n action = POLICY_DEALER[dealer_sum]\n if action == ACTION_STAND:\n break\n # if hit, get a new card\n new_card = get_card()\n ace_count = int(usable_ace_dealer)\n if new_card == 1:\n ace_count += 1\n dealer_sum += card_value(new_card)\n # If the dealer has a usable ace, use it as 1 to avoid busting and continue.\n while dealer_sum > 21 and ace_count:\n dealer_sum -= 10\n ace_count -= 1\n # dealer busts\n if dealer_sum > 21:\n return state, 1, player_trajectory\n usable_ace_dealer = (ace_count == 1)\n\n # compare the sum between player and dealer\n assert player_sum <= 21 and dealer_sum <= 21\n if player_sum > dealer_sum:\n return state, 1, player_trajectory\n elif player_sum == dealer_sum:\n return state, 0, player_trajectory\n else:\n return state, -1, player_trajectory\n\n# Monte Carlo Sample with On-Policy\ndef monte_carlo_on_policy(episodes):\n states_usable_ace = np.zeros((10, 10))\n # initialze counts to 1 to avoid 0 being divided\n states_usable_ace_count = np.ones((10, 10))\n states_no_usable_ace = np.zeros((10, 10))\n # initialze counts to 1 to avoid 0 being divided\n states_no_usable_ace_count = np.ones((10, 10))\n for i in tqdm(range(0, episodes)):\n _, reward, player_trajectory = play(target_policy_player)\n for (usable_ace, player_sum, dealer_card), _ in player_trajectory:\n player_sum -= 12\n dealer_card -= 1\n if usable_ace:\n states_usable_ace_count[player_sum, dealer_card] += 1\n states_usable_ace[player_sum, dealer_card] += reward\n else:\n states_no_usable_ace_count[player_sum, dealer_card] += 1\n states_no_usable_ace[player_sum, dealer_card] += reward\n return states_usable_ace / states_usable_ace_count, states_no_usable_ace / states_no_usable_ace_count\n\n# Monte Carlo with Exploring Starts\ndef monte_carlo_es(episodes):\n # (playerSum, dealerCard, usableAce, action)\n state_action_values = np.zeros((10, 10, 2, 2))\n # initialze counts to 1 to avoid division by 0\n state_action_pair_count = np.ones((10, 10, 2, 2))\n\n # behavior policy is greedy\n def behavior_policy(usable_ace, player_sum, dealer_card):\n usable_ace = int(usable_ace)\n player_sum -= 12\n dealer_card -= 1\n # get argmax of the average returns(s, a)\n values_ = state_action_values[player_sum, dealer_card, usable_ace, :] / \\\n state_action_pair_count[player_sum, dealer_card, usable_ace, :]\n return np.random.choice([action_ for action_, value_ in enumerate(values_) if value_ == np.max(values_)])\n\n # play for several episodes\n for episode in tqdm(range(episodes)):\n # for each episode, use a randomly initialized state and action\n initial_state = [bool(np.random.choice([0, 1])),\n np.random.choice(range(12, 22)),\n np.random.choice(range(1, 11))]\n initial_action = np.random.choice(ACTIONS)\n current_policy = behavior_policy if 
episode else target_policy_player\n _, reward, trajectory = play(current_policy, initial_state, initial_action)\n first_visit_check = set()\n for (usable_ace, player_sum, dealer_card), action in trajectory:\n usable_ace = int(usable_ace)\n player_sum -= 12\n dealer_card -= 1\n state_action = (usable_ace, player_sum, dealer_card, action)\n if state_action in first_visit_check:\n continue\n first_visit_check.add(state_action)\n # update values of state-action pairs\n state_action_values[player_sum, dealer_card, usable_ace, action] += reward\n state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1\n\n return state_action_values / state_action_pair_count\n\n# Monte Carlo Sample with Off-Policy\ndef monte_carlo_off_policy(episodes):\n initial_state = [True, 13, 2]\n\n rhos = []\n returns = []\n\n for i in range(0, episodes):\n _, reward, player_trajectory = play(behavior_policy_player, initial_state=initial_state)\n\n # get the importance ratio\n numerator = 1.0\n denominator = 1.0\n for (usable_ace, player_sum, dealer_card), action in player_trajectory:\n if action == target_policy_player(usable_ace, player_sum, dealer_card):\n denominator *= 0.5\n else:\n numerator = 0.0\n break\n rho = numerator / denominator\n rhos.append(rho)\n returns.append(reward)\n\n rhos = np.asarray(rhos)\n returns = np.asarray(returns)\n weighted_returns = rhos * returns\n\n weighted_returns = np.add.accumulate(weighted_returns)\n rhos = np.add.accumulate(rhos)\n\n ordinary_sampling = weighted_returns / np.arange(1, episodes + 1)\n\n with np.errstate(divide='ignore',invalid='ignore'):\n weighted_sampling = np.where(rhos != 0, weighted_returns / rhos, 0)\n\n return ordinary_sampling, weighted_sampling\n\ndef figure_5_1():\n states_usable_ace_1, states_no_usable_ace_1 = monte_carlo_on_policy(10000)\n states_usable_ace_2, states_no_usable_ace_2 = monte_carlo_on_policy(500000)\n\n states = [states_usable_ace_1,\n states_usable_ace_2,\n states_no_usable_ace_1,\n states_no_usable_ace_2]\n\n titles = ['Usable Ace, 10000 Episodes',\n 'Usable Ace, 500000 Episodes',\n 'No Usable Ace, 10000 Episodes',\n 'No Usable Ace, 500000 Episodes']\n\n _, axes = plt.subplots(2, 2, figsize=(40, 30))\n plt.subplots_adjust(wspace=0.1, hspace=0.2)\n axes = axes.flatten()\n\n for state, title, axis in zip(states, titles, axes):\n fig = sns.heatmap(np.flipud(state), cmap=\"YlGnBu\", ax=axis, xticklabels=range(1, 11),\n yticklabels=list(reversed(range(12, 22))))\n fig.set_ylabel('player sum', fontsize=30)\n fig.set_xlabel('dealer showing', fontsize=30)\n fig.set_title(title, fontsize=30)\n\n plt.savefig('../images/figure_5_1.png')\n plt.close()\n\ndef figure_5_2():\n state_action_values = monte_carlo_es(500000)\n\n state_value_no_usable_ace = np.max(state_action_values[:, :, 0, :], axis=-1)\n state_value_usable_ace = np.max(state_action_values[:, :, 1, :], axis=-1)\n\n # get the optimal policy\n action_no_usable_ace = np.argmax(state_action_values[:, :, 0, :], axis=-1)\n action_usable_ace = np.argmax(state_action_values[:, :, 1, :], axis=-1)\n\n images = [action_usable_ace,\n state_value_usable_ace,\n action_no_usable_ace,\n state_value_no_usable_ace]\n\n titles = ['Optimal policy with usable Ace',\n 'Optimal value with usable Ace',\n 'Optimal policy without usable Ace',\n 'Optimal value without usable Ace']\n\n _, axes = plt.subplots(2, 2, figsize=(40, 30))\n plt.subplots_adjust(wspace=0.1, hspace=0.2)\n axes = axes.flatten()\n\n for image, title, axis in zip(images, titles, axes):\n fig = sns.heatmap(np.flipud(image), 
cmap=\"YlGnBu\", ax=axis, xticklabels=range(1, 11),\n yticklabels=list(reversed(range(12, 22))))\n fig.set_ylabel('player sum', fontsize=30)\n fig.set_xlabel('dealer showing', fontsize=30)\n fig.set_title(title, fontsize=30)\n\n plt.savefig('../images/figure_5_2.png')\n plt.close()\n\ndef figure_5_3():\n true_value = -0.27726\n episodes = 10000\n runs = 100\n error_ordinary = np.zeros(episodes)\n error_weighted = np.zeros(episodes)\n for i in tqdm(range(0, runs)):\n ordinary_sampling_, weighted_sampling_ = monte_carlo_off_policy(episodes)\n # get the squared error\n error_ordinary += np.power(ordinary_sampling_ - true_value, 2)\n error_weighted += np.power(weighted_sampling_ - true_value, 2)\n error_ordinary /= runs\n error_weighted /= runs\n\n plt.plot(np.arange(1, episodes + 1), error_ordinary, color='green', label='Ordinary Importance Sampling')\n plt.plot(np.arange(1, episodes + 1), error_weighted, color='red', label='Weighted Importance Sampling')\n plt.ylim(-0.1, 5)\n plt.xlabel('Episodes (log scale)')\n plt.ylabel(f'Mean square error\\n(average over {runs} runs)')\n plt.xscale('log')\n plt.legend()\n\n plt.savefig('../images/figure_5_3.png')\n plt.close()\n\n\nif __name__ == '__main__':\n figure_5_1()\n figure_5_2()\n figure_5_3()\n"
] | [
[
"numpy.ones",
"numpy.asarray",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.savefig",
"numpy.random.choice",
"matplotlib.pyplot.xscale",
"matplotlib.use",
"numpy.where",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"numpy.argmax",
"numpy.arange",
"numpy.max",
"numpy.power",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ylim",
"numpy.random.binomial",
"matplotlib.pyplot.legend",
"numpy.flipud",
"numpy.add.accumulate",
"numpy.errstate",
"numpy.random.randint",
"matplotlib.pyplot.xlabel"
]
] |
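monte_carlo_off_policy in the blackjack record above turns per-episode importance ratios and returns into ordinary and weighted importance-sampling estimates via cumulative sums. The same two estimators in isolation, with made-up ratios (a ratio of 2^k when all k actions match the target policy, 0 when the behaviour policy deviates):

    import numpy as np

    rhos = np.array([2.0, 0.0, 4.0])      # importance ratios, one per episode
    returns = np.array([1.0, -1.0, 1.0])  # rewards collected under the behaviour policy

    weighted_returns = np.add.accumulate(rhos * returns)
    cum_rhos = np.add.accumulate(rhos)

    ordinary = weighted_returns / np.arange(1, len(rhos) + 1)
    with np.errstate(divide="ignore", invalid="ignore"):
        weighted = np.where(cum_rhos != 0, weighted_returns / cum_rhos, 0)

    print(ordinary)  # [2. 1. 2.]
    print(weighted)  # [1. 1. 1.]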
hxwork/OMNet | [
"be88a734e7327def365e1875bbc7cd2fea1539b0"
] | [
"common/manager.py"
] | [
"import os\r\nfrom collections import defaultdict\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom termcolor import colored\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\nfrom common import utils\r\n\r\n\r\nclass Manager():\r\n def __init__(self, model, optimizer, scheduler, params, dataloaders, logger):\r\n # params status\r\n self.params = params\r\n\r\n self.model = model\r\n self.optimizer = optimizer\r\n self.scheduler = scheduler\r\n self.dataloaders = dataloaders\r\n self.logger = logger\r\n\r\n self.epoch = 0\r\n self.step = 0\r\n self.best_val_score = np.inf\r\n self.cur_val_score = np.inf\r\n self.best_test_score = np.inf\r\n self.cur_test_score = np.inf\r\n\r\n # train status\r\n self.train_status = defaultdict(utils.AverageMeter)\r\n\r\n # val status\r\n self.val_status = defaultdict(utils.AverageMeter)\r\n\r\n # test status\r\n self.test_status = defaultdict(utils.AverageMeter)\r\n\r\n # model status\r\n self.loss_status = defaultdict(utils.AverageMeter)\r\n\r\n # init local tensorboard and html\r\n self.init_tb_and_html()\r\n\r\n def init_tb_and_html(self):\r\n # tensorboard loss\r\n local_tb_dir = os.path.join(self.params.model_dir, \"summary/loss\")\r\n os.makedirs(local_tb_dir, exist_ok=True)\r\n self.local_loss_writter = SummaryWriter(log_dir=local_tb_dir)\r\n # tensorboard metric\r\n local_tb_dir = os.path.join(self.params.model_dir, \"summary/metric\")\r\n os.makedirs(local_tb_dir, exist_ok=True)\r\n self.local_metric_writter = SummaryWriter(log_dir=local_tb_dir)\r\n # html\r\n local_html_dir = os.path.join(self.params.model_dir, \"summary/html\")\r\n os.makedirs(local_html_dir, exist_ok=True)\r\n self.local_html_dir = local_html_dir\r\n\r\n def update_step(self):\r\n self.step += 1\r\n\r\n def update_epoch(self):\r\n self.epoch += 1\r\n\r\n def update_loss_status(self, loss, batch_size):\r\n for k, v in loss.items():\r\n self.loss_status[k].update(val=v.item(), num=batch_size)\r\n\r\n def update_metric_status(self, metrics, split, batch_size):\r\n if split == \"val\":\r\n for k, v in metrics.items():\r\n self.val_status[k].update(val=v.item(), num=batch_size)\r\n self.cur_val_score = self.val_status[self.params.major_metric].avg\r\n elif split == \"test\":\r\n for k, v in metrics.items():\r\n self.test_status[k].update(val=v.item(), num=batch_size)\r\n self.cur_test_score = self.test_status[self.params.major_metric].avg\r\n else:\r\n raise ValueError(\"Wrong eval type: {}\".format(split))\r\n\r\n def summarize_metric_status(self, metrics, split):\r\n if split == \"val\":\r\n for k in metrics:\r\n if k.endswith('MSE'):\r\n self.val_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.val_status[k].avg))\r\n else:\r\n continue\r\n elif split == \"test\":\r\n for k in metrics:\r\n if k.endswith('MSE'):\r\n self.test_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.test_status[k].avg))\r\n else:\r\n continue\r\n else:\r\n raise ValueError(\"Wrong eval type: {}\".format(split))\r\n\r\n def reset_loss_status(self):\r\n for k, v in self.loss_status.items():\r\n self.loss_status[k].reset()\r\n\r\n def reset_metric_status(self, split):\r\n if split == \"val\":\r\n for k, v in self.val_status.items():\r\n self.val_status[k].reset()\r\n elif split == \"test\":\r\n for k, v in self.test_status.items():\r\n self.test_status[k].reset()\r\n else:\r\n raise ValueError(\"Wrong split string: {}\".format(split))\r\n\r\n def print_train_info(self):\r\n exp_name = self.params.model_dir.split('/')[-1]\r\n print_str = \"{} Epoch: {:4d}, lr={:.4f} \".format(exp_name, self.epoch, 
self.scheduler.get_last_lr()[0])\r\n print_str += \"total loss: %.4f(%.4f)\" % (self.loss_status['total'].val, self.loss_status['total'].avg)\r\n return print_str\r\n\r\n def print_metrics(self, split, title=\"Eval\", color=\"red\", only_best=False):\r\n if split == \"val\":\r\n metric_status = self.val_status\r\n is_best = self.cur_val_score < self.best_val_score\r\n elif split == \"test\":\r\n metric_status = self.test_status\r\n is_best = self.cur_test_score < self.best_test_score\r\n else:\r\n raise ValueError(\"Wrong split string: {}\".format(split))\r\n\r\n print_str = \" | \".join(\"{}: {:4g}\".format(k, v.avg) for k, v in metric_status.items())\r\n if only_best:\r\n if is_best:\r\n self.logger.info(colored(\"Best Epoch: {}, {} Results: {}\".format(self.epoch, title, print_str), color, attrs=[\"bold\"]))\r\n else:\r\n self.logger.info(colored(\"Epoch: {}, {} Results: {}\".format(self.epoch, title, print_str), color, attrs=[\"bold\"]))\r\n\r\n def write_loss_to_tb(self, split):\r\n for k, v in self.loss_status.items():\r\n if split == \"train\":\r\n self.local_loss_writter.add_scalar(\"train_Loss/{}\".format(k), v.val, self.step)\r\n elif split == \"val\":\r\n self.local_loss_writter.add_scalar(\"val_Loss/{}\".format(k), v.val, self.step)\r\n elif split == \"test\":\r\n self.local_loss_writter.add_scalar(\"test_Loss/{}\".format(k), v.val, self.step)\r\n else:\r\n raise ValueError(\"Wrong split string: {}\".format(split))\r\n\r\n def write_metric_to_tb(self, split):\r\n if split == \"val\":\r\n for k, v in self.val_status.items():\r\n self.local_metric_writter.add_scalar(\"val_Metric/{}\".format(k), v.avg, self.epoch)\r\n elif split == \"test\":\r\n for k, v in self.test_status.items():\r\n self.local_metric_writter.add_scalar(\"test_Metric/{}\".format(k), v.avg, self.epoch)\r\n else:\r\n raise ValueError(\"Wrong split string: {}\".format(split))\r\n\r\n def check_best_save_last_checkpoints(self, save_latest_freq=5, save_best_after=50):\r\n\r\n state = {\r\n \"state_dict\": self.model.state_dict(),\r\n \"optimizer\": self.optimizer.state_dict(),\r\n \"scheduler\": self.scheduler.state_dict(),\r\n \"step\": self.step,\r\n \"epoch\": self.epoch,\r\n }\r\n if self.dataloaders[\"val\"] is not None:\r\n state[\"best_val_score\"] = self.best_val_score\r\n if self.dataloaders[\"test\"] is not None:\r\n state[\"best_test_score\"] = self.best_test_score\r\n\r\n # save latest checkpoint\r\n if self.epoch % save_latest_freq == 0:\r\n latest_ckpt_name = os.path.join(self.params.model_dir, \"model_latest.pth\")\r\n torch.save(state, latest_ckpt_name)\r\n self.logger.info(\"Saved latest checkpoint to: {}\".format(latest_ckpt_name))\r\n\r\n # save val latest metrics, and check if val is best checkpoints\r\n if self.dataloaders[\"val\"] is not None:\r\n val_latest_metrics_name = os.path.join(self.params.model_dir, \"val_metrics_latest.json\")\r\n utils.save_dict_to_json(self.val_status, val_latest_metrics_name)\r\n is_best = self.cur_val_score < self.best_val_score\r\n if is_best:\r\n # save metrics\r\n self.best_val_score = self.cur_val_score\r\n best_metrics_name = os.path.join(self.params.model_dir, \"val_metrics_best.json\")\r\n utils.save_dict_to_json(self.val_status, best_metrics_name)\r\n self.logger.info(\"Current is val best, score={:.7f}\".format(self.best_val_score))\r\n # save checkpoint\r\n if self.epoch > save_best_after:\r\n best_ckpt_name = os.path.join(self.params.model_dir, \"val_model_best.pth\")\r\n torch.save(state, best_ckpt_name)\r\n self.logger.info(\"Saved val best checkpoint 
to: {}\".format(best_ckpt_name))\r\n\r\n # save test latest metrics, and check if test is best checkpoints\r\n if self.dataloaders[\"test\"] is not None:\r\n test_latest_metrics_name = os.path.join(self.params.model_dir, \"test_metrics_latest.json\")\r\n utils.save_dict_to_json(self.test_status, test_latest_metrics_name)\r\n is_best = self.cur_test_score < self.best_test_score\r\n if is_best:\r\n # save metrics\r\n self.best_test_score = self.cur_test_score\r\n best_metrics_name = os.path.join(self.params.model_dir, \"test_metrics_best.json\")\r\n utils.save_dict_to_json(self.test_status, best_metrics_name)\r\n self.logger.info(\"Current is test best, score={:.7f}\".format(self.best_test_score))\r\n # save checkpoint\r\n if self.epoch > save_best_after:\r\n best_ckpt_name = os.path.join(self.params.model_dir, \"test_model_best.pth\")\r\n torch.save(state, best_ckpt_name)\r\n self.logger.info(\"Saved test best checkpoint to: {}\".format(best_ckpt_name))\r\n\r\n def load_checkpoints(self):\r\n state = torch.load(self.params.restore_file)\r\n\r\n ckpt_component = []\r\n if \"state_dict\" in state and self.model is not None:\r\n try:\r\n self.model.load_state_dict(state[\"state_dict\"])\r\n except RuntimeError:\r\n print(\"Using custom loading net\")\r\n net_dict = self.model.state_dict()\r\n if \"module\" not in list(state[\"state_dict\"].keys())[0]:\r\n state_dict = {\"module.\" + k: v for k, v in state[\"state_dict\"].items() if \"module.\" + k in net_dict.keys()}\r\n else:\r\n state_dict = {k: v for k, v in state[\"state_dict\"].items() if k in net_dict.keys()}\r\n net_dict.update(state_dict)\r\n self.model.load_state_dict(net_dict, strict=False)\r\n ckpt_component.append(\"net\")\r\n\r\n if not self.params.only_weights:\r\n\r\n if \"optimizer\" in state and self.optimizer is not None:\r\n try:\r\n self.optimizer.load_state_dict(state[\"optimizer\"])\r\n\r\n except RuntimeError:\r\n print(\"Using custom loading optimizer\")\r\n optimizer_dict = self.optimizer.state_dict()\r\n state_dict = {k: v for k, v in state[\"optimizer\"].items() if k in optimizer_dict.keys()}\r\n optimizer_dict.update(state_dict)\r\n self.optimizer.load_state_dict(optimizer_dict)\r\n ckpt_component.append(\"opt\")\r\n\r\n if \"scheduler\" in state and self.train_status[\"scheduler\"] is not None:\r\n try:\r\n self.scheduler.load_state_dict(state[\"scheduler\"])\r\n\r\n except RuntimeError:\r\n print(\"Using custom loading scheduler\")\r\n scheduler_dict = self.scheduler.state_dict()\r\n state_dict = {k: v for k, v in state[\"scheduler\"].items() if k in scheduler_dict.keys()}\r\n scheduler_dict.update(state_dict)\r\n self.scheduler.load_state_dict(scheduler_dict)\r\n ckpt_component.append(\"sch\")\r\n\r\n if \"step\" in state:\r\n self.step = state[\"step\"] + 1\r\n ckpt_component.append(\"step\")\r\n\r\n if \"epoch\" in state:\r\n self.epoch = state[\"epoch\"] + 1\r\n ckpt_component.append(\"epoch\")\r\n\r\n if \"best_val_score\" in state:\r\n self.best_val_score = state[\"best_val_score\"]\r\n ckpt_component.append(\"best val score: {:.3g}\".format(self.best_val_score))\r\n\r\n if \"best_test_score\" in state:\r\n self.best_test_score = state[\"best_test_score\"]\r\n ckpt_component.append(\"best test score: {:.3g}\".format(self.best_test_score))\r\n\r\n ckpt_component = \", \".join(i for i in ckpt_component)\r\n self.logger.info(\"Loaded models from: {}\".format(self.params.restore_file))\r\n self.logger.info(\"Ckpt load: {}\".format(ckpt_component))\r\n"
] | [
[
"torch.utils.tensorboard.SummaryWriter",
"torch.save",
"numpy.sqrt",
"torch.load"
]
] |
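For reference, a minimal sketch (placeholder model, hypothetical file name) of the torch.save / torch.load checkpoint pattern that the Manager class in the record above relies on:

import torch
import torch.nn as nn

model = nn.Linear(4, 1)                                  # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Bundle everything needed to resume training into one dict, as the Manager does.
state = {
    "state_dict": model.state_dict(),
    "optimizer": optimizer.state_dict(),
    "epoch": 0,
}
torch.save(state, "model_latest.pth")                    # hypothetical path

# Later: restore the same objects from the checkpoint.
state = torch.load("model_latest.pth")
model.load_state_dict(state["state_dict"])
optimizer.load_state_dict(state["optimizer"])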
shinhaha/tensorflow | [
"4647017a727985d64c5b0addee92f0ec516952c1"
] | [
"Inflearn_SungKim/1.LinearRegression/LinearRegression(placeholders).py"
] | [
"import tensorflow as tf\n\n#placeholder variable(scalar)\nX=tf.placeholder(tf.float32,shape=[None])\nY=tf.placeholder(tf.float32,shape=[None])\n\nW=tf.Variable(tf.random_normal([1]),name='weight')\nb=tf.Variable(tf.random_normal([1]),name='bias')\n\nhypothesis=X*W+b\n#average\ncost=tf.reduce_mean(tf.square(hypothesis-Y))\n\noptimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01)\n#minimize cost\ntrain=optimizer.minimize(cost)\n\nsess=tf.Session()\n#initialize var\nsess.run(tf.global_variables_initializer())\n\n#learning\nfor step in range(2001):\n cost_val,W_val,b_val,_=sess.run([cost,W,b,train],\n feed_dict={X:[1,2,3,4,5],Y:[2.1,3.1,4.1,5.1,6.1]})\n if step%20==0:\n print(step,cost_val,W_val,b_val)\n\n#evlauation\nprint(sess.run(hypothesis,feed_dict={X:[5]}))\nprint(sess.run(hypothesis,feed_dict={X:[2.5]}))\nprint(sess.run(hypothesis,feed_dict={X:[1.5,3.5]}))"
] | [
[
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.square",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Session",
"tensorflow.random_normal"
]
] |
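The file in the record above targets the TensorFlow 1.x graph API (tf.placeholder, tf.Session); under TensorFlow 2.x it would typically be run through the compat.v1 layer. A minimal sketch, assuming TF 2.x is installed:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()                     # restore graph/Session semantics

X = tf.placeholder(tf.float32, shape=[None])
W = tf.Variable(tf.random_normal([1]), name="weight")
hypothesis = X * W

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(hypothesis, feed_dict={X: [1.0, 2.0, 3.0]}))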
tizian/layer-laboratory | [
"008cc94b76127e9eb74227fcd3d0145da8ddec30"
] | [
"src/films/tests/test_hdrfilm.py"
] | [
"import mitsuba\nimport pytest\nimport os\nimport enoki as ek\n\n\ndef test01_construct(variant_scalar_rgb):\n from mitsuba.core.xml import load_string\n\n # With default reconstruction filter\n film = load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\"></film>\"\"\")\n assert film is not None\n assert film.reconstruction_filter() is not None\n\n # With a provided reconstruction filter\n film = load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <rfilter type=\"gaussian\">\n <float name=\"stddev\" value=\"18.5\"/>\n </rfilter>\n </film>\"\"\")\n assert film is not None\n assert film.reconstruction_filter().radius() == (4 * 18.5)\n\n # Certain parameter values are not allowed\n with pytest.raises(RuntimeError):\n load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <string name=\"component_format\" value=\"uint8\"/>\n </film>\"\"\")\n with pytest.raises(RuntimeError):\n load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <string name=\"pixel_format\" value=\"brga\"/>\n </film>\"\"\")\n\n\ndef test02_crops(variant_scalar_rgb):\n from mitsuba.core.xml import load_string\n\n film = load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <integer name=\"width\" value=\"32\"/>\n <integer name=\"height\" value=\"21\"/>\n <integer name=\"crop_width\" value=\"11\"/>\n <integer name=\"crop_height\" value=\"5\"/>\n <integer name=\"crop_offset_x\" value=\"2\"/>\n <integer name=\"crop_offset_y\" value=\"3\"/>\n <boolean name=\"high_quality_edges\" value=\"true\"/>\n <string name=\"pixel_format\" value=\"rgba\"/>\n </film>\"\"\")\n assert film is not None\n assert ek.all(film.size() == [32, 21])\n assert ek.all(film.crop_size() == [11, 5])\n assert ek.all(film.crop_offset() == [2, 3])\n assert film.has_high_quality_edges()\n\n # Crop size doesn't adjust its size, so an error should be raised if the\n # resulting crop window goes out of bounds.\n incomplete = \"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <integer name=\"width\" value=\"32\"/>\n <integer name=\"height\" value=\"21\"/>\n <integer name=\"crop_offset_x\" value=\"30\"/>\n <integer name=\"crop_offset_y\" value=\"20\"/>\"\"\"\n with pytest.raises(RuntimeError):\n film = load_string(incomplete + \"</film>\")\n film = load_string(incomplete + \"\"\"\n <integer name=\"crop_width\" value=\"2\"/>\n <integer name=\"crop_height\" value=\"1\"/>\n </film>\"\"\")\n assert film is not None\n assert ek.all(film.size() == [32, 21])\n assert ek.all(film.crop_size() == [2, 1])\n assert ek.all(film.crop_offset() == [30, 20])\n\n\[email protected]('file_format', ['exr', 'rgbe', 'pfm'])\ndef test03_develop(variant_scalar_rgb, file_format, tmpdir):\n from mitsuba.core.xml import load_string\n from mitsuba.core import Bitmap, Struct, ReconstructionFilter, float_dtype\n from mitsuba.render import ImageBlock\n import numpy as np\n\n \"\"\"Create a test image. 
Develop it to a few file format, each time reading\n it back and checking that contents are unchanged.\"\"\"\n np.random.seed(12345 + ord(file_format[0]))\n # Note: depending on the file format, the alpha channel may be automatically removed.\n film = load_string(\"\"\"<film version=\"2.0.0\" type=\"hdrfilm\">\n <integer name=\"width\" value=\"41\"/>\n <integer name=\"height\" value=\"37\"/>\n <string name=\"file_format\" value=\"{}\"/>\n <string name=\"pixel_format\" value=\"rgba\"/>\n <string name=\"component_format\" value=\"float32\"/>\n <rfilter type=\"box\"/>\n </film>\"\"\".format(file_format))\n # Regardless of the output file format, values are stored as XYZAW (5 channels).\n contents = np.random.uniform(size=(film.size()[1], film.size()[0], 5))\n # RGBE and will only reconstruct well images that have similar scales on\n # all channel (because exponent is shared between channels).\n if file_format is \"rgbe\":\n contents = 1 + 0.1 * contents\n # Use unit weights.\n contents[:, :, 4] = 1.0\n\n block = ImageBlock(film.size(), 5, film.reconstruction_filter())\n\n block.clear()\n for x in range(film.size()[1]):\n for y in range(film.size()[0]):\n block.put([y+0.5, x+0.5], contents[x, y, :])\n\n film.prepare(['X', 'Y', 'Z', 'A', 'W'])\n film.put(block)\n\n with pytest.raises(RuntimeError):\n # Should raise when the destination file hasn't been specified.\n film.develop()\n\n filename = str(tmpdir.join('test_image.' + file_format))\n film.set_destination_file(filename)\n film.develop()\n\n # Read back and check contents\n other = Bitmap(filename).convert(Bitmap.PixelFormat.XYZAW, Struct.Type.Float32, srgb_gamma=False)\n img = np.array(other, copy=False)\n\n if False:\n import matplotlib.pyplot as plt\n plt.figure()\n plt.subplot(1, 3, 1)\n plt.imshow(contents[:, :, :3])\n plt.subplot(1, 3, 2)\n plt.imshow(img[:, :, :3])\n plt.subplot(1, 3, 3)\n plt.imshow(ek.sum(ek.abs(img[:, :, :3] - contents[:, :, :3]), axis=2), cmap='coolwarm')\n plt.colorbar()\n plt.show()\n\n if file_format == \"exr\":\n assert ek.allclose(img, contents, atol=1e-5)\n else:\n if file_format == \"rgbe\":\n assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-2), \\\n '\\n{}\\nvs\\n{}\\n'.format(img[:4, :4, :3], contents[:4, :4, :3])\n else:\n assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-5)\n # Alpha channel was ignored, alpha and weights should default to 1.0.\n assert ek.allclose(img[:, :, 3:5], 1.0, atol=1e-6)\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.colorbar"
]
] |
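The test file in the record above follows a write-then-read round-trip pattern: fill an image buffer, develop it to disk, reload it, and compare within a format-dependent tolerance. A library-agnostic sketch of that pattern in plain NumPy (illustrative names only, not the Mitsuba API):

import numpy as np

rng = np.random.default_rng(12345)
contents = rng.uniform(size=(37, 41, 3)).astype(np.float32)  # fake image buffer

np.save("test_image.npy", contents)          # "develop" to disk (hypothetical path)
reloaded = np.load("test_image.npy")         # read it back

# Lossless formats should match tightly; lossy ones get a looser tolerance.
assert np.allclose(reloaded, contents, atol=1e-5)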
kvzhao/pytorch-metric-learning | [
"9eb792bcfc1616b599e6ee457514e3cb3a7235dd"
] | [
"src/pytorch_metric_learning/utils/logging_presets.py"
] | [
"import logging\r\nfrom . import common_functions as c_f\r\nimport os\r\nimport torch\r\nfrom collections import defaultdict\r\nimport sqlite3\r\n\r\n# You can write your own hooks for logging.\r\n# But if you'd like something that just works, then use this HookContainer.\r\n# You'll need to install record-keeper and tensorboard.\r\n# pip install record-keeper tensorboard\r\n\r\nclass HookContainer: \r\n\r\n def __init__(self, record_keeper, \r\n record_group_name_prefix=None, \r\n primary_metric=\"mean_average_precision_at_r\", \r\n validation_split_name=\"val\"):\r\n self.record_keeper = record_keeper\r\n self.record_group_name_prefix = record_group_name_prefix\r\n self.saveable_trainer_objects = [\"models\", \"optimizers\", \"lr_schedulers\", \"loss_funcs\", \"mining_funcs\"]\r\n self.primary_metric = primary_metric\r\n self.validation_split_name = validation_split_name\r\n\r\n ############################################\r\n ############################################\r\n ################## HOOKS #################\r\n ############################################\r\n ############################################\r\n\r\n ### Define the end_of_iteration hook. This will be executed at the end of every iteration. ###\r\n def end_of_iteration_hook(self, trainer):\r\n record_these = [[trainer.loss_tracker.losses, {\"input_group_name_for_non_objects\": \"loss_histories\"}],\r\n [trainer.loss_tracker.loss_weights, {\"input_group_name_for_non_objects\": \"loss_weights\"}],\r\n [trainer.loss_funcs, {\"recursive_types\": [torch.nn.Module]}],\r\n [trainer.mining_funcs, {}],\r\n [trainer.models, {}],\r\n [trainer.optimizers, {\"custom_attr_func\": self.optimizer_custom_attr_func}]]\r\n for record, kwargs in record_these:\r\n self.record_keeper.update_records(record, trainer.get_global_iteration(), **kwargs)\r\n\r\n # This hook will be passed into the trainer and will be executed at the end of every epoch.\r\n def end_of_epoch_hook(self, tester, dataset_dict, model_folder, test_interval=1, patience=None, test_collate_fn=None):\r\n if not self.primary_metric in tester.accuracy_calculator.get_curr_metrics():\r\n raise ValueError(\"HookContainer `primary_metric` must be one of: {}\".format(tester.accuracy_calculator.get_curr_metrics()))\r\n if not os.path.exists(model_folder): os.makedirs(model_folder)\r\n def actual_hook(trainer):\r\n continue_training = True\r\n if trainer.epoch % test_interval == 0:\r\n best_epoch = self.save_models_and_eval(trainer, dataset_dict, model_folder, test_interval, tester, test_collate_fn)\r\n continue_training = self.patience_remaining(trainer.epoch, best_epoch, patience)\r\n return continue_training\r\n return actual_hook\r\n\r\n def end_of_testing_hook(self, tester):\r\n for split_name, accuracies in tester.all_accuracies.items():\r\n epoch = accuracies[\"epoch\"]\r\n self.record_keeper.update_records(accuracies, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))\r\n _, _, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, split_name, epoch)\r\n best = {\"best_epoch\":best_epoch, \"best_accuracy\": best_accuracy}\r\n self.record_keeper.update_records(best, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name)) \r\n\r\n for split_name, u in tester.dim_reduced_embeddings.items():\r\n for k, (dim_reduced, labels) in u.items():\r\n tag = '%s/%s'%(self.record_group_name(tester, split_name), k)\r\n self.record_keeper.add_embedding_plot(dim_reduced, labels, tag, epoch)\r\n\r\n\r\n\r\n 
############################################\r\n ############################################\r\n ######### MODEL LOADING AND SAVING #########\r\n ############################################\r\n ############################################\r\n\r\n def load_latest_saved_models(self, trainer, model_folder, device=None, best=False):\r\n if device is None: device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n resume_epoch, model_suffix = c_f.latest_version(model_folder, \"trunk_*.pth\", best=best)\r\n if resume_epoch > 0:\r\n for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:\r\n c_f.load_dict_of_models(obj_dict, model_suffix, model_folder, device, log_if_successful=True)\r\n return resume_epoch + 1\r\n\r\n\r\n def save_models(self, trainer, model_folder, curr_suffix, prev_suffix=None):\r\n for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:\r\n c_f.save_dict_of_models(obj_dict, curr_suffix, model_folder)\r\n if prev_suffix is not None:\r\n c_f.delete_dict_of_models(obj_dict, prev_suffix, model_folder) \r\n\r\n def save_models_and_eval(self, trainer, dataset_dict, model_folder, test_interval, tester, collate_fn):\r\n epoch = trainer.epoch\r\n tester.test(dataset_dict, epoch, trainer.models[\"trunk\"], trainer.models[\"embedder\"], list(dataset_dict.keys()), collate_fn)\r\n prev_best_epoch, _ = self.get_best_epoch_and_accuracy(tester, self.validation_split_name)\r\n is_new_best, curr_accuracy, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, self.validation_split_name, epoch)\r\n self.record_keeper.save_records()\r\n trainer.step_lr_plateau_schedulers(curr_accuracy)\r\n self.save_models(trainer, model_folder, epoch, epoch-test_interval) # save latest model\r\n if is_new_best:\r\n logging.info(\"New best accuracy! 
{}\".format(curr_accuracy))\r\n curr_suffix = \"best%d\"%best_epoch\r\n prev_suffix = \"best%d\"%prev_best_epoch if prev_best_epoch is not None else None\r\n self.save_models(trainer, model_folder, curr_suffix, prev_suffix) # save best model \r\n return best_epoch\r\n\r\n def is_new_best_accuracy(self, tester, split_name, epoch):\r\n curr_accuracy = self.get_curr_primary_metric(tester, split_name)\r\n best_epoch, best_accuracy = self.get_best_epoch_and_accuracy(tester, split_name)\r\n is_new_best = False\r\n if (curr_accuracy > best_accuracy) or (best_epoch is None):\r\n best_epoch, best_accuracy = epoch, curr_accuracy\r\n is_new_best = True\r\n return is_new_best, curr_accuracy, best_epoch, best_accuracy \r\n\r\n\r\n ############################################\r\n ############################################\r\n ##### BEST EPOCH AND ACCURACY TRACKING #####\r\n ############################################\r\n ############################################\r\n\r\n\r\n def get_loss_history(self, loss_names=()):\r\n columns = \"*\" if len(loss_names) == 0 else \", \".join(loss_names)\r\n table_name = \"loss_histories\"\r\n if not self.record_keeper.table_exists(table_name):\r\n return {}\r\n output = self.record_keeper.query(\"SELECT {} FROM {}\".format(columns, table_name), return_dict=True)\r\n output.pop(\"id\", None)\r\n return output\r\n\r\n\r\n def get_accuracy_history(self, tester, split_name, return_all_metrics=False, metrics=()):\r\n table_name = self.record_group_name(tester, split_name)\r\n\r\n if not self.record_keeper.table_exists(table_name):\r\n return {}\r\n\r\n def get_accuracies(keys):\r\n keys = \"*\" if return_all_metrics else \"epoch, %s\"%keys\r\n query = \"SELECT {} FROM {}\".format(keys, table_name)\r\n return self.record_keeper.query(query, return_dict=True)\r\n\r\n keys = metrics if len(metrics) > 0 else [self.primary_metric]\r\n output = self.try_keys(keys, tester, get_accuracies)\r\n output.pop(\"id\", None)\r\n return output\r\n\r\n\r\n def get_curr_primary_metric(self, tester, split_name):\r\n def get_curr(key):\r\n return tester.all_accuracies[split_name][key]\r\n return self.try_primary_metric(tester, get_curr)\r\n\r\n def try_keys(self, input_keys, tester, input_func):\r\n for average in [True, False]:\r\n keys = \", \".join([tester.accuracies_keyname(k, average=average, label_hierarchy_level=tester.label_hierarchy_level) for k in input_keys])\r\n try:\r\n return input_func(keys)\r\n except (KeyError, sqlite3.OperationalError):\r\n pass\r\n raise KeyError \r\n\r\n def try_primary_metric(self, tester, input_func):\r\n return self.try_keys([self.primary_metric], tester, input_func)\r\n\r\n # returns accuracies of a specified epoch\r\n def get_accuracies_of_epoch(self, tester, split_name, epoch, select_all=True):\r\n table_name = self.record_group_name(tester, split_name)\r\n if not self.record_keeper.table_exists(table_name):\r\n return []\r\n def get_accuracies(key):\r\n columns = \"*\" if select_all else \"epoch, %s\"%key\r\n query = \"SELECT %s FROM %s WHERE epoch=?\"%(columns, table_name)\r\n return self.record_keeper.query(query, (epoch, ))\r\n return self.try_primary_metric(tester, get_accuracies)\r\n\r\n # returns accuracies of best epoch and the metric name used to determine best acuracy\r\n def get_accuracies_of_best_epoch(self, tester, split_name, select_all=True, ignore_epoch=(-1,)):\r\n table_name = self.record_group_name(tester, split_name)\r\n if not self.record_keeper.table_exists(table_name):\r\n return [], None \r\n def get_accuracies(key):\r\n 
columns = \"*\" if select_all else \"epoch, %s\"%key\r\n params = \", \".join([\"?\"]*len(ignore_epoch))\r\n query = \"\"\"SELECT {0} FROM {1} WHERE {2}=\r\n (SELECT max({2}) FROM {1} WHERE epoch NOT IN ({3}))\r\n AND epoch NOT IN ({3})\"\"\".format(columns, table_name, key, params)\r\n output = self.record_keeper.query(query, ignore_epoch+ignore_epoch)\r\n return output, key\r\n return self.try_primary_metric(tester, get_accuracies)\r\n\r\n def get_best_epoch_and_accuracy(self, tester, split_name, ignore_epoch=(-1,)):\r\n accuracies, key = self.get_accuracies_of_best_epoch(tester, split_name, select_all=False, ignore_epoch=ignore_epoch)\r\n if len(accuracies) > 0:\r\n return accuracies[0][\"epoch\"], accuracies[0][key]\r\n return None, 0\r\n\r\n def patience_remaining(self, epoch, best_epoch, patience):\r\n if patience is not None and best_epoch is not None:\r\n if epoch - best_epoch > patience:\r\n logging.info(\"Validation accuracy has plateaued. Exiting.\")\r\n return False\r\n return True\r\n\r\n def run_tester_separately(self, tester, dataset_dict, epoch, trunk, embedder, splits_to_eval=None, collate_fn=None, skip_eval_if_already_done=True):\r\n if skip_eval_if_already_done:\r\n splits_to_eval = self.get_splits_to_eval(tester, dataset_dict, epoch, splits_to_eval)\r\n if len(splits_to_eval) == 0:\r\n logging.info(\"Already evaluated\")\r\n return False\r\n tester.test(dataset_dict, epoch, trunk, embedder, splits_to_eval, collate_fn)\r\n return True\r\n\r\n def get_splits_to_eval(self, tester, dataset_dict, epoch, input_splits_to_eval):\r\n input_splits_to_eval = list(dataset_dict.keys()) if input_splits_to_eval is None else input_splits_to_eval\r\n splits_to_eval = []\r\n for split in input_splits_to_eval:\r\n if len(self.get_accuracies_of_epoch(tester, split, epoch)) == 0:\r\n splits_to_eval.append(split)\r\n return splits_to_eval\r\n\r\n def base_record_group_name(self, tester):\r\n base_record_group_name = \"%s_\"%self.record_group_name_prefix if self.record_group_name_prefix else ''\r\n base_record_group_name += tester.description_suffixes(\"accuracies\")\r\n return base_record_group_name\r\n\r\n def record_group_name(self, tester, split_name):\r\n base_record_group_name = self.base_record_group_name(tester)\r\n return \"%s_%s\"%(base_record_group_name, split_name.upper())\r\n\r\n def optimizer_custom_attr_func(self, optimizer):\r\n return {\"lr\": optimizer.param_groups[0][\"lr\"]}\r\n\r\n\r\n\r\nclass EmptyContainer:\r\n def end_of_epoch_hook(self, *args):\r\n return None\r\n end_of_iteration_hook = None\r\n end_of_testing_hook = None\r\n\r\n\r\n\r\ndef get_record_keeper(csv_folder, tensorboard_folder, global_db_path=None, experiment_name=None, is_new_experiment=True, save_figures=False, save_lists=False):\r\n try:\r\n import record_keeper as record_keeper_package\r\n from torch.utils.tensorboard import SummaryWriter\r\n record_writer = record_keeper_package.RecordWriter(folder = csv_folder, \r\n global_db_path = global_db_path, \r\n experiment_name = experiment_name, \r\n is_new_experiment = is_new_experiment, \r\n save_lists = save_lists)\r\n tensorboard_writer = SummaryWriter(log_dir=tensorboard_folder)\r\n record_keeper = record_keeper_package.RecordKeeper(tensorboard_writer = tensorboard_writer, \r\n record_writer = record_writer, \r\n attributes_to_search_for = c_f.list_of_recordable_attributes_list_names(),\r\n save_figures=save_figures)\r\n return record_keeper, record_writer, tensorboard_writer\r\n\r\n except ModuleNotFoundError as e:\r\n logging.warn(e)\r\n 
logging.warn(\"There won't be any logging or model saving.\")\r\n logging.warn(\"To fix this, pip install record-keeper tensorboard\")\r\n return None, None, None\r\n\r\n\r\ndef get_hook_container(record_keeper, **kwargs):\r\n if record_keeper:\r\n return HookContainer(record_keeper, **kwargs)\r\n else:\r\n logging.warn(\"No record_keeper, so no preset hooks are being returned.\")\r\n return EmptyContainer()\r\n"
] | [
[
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available"
]
] |
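A minimal usage sketch of the two factory functions defined at the bottom of the file in the record above (folder names are illustrative); get_record_keeper returns (None, None, None) when record-keeper/tensorboard are not installed, and get_hook_container then falls back to an EmptyContainer:

from pytorch_metric_learning.utils import logging_presets

record_keeper, _, _ = logging_presets.get_record_keeper(
    "example_logs", "example_tensorboard")    # csv_folder, tensorboard_folder
hooks = logging_presets.get_hook_container(record_keeper)

# hooks.end_of_iteration_hook and hooks.end_of_epoch_hook(...) would then be
# passed to a trainer, as described in the HookContainer comments above.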